Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
authorDavid S. Miller <davem@davemloft.net>
Mon, 9 Mar 2015 19:58:21 +0000 (15:58 -0400)
committerDavid S. Miller <davem@davemloft.net>
Mon, 9 Mar 2015 19:58:21 +0000 (15:58 -0400)
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for your net-next
tree. Basically, improvements for the packet rejection infrastructure,
deprecation of CLUSTERIP, cleanups for nf_tables and some untangling for
br_netfilter. More specifically they are:

1) Send packet to reset flow if checksum is valid, from Florian Westphal.

2) Fix nf_tables reject bridge from the input chain, also from Florian.

3) Deprecate the CLUSTERIP target, the cluster match supersedes it in
   functionality and it's known to have problems.

4) A couple of cleanups for nf_tables rule tracing infrastructure, from
   Patrick McHardy.

5) Another cleanup to place transaction declarations at the bottom of
   nf_tables.h, also from Patrick.

6) Consolidate Kconfig dependencies wrt. NF_TABLES.

7) Limit table names to 32 bytes in nf_tables.

8) MAC header copying in bridge netfilter is already required when
   calling ip_fragment(), from Florian Westphal.

9) move nf_bridge_update_protocol() to br_netfilter.c, also from
   Florian.

10) Small refactor in br_netfilter in the transmission path, again from
    Florian.

11) Move br_nf_pre_routing_finish_bridge_slow() to br_netfilter.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
1687 files changed:
Documentation/ABI/testing/sysfs-driver-samsung-laptop
Documentation/ABI/testing/sysfs-driver-toshiba_acpi [new file with mode: 0644]
Documentation/DocBook/kgdb.tmpl
Documentation/cgroups/unified-hierarchy.txt
Documentation/clk.txt
Documentation/device-mapper/dm-crypt.txt
Documentation/devicetree/bindings/clock/exynos7-clock.txt
Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
Documentation/devicetree/bindings/clock/qcom,lcc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qoriq-clock.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/clock/ti,cdce706.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/fapll.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/img-mdc-dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
Documentation/devicetree/bindings/dma/snps-dma.txt
Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
Documentation/devicetree/bindings/i2c/i2c-ocores.txt
Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
Documentation/devicetree/bindings/i2c/trivial-devices.txt
Documentation/devicetree/bindings/mfd/da9063.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/qcom-rpm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mips/cavium/cib.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
Documentation/devicetree/bindings/mtd/atmel-nand.txt
Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
Documentation/devicetree/bindings/mtd/gpmi-nand.txt
Documentation/devicetree/bindings/mtd/hisi504-nand.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/mtd-physmap.txt
Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/pwm/img-pwm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pwm/pwm-sun4i.txt [new file with mode: 0644]
Documentation/devicetree/bindings/thermal/exynos-thermal.txt
Documentation/devicetree/bindings/thermal/thermal.txt
Documentation/devicetree/bindings/watchdog/gpio-wdt.txt
Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/mtk-wdt.txt [new file with mode: 0644]
Documentation/dmaengine/provider.txt
Documentation/filesystems/Locking
Documentation/filesystems/dlmfs.txt
Documentation/filesystems/ocfs2.txt
Documentation/filesystems/overlayfs.txt
Documentation/i2c/functionality
Documentation/ia64/paravirt_ops.txt [deleted file]
Documentation/input/alps.txt
Documentation/kbuild/makefiles.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/mpls-sysctl.txt [new file with mode: 0644]
Documentation/networking/s2io.txt
Documentation/networking/vxge.txt
Documentation/virtual/00-INDEX
Documentation/virtual/paravirt_ops.txt [new file with mode: 0644]
Documentation/x86/zero-page.txt
Kbuild
MAINTAINERS
Makefile
arch/alpha/include/asm/uaccess.h
arch/arc/boot/dts/abilis_tb10x.dtsi
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am437x-idk-evm.dts
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9263.dtsi
arch/arm/boot/dts/at91sam9g45.dtsi
arch/arm/boot/dts/at91sam9x5_macb0.dtsi
arch/arm/boot/dts/at91sam9x5_macb1.dtsi
arch/arm/boot/dts/bcm-cygnus.dtsi
arch/arm/boot/dts/bcm63138.dtsi
arch/arm/boot/dts/dm8168-evm.dts
arch/arm/boot/dts/dm816x.dtsi
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra72-evm.dts
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/sama5d3_emac.dtsi
arch/arm/boot/dts/spear13xx.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a23.dtsi
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/uaccess.h
arch/arm/kernel/perf_event_cpu.c
arch/arm/mach-asm9260/Kconfig
arch/arm/mach-at91/Kconfig
arch/arm/mach-at91/at91rm9200_time.c
arch/arm/mach-at91/generic.h
arch/arm/mach-at91/pm.c
arch/arm/mach-axxia/axxia.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-bcm/brcmstb.c
arch/arm/mach-davinci/Kconfig
arch/arm/mach-davinci/da8xx-dt.c
arch/arm/mach-davinci/mux.c
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/suspend.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-hisi/hisilicon.c
arch/arm/mach-imx/mmdc.c
arch/arm/mach-ixp4xx/include/mach/io.h
arch/arm/mach-keystone/keystone.c
arch/arm/mach-keystone/pm_domain.c
arch/arm/mach-mmp/time.c
arch/arm/mach-msm/board-halibut.c
arch/arm/mach-msm/board-qsd8x50.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-mvebu/pmsu.c
arch/arm/mach-mvebu/system-controller.c
arch/arm/mach-nspire/nspire.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/cclock3xxx_data.c [deleted file]
arch/arm/mach-omap2/clock.c
arch/arm/mach-omap2/clock.h
arch/arm/mach-omap2/clock_common_data.c
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/dpll44xx.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/prm.h
arch/arm/mach-omap2/prm3xxx.c
arch/arm/mach-omap2/prm44xx.c
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-prima2/Kconfig
arch/arm/mach-prima2/common.c
arch/arm/mach-prima2/platsmp.c
arch/arm/mach-pxa/idp.c
arch/arm/mach-pxa/lpd270.c
arch/arm/mach-realview/core.c
arch/arm/mach-realview/realview_eb.c
arch/arm/mach-rockchip/Kconfig
arch/arm/mach-rockchip/pm.h
arch/arm/mach-s5pv210/s5pv210.c
arch/arm/mach-sa1100/neponset.c
arch/arm/mach-sa1100/pleb.c
arch/arm/mach-shmobile/setup-emev2.c
arch/arm/mach-sti/Kconfig
arch/arm/mach-tegra/tegra.c
arch/arm/mach-ux500/pm_domains.c
arch/arm/mach-versatile/versatile_dt.c
arch/arm/mach-vexpress/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm64/boot/dts/arm/foundation-v8.dts
arch/arm64/boot/dts/arm/juno.dts
arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
arch/arm64/crypto/Makefile
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpuidle.h
arch/arm64/include/asm/insn.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/insn.c
arch/arm64/kernel/psci-call.S [new file with mode: 0644]
arch/arm64/kernel/psci.c
arch/arm64/kernel/signal32.c
arch/arm64/kernel/vdso/gettimeofday.S
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/init.c
arch/avr32/include/asm/uaccess.h
arch/avr32/mach-at32ap/at32ap700x.c
arch/blackfin/include/asm/bfin_rotary.h [deleted file]
arch/blackfin/include/asm/uaccess.h
arch/blackfin/mach-bf527/boards/ad7160eval.c
arch/blackfin/mach-bf527/boards/ezkit.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf609/boards/ezkit.c
arch/frv/include/asm/pgtable.h
arch/frv/include/asm/segment.h
arch/ia64/include/asm/uaccess.h
arch/m32r/include/asm/pgtable-2level.h
arch/m32r/include/asm/uaccess.h
arch/m68k/include/asm/pgtable_mm.h
arch/m68k/include/asm/segment.h
arch/m68k/include/asm/uaccess_mm.h
arch/metag/include/asm/processor.h
arch/metag/include/asm/uaccess.h
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/alchemy/common/clock.c
arch/mips/alchemy/common/setup.c
arch/mips/bcm3384/irq.c
arch/mips/boot/Makefile
arch/mips/boot/elf2ecoff.c
arch/mips/cavium-octeon/csrc-octeon.c
arch/mips/cavium-octeon/dma-octeon.c
arch/mips/cavium-octeon/executive/cvmx-helper-board.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/setup.c
arch/mips/configs/malta_qemu_32r6_defconfig [new file with mode: 0644]
arch/mips/fw/arc/misc.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/atomic.h
arch/mips/include/asm/bitops.h
arch/mips/include/asm/checksum.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/compiler.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu-info.h
arch/mips/include/asm/cpu-type.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/edac.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/fpu.h
arch/mips/include/asm/futex.h
arch/mips/include/asm/gio_device.h
arch/mips/include/asm/hazards.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/local.h
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
arch/mips/include/asm/mach-cavium-octeon/war.h
arch/mips/include/asm/mach-jz4740/jz4740_nand.h
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
arch/mips/include/asm/mips-r2-to-r6-emul.h [new file with mode: 0644]
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/module.h
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
arch/mips/include/asm/octeon/cvmx-rst-defs.h [new file with mode: 0644]
arch/mips/include/asm/octeon/octeon-model.h
arch/mips/include/asm/octeon/octeon.h
arch/mips/include/asm/pci.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/prom.h
arch/mips/include/asm/ptrace.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/sgialib.h
arch/mips/include/asm/siginfo.h [deleted file]
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/spram.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/thread_info.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/siginfo.h
arch/mips/jz4740/board-qi_lb60.c
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/branch.c
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-bugs64.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/elf.c
arch/mips/kernel/entry.S
arch/mips/kernel/genex.S
arch/mips/kernel/idle.c
arch/mips/kernel/mips-r2-to-r6-emul.c [new file with mode: 0644]
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/spram.c
arch/mips/kernel/syscall.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/lib/Makefile
arch/mips/lib/memcpy.S
arch/mips/lib/memset.S
arch/mips/lib/mips-atomic.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/c-r4k.c
arch/mips/mm/fault.c
arch/mips/mm/page.c
arch/mips/mm/sc-mips.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm-micromips.c
arch/mips/mm/uasm-mips.c
arch/mips/mm/uasm.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/pci/pci-bcm1480.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pcie-octeon.c
arch/mips/pmcs-msp71xx/Kconfig
arch/mips/sgi-ip22/ip22-gio.c
arch/mips/sgi-ip27/ip27-reset.c
arch/mips/sgi-ip32/ip32-reset.c
arch/mn10300/include/asm/pgtable.h
arch/mn10300/unit-asb2305/pci-iomap.c [deleted file]
arch/openrisc/include/asm/uaccess.h
arch/parisc/Makefile
arch/parisc/include/asm/pgtable.h
arch/powerpc/Makefile
arch/powerpc/configs/corenet32_smp_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/kernel/time.c
arch/powerpc/platforms/512x/clock-commonclk.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/pci_io.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/topology.h
arch/s390/kernel/cache.c
arch/s390/kernel/early.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/topology.c
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/mm/mmap.c
arch/s390/pci/pci.c
arch/sh/include/asm/segment.h
arch/sh/include/asm/uaccess.h
arch/sh/include/asm/uaccess_64.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile.um
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/aslr.c
arch/x86/boot/compressed/efi_stub_64.S
arch/x86/boot/compressed/efi_thunk_64.S [new file with mode: 0644]
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/imr.h [new file with mode: 0644]
arch/x86/include/asm/lguest_hcall.h
arch/x86/include/asm/page_types.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/uaccess.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/microcode/intel_early.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/irq.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/kvm.c
arch/x86/kernel/module.c
arch/x86/kernel/setup.c
arch/x86/kernel/uprobes.c
arch/x86/lguest/Kconfig
arch/x86/lguest/boot.c
arch/x86/mm/init.c
arch/x86/mm/mmap.c
arch/x86/platform/Makefile
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/efi/efi_thunk_64.S
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/platform/intel-quark/Makefile [new file with mode: 0644]
arch/x86/platform/intel-quark/imr.c [new file with mode: 0644]
arch/x86/platform/intel-quark/imr_selftest.c [new file with mode: 0644]
arch/x86/xen/enlighten.c
arch/x86/xen/spinlock.c
arch/xtensa/include/asm/uaccess.h
block/blk-throttle.c
drivers/acpi/Makefile
drivers/acpi/acpi_lpat.c [new file with mode: 0644]
drivers/acpi/acpi_lpss.c
drivers/acpi/ec.c
drivers/acpi/pmic/intel_pmic.c
drivers/acpi/resource.c
drivers/acpi/video.c
drivers/bcma/Kconfig
drivers/bcma/Makefile
drivers/bcma/bcma_private.h
drivers/bcma/driver_gpio.c
drivers/bcma/driver_pci.c
drivers/bcma/driver_pci_host.c
drivers/bcma/driver_pcie2.c
drivers/bcma/host_pci.c
drivers/bcma/main.c
drivers/block/nvme-core.c
drivers/block/nvme-scsi.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/btusb.c
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/virtio_console.c
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/at91/clk-programmable.c
drivers/clk/bcm/clk-kona.c
drivers/clk/clk-asm9260.c [new file with mode: 0644]
drivers/clk/clk-cdce706.c [new file with mode: 0644]
drivers/clk/clk-composite.c
drivers/clk/clk-divider.c
drivers/clk/clk-gate.c
drivers/clk/clk-mux.c
drivers/clk/clk-ppc-corenet.c [deleted file]
drivers/clk/clk-qoriq.c [new file with mode: 0644]
drivers/clk/clk.c
drivers/clk/clk.h
drivers/clk/clkdev.c
drivers/clk/hisilicon/clk-hi3620.c
drivers/clk/mmp/clk-mix.c
drivers/clk/pxa/Makefile
drivers/clk/pxa/clk-pxa.c
drivers/clk/pxa/clk-pxa3xx.c [new file with mode: 0644]
drivers/clk/qcom/Kconfig
drivers/clk/qcom/Makefile
drivers/clk/qcom/clk-pll.c
drivers/clk/qcom/clk-rcg.c
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-regmap-divider.c [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-divider.h [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-mux.c [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-mux.h [new file with mode: 0644]
drivers/clk/qcom/gcc-ipq806x.c
drivers/clk/qcom/lcc-ipq806x.c [new file with mode: 0644]
drivers/clk/qcom/lcc-msm8960.c [new file with mode: 0644]
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/samsung/clk-exynos-audss.c
drivers/clk/samsung/clk-exynos3250.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos4415.c
drivers/clk/samsung/clk-exynos7.c
drivers/clk/samsung/clk.c
drivers/clk/samsung/clk.h
drivers/clk/shmobile/Makefile
drivers/clk/shmobile/clk-div6.c
drivers/clk/shmobile/clk-r8a73a4.c [new file with mode: 0644]
drivers/clk/shmobile/clk-rcar-gen2.c
drivers/clk/st/clk-flexgen.c
drivers/clk/st/clkgen-mux.c
drivers/clk/sunxi/Makefile
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-factors.h
drivers/clk/sunxi/clk-mod0.c
drivers/clk/sunxi/clk-sun6i-ar100.c
drivers/clk/sunxi/clk-sun8i-mbus.c
drivers/clk/sunxi/clk-sun9i-core.c
drivers/clk/sunxi/clk-sun9i-mmc.c [new file with mode: 0644]
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/tegra/Makefile
drivers/clk/tegra/clk-id.h
drivers/clk/tegra/clk-periph.c
drivers/clk/tegra/clk-pll.c
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra114.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/tegra/clk.c
drivers/clk/ti/Makefile
drivers/clk/ti/clk-3xxx-legacy.c [new file with mode: 0644]
drivers/clk/ti/clk-3xxx.c
drivers/clk/ti/clk-44xx.c
drivers/clk/ti/clk-54xx.c
drivers/clk/ti/clk-7xx.c
drivers/clk/ti/clk-816x.c [new file with mode: 0644]
drivers/clk/ti/clk.c
drivers/clk/ti/clock.h [new file with mode: 0644]
drivers/clk/ti/composite.c
drivers/clk/ti/divider.c
drivers/clk/ti/dpll.c
drivers/clk/ti/fapll.c [new file with mode: 0644]
drivers/clk/ti/gate.c
drivers/clk/ti/interface.c
drivers/clk/ti/mux.c
drivers/clk/ux500/clk-prcc.c
drivers/clk/ux500/clk-prcmu.c
drivers/clk/zynq/clkc.c
drivers/clocksource/Kconfig
drivers/clocksource/mtk_timer.c
drivers/clocksource/pxa_timer.c
drivers/connector/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.powerpc
drivers/cpufreq/Makefile
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpuidle/cpuidle-powernv.c
drivers/crypto/ux500/cryp/cryp_core.c
drivers/crypto/ux500/hash/hash_core.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/amba-pl08x.c
drivers/dma/at_hdmac.c
drivers/dma/at_hdmac_regs.h
drivers/dma/at_xdmac.c
drivers/dma/bcm2835-dma.c
drivers/dma/coh901318.c
drivers/dma/cppi41.c
drivers/dma/dma-jz4740.c
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/dw/core.c
drivers/dma/dw/platform.c
drivers/dma/dw/regs.h
drivers/dma/edma.c
drivers/dma/ep93xx_dma.c
drivers/dma/fsl-edma.c
drivers/dma/fsldma.c
drivers/dma/fsldma.h
drivers/dma/img-mdc-dma.c [new file with mode: 0644]
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/dma/ioat/dma_v3.c
drivers/dma/ioat/hw.h
drivers/dma/ioat/pci.c
drivers/dma/ipu/ipu_idmac.c
drivers/dma/k3dma.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/moxart-dma.c
drivers/dma/mpc512x_dma.c
drivers/dma/mv_xor.c
drivers/dma/mxs-dma.c
drivers/dma/nbpfaxi.c
drivers/dma/of-dma.c
drivers/dma/omap-dma.c
drivers/dma/pch_dma.c
drivers/dma/pl330.c
drivers/dma/qcom_bam_dma.c
drivers/dma/s3c24xx-dma.c
drivers/dma/sa11x0-dma.c
drivers/dma/sh/Kconfig
drivers/dma/sh/Makefile
drivers/dma/sh/rcar-dmac.c [new file with mode: 0644]
drivers/dma/sh/rcar-hpbdma.c
drivers/dma/sh/shdma-base.c
drivers/dma/sh/shdmac.c
drivers/dma/sirf-dma.c
drivers/dma/ste_dma40.c
drivers/dma/sun6i-dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/timb_dma.c
drivers/dma/txx9dmac.c
drivers/dma/xilinx/xilinx_vdma.c
drivers/edac/amd64_edac.c
drivers/edac/sb_edac.c
drivers/firewire/core-transaction.c
drivers/firewire/ohci.c
drivers/firewire/sbp2.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/gpio/gpio-tps65912.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/hdmi.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-microsoft.c
drivers/hid/hid-saitek.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sony.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/wacom_wac.c
drivers/hwmon/Kconfig
drivers/hwmon/ads7828.c
drivers/hwmon/pmbus/Kconfig
drivers/i2c/Kconfig
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-bcm-iproc.c [new file with mode: 0644]
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-baytrail.c [new file with mode: 0644]
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-pmcmsp.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/iio/Kconfig
drivers/infiniband/core/ucma.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/ipath/ipath_fs.c
drivers/infiniband/hw/ipath/ipath_kernel.h
drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
drivers/infiniband/hw/mlx4/cm.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
drivers/infiniband/hw/ocrdma/ocrdma_stats.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib.h
drivers/infiniband/hw/qib/qib_common.h
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_diag.c
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_eeprom.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_intr.c
drivers/infiniband/hw/qib/qib_keys.c
drivers/infiniband/hw/qib/qib_mad.c
drivers/infiniband/hw/qib/qib_mmap.c
drivers/infiniband/hw/qib/qib_mr.c
drivers/infiniband/hw/qib/qib_pcie.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/qib/qib_qsfp.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/hw/qib/qib_ruc.c
drivers/infiniband/hw/qib/qib_sd7220.c
drivers/infiniband/hw/qib/qib_sysfs.c
drivers/infiniband/hw/qib/qib_twsi.c
drivers/infiniband/hw/qib/qib_tx.c
drivers/infiniband/hw/qib/qib_ud.c
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/hw/qib/qib_verbs_mcast.c
drivers/infiniband/hw/qib/qib_wc_x86_64.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/joystick/adi.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/misc/bfin_rotary.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/cypress_ps2.c
drivers/input/mouse/cypress_ps2.h
drivers/input/mouse/focaltech.c
drivers/input/mouse/focaltech.h
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/irqchip/irq-mips-gic.c
drivers/isdn/hardware/mISDN/Kconfig
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/lguest/Makefile
drivers/lguest/core.c
drivers/lguest/hypercalls.c
drivers/lguest/lg.h
drivers/lguest/lguest_device.c [deleted file]
drivers/lguest/lguest_user.c
drivers/lguest/page_tables.c
drivers/lguest/x86/core.c
drivers/md/Kconfig
drivers/md/dm-crypt.c
drivers/md/dm-io.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/persistent-data/Kconfig
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/raid1.c
drivers/md/raid5.c
drivers/mfd/88pm860x-core.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/da9063-core.c
drivers/mfd/da9063-i2c.c
drivers/mfd/da9150-core.c [new file with mode: 0644]
drivers/mfd/davinci_voicecodec.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/dln2.c
drivers/mfd/hi6421-pmic-core.c
drivers/mfd/intel_soc_pmic_core.c
drivers/mfd/intel_soc_pmic_core.h
drivers/mfd/intel_soc_pmic_crc.c
drivers/mfd/lm3533-core.c
drivers/mfd/lpc_sch.c
drivers/mfd/max77686.c
drivers/mfd/mc13xxx-i2c.c
drivers/mfd/mc13xxx-spi.c
drivers/mfd/omap-usb-host.c
drivers/mfd/pcf50633-core.c
drivers/mfd/qcom_rpm.c [new file with mode: 0644]
drivers/mfd/retu-mfd.c
drivers/mfd/rt5033.c [new file with mode: 0644]
drivers/mfd/rtsx_usb.c
drivers/mfd/smsc-ece1099.c
drivers/mfd/sun6i-prcm.c
drivers/mfd/tps65217.c
drivers/mfd/tps65218.c
drivers/mfd/twl-core.c
drivers/mfd/twl6040.c
drivers/mfd/wm8994-core.c
drivers/mmc/host/sunxi-mmc.c
drivers/mtd/bcm47xxpart.c
drivers/mtd/chips/map_ram.c
drivers/mtd/chips/map_rom.c
drivers/mtd/devices/st_spi_fsm.c
drivers/mtd/maps/physmap_of.c
drivers/mtd/mtdblock.c
drivers/mtd/mtdconcat.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/Makefile
drivers/mtd/nand/ams-delta.c
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/denali.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/hisi504_nand.c [new file with mode: 0644]
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/sunxi_nand.c
drivers/mtd/nftlmount.c
drivers/mtd/spi-nor/fsl-quadspi.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/Kconfig
drivers/net/appletalk/Kconfig
drivers/net/bonding/bond_main.c
drivers/net/can/bfin_can.c
drivers/net/dsa/bcm_sf2.h
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/8390/axnet_cs.c
drivers/net/ethernet/8390/pcnet_cs.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/amd8111e.h
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/apple/mace.c
drivers/net/ethernet/apple/macmace.c
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
drivers/net/ethernet/brocade/bna/bfi.h
drivers/net/ethernet/brocade/bna/bna_hw_defs.h
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/Makefile
drivers/net/ethernet/cadence/at91_ether.c [deleted file]
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_common.c
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_iov.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
drivers/net/ethernet/intel/fm10k/fm10k_type.h
drivers/net/ethernet/intel/fm10k/fm10k_vf.c
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_configfs.c [deleted file]
drivers/net/ethernet/intel/i40e/i40e_dcb.c
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.h
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/i40e_adminq.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/rocker/rocker.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/sfc/vfdi.h
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/toshiba/ps3_gelic_net.c
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/baycom_epp.c
drivers/net/hamradio/bpqether.c
drivers/net/hamradio/dmascc.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/hamradio/mkiss.c
drivers/net/hamradio/scc.c
drivers/net/hamradio/yam.c
drivers/net/macvtap.c
drivers/net/netconsole.c
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/phy.c
drivers/net/team/team.c
drivers/net/usb/Kconfig
drivers/net/usb/asix_devices.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/hso.c
drivers/net/usb/lg-vl600.c
drivers/net/usb/plusb.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/wan/cosa.c
drivers/net/wireless/airo.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c
drivers/net/wireless/ath/ath9k/ar9003_mci.h
drivers/net/wireless/ath/ath9k/ar9003_wow.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/btcoex.c
drivers/net/wireless/ath/ath9k/btcoex.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/reg_mci.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/reg_wow.h
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/ethtool.c
drivers/net/wireless/ath/wil6210/fw.c
drivers/net/wireless/ath/wil6210/fw_inc.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/atmel.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/core.c
drivers/net/wireless/brcm80211/brcmfmac/flowring.c
drivers/net/wireless/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/cw1200/sta.c
drivers/net/wireless/cw1200/txrx.c
drivers/net/wireless/hostap/hostap_80211_tx.c
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/hostap/hostap_wlan.h
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/libertas/debugfs.c
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/pcie.h
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/wext.c
drivers/net/wireless/p54/fwio.c
drivers/net/wireless/p54/main.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rtlwifi/base.h
drivers/net/wireless/rtlwifi/cam.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/core.h
drivers/net/wireless/rtlwifi/efuse.h
drivers/net/wireless/rtlwifi/rtl8188ee/def.h
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
drivers/net/wireless/rtlwifi/rtl8192ce/def.h
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
drivers/net/wireless/rtlwifi/rtl8192de/def.h
drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/rtlwifi/rtl8192ee/rf.h
drivers/net/wireless/rtlwifi/rtl8192se/def.h
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/def.h
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
drivers/net/wireless/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/rtlwifi/rtl8723be/rf.h
drivers/net/wireless/rtlwifi/rtl8821ae/def.h
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/rf.h
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/of/of_pci.c
drivers/pci/pcie/aer/Kconfig
drivers/platform/x86/Kconfig
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/classmate-laptop.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/intel_scu_ipc.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/pnp/resource.c
drivers/pwm/Kconfig
drivers/pwm/Makefile
drivers/pwm/core.c
drivers/pwm/pwm-atmel-hlcdc.c
drivers/pwm/pwm-img.c [new file with mode: 0644]
drivers/pwm/pwm-sti.c
drivers/pwm/pwm-sun4i.c [new file with mode: 0644]
drivers/pwm/pwm-tegra.c
drivers/rapidio/devices/tsi721_dma.c
drivers/regulator/qcom_rpm-regulator.c
drivers/rtc/Kconfig
drivers/rtc/rtc-ds1685.c
drivers/scsi/am53c974.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/hpsa.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/sg.c
drivers/scsi/virtio_scsi.c
drivers/scsi/wd719x.c
drivers/sh/pm_runtime.c
drivers/spi/Kconfig
drivers/ssb/main.c
drivers/staging/board/Kconfig
drivers/staging/emxx_udc/Kconfig
drivers/staging/iio/Kconfig
drivers/staging/lustre/lustre/llite/dcache.c
drivers/staging/lustre/lustre/llite/file.c
drivers/staging/lustre/lustre/llite/llite_internal.h
drivers/staging/lustre/lustre/llite/namei.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_core.h [deleted file]
drivers/target/iscsi/iscsi_target_datain_values.c
drivers/target/iscsi/iscsi_target_device.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_nodeattrib.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
drivers/target/iscsi/iscsi_target_stat.c
drivers/target/iscsi/iscsi_target_stat.h [deleted file]
drivers/target/iscsi/iscsi_target_tmr.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_tq.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/thermal/int340x_thermal/Makefile
drivers/thermal/int340x_thermal/int3400_thermal.c
drivers/thermal/int340x_thermal/int3402_thermal.c
drivers/thermal/int340x_thermal/int3403_thermal.c
drivers/thermal/int340x_thermal/int340x_thermal_zone.c [new file with mode: 0644]
drivers/thermal/int340x_thermal/int340x_thermal_zone.h [new file with mode: 0644]
drivers/thermal/int340x_thermal/processor_thermal_device.c
drivers/thermal/intel_powerclamp.c
drivers/thermal/intel_soc_dts_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/rcar_thermal.c
drivers/thermal/rockchip_thermal.c
drivers/thermal/samsung/Kconfig
drivers/thermal/samsung/Makefile
drivers/thermal/samsung/exynos_thermal_common.c [deleted file]
drivers/thermal/samsung/exynos_thermal_common.h [deleted file]
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/samsung/exynos_tmu.h
drivers/thermal/samsung/exynos_tmu_data.c [deleted file]
drivers/thermal/step_wise.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
drivers/tty/serial/Kconfig
drivers/usb/gadget/Kconfig
drivers/usb/gadget/legacy/Kconfig
drivers/usb/gadget/udc/Kconfig
drivers/usb/phy/Kconfig
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_intrs.c
drivers/vfio/pci/vfio_pci_private.h
drivers/vfio/vfio.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/virtio/Kconfig
drivers/virtio/Makefile
drivers/virtio/virtio.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c [new file with mode: 0644]
drivers/virtio/virtio_ring.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/bcm47xx_wdt.c
drivers/watchdog/da9063_wdt.c
drivers/watchdog/dw_wdt.c
drivers/watchdog/gpio_wdt.c
drivers/watchdog/hpwdt.c
drivers/watchdog/imgpdc_wdt.c [new file with mode: 0644]
drivers/watchdog/imx2_wdt.c
drivers/watchdog/it87_wdt.c
drivers/watchdog/jz4740_wdt.c
drivers/watchdog/mtk_wdt.c [new file with mode: 0644]
drivers/watchdog/omap_wdt.c
drivers/watchdog/retu_wdt.c
drivers/watchdog/rt2880_wdt.c
drivers/watchdog/twl4030_wdt.c
drivers/watchdog/w83627hf_wdt.c
drivers/xen/Makefile
drivers/xen/preempt.c [new file with mode: 0644]
drivers/xen/privcmd.c
drivers/xen/xen-scsiback.c
fs/9p/vfs_inode.c
fs/aio.c
fs/autofs4/dev-ioctl.c
fs/autofs4/expire.c
fs/autofs4/root.c
fs/bad_inode.c
fs/binfmt_elf.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/free-space-cache.c
fs/btrfs/inode-item.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/raid56.h
fs/btrfs/reada.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/tests/extent-buffer-tests.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/cachefiles/daemon.c
fs/cachefiles/interface.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/ceph/acl.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/cifs/file.c
fs/coda/dir.c
fs/configfs/configfs_internal.h
fs/configfs/dir.c
fs/configfs/file.c
fs/configfs/inode.c
fs/coredump.c
fs/dcache.c
fs/debugfs/inode.c
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/exportfs/expfs.c
fs/ext4/ext4.h
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/super.c
fs/fs-writeback.c
fs/fuse/dir.c
fs/gfs2/dir.c
fs/hfsplus/dir.c
fs/hppfs/hppfs.c
fs/internal.h
fs/jbd2/recovery.c
fs/jffs2/compr_rubin.c
fs/jffs2/dir.c
fs/jffs2/scan.c
fs/jffs2/super.c
fs/libfs.c
fs/locks.c
fs/namei.c
fs/namespace.c
fs/nfs/callback_proc.c
fs/nfs/callback_xdr.c
fs/nfs/delegation.c
fs/nfs/direct.c
fs/nfs/filelayout/filelayout.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.h
fs/nfs/pnfs_nfs.c
fs/nfs/write.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsfh.c
fs/nfsd/vfs.c
fs/nilfs2/btree.c
fs/notify/fanotify/fanotify.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/posix_acl.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/reiserfs/xattr.c
fs/super.c
fs/xfs/Makefile
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_iops.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_pnfs.c [new file with mode: 0644]
fs/xfs/xfs_pnfs.h [new file with mode: 0644]
fs/xfs/xfs_qm.c
include/acpi/acpi_lpat.h [new file with mode: 0644]
include/asm-generic/pci_iomap.h
include/drm/i915_pciids.h
include/dt-bindings/clock/alphascale,asm9260.h [new file with mode: 0644]
include/dt-bindings/clock/exynos4.h
include/dt-bindings/clock/exynos7-clk.h
include/dt-bindings/clock/qcom,gcc-ipq806x.h
include/dt-bindings/clock/qcom,lcc-ipq806x.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,lcc-msm8960.h [new file with mode: 0644]
include/dt-bindings/clock/tegra124-car-common.h [new file with mode: 0644]
include/dt-bindings/clock/tegra124-car.h
include/dt-bindings/mfd/qcom-rpm.h [new file with mode: 0644]
include/dt-bindings/thermal/thermal_exynos.h [new file with mode: 0644]
include/linux/bcm47xx_wdt.h
include/linux/bcma/bcma.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bcma/bcma_driver_gmac_cmn.h
include/linux/bcma/bcma_driver_mips.h
include/linux/bcma/bcma_driver_pci.h
include/linux/bcma/bcma_driver_pcie2.h
include/linux/ceph/ceph_fs.h
include/linux/ceph/libceph.h
include/linux/ceph/messenger.h
include/linux/ceph/mon_client.h
include/linux/clk-private.h [deleted file]
include/linux/clk-provider.h
include/linux/clk.h
include/linux/clk/sunxi.h [deleted file]
include/linux/clk/tegra.h
include/linux/clk/ti.h
include/linux/compiler.h
include/linux/dcache.h
include/linux/dmaengine.h
include/linux/fs.h
include/linux/hid-sensor-hub.h
include/linux/i2c.h
include/linux/if_bridge.h
include/linux/irqchip/mips-gic.h
include/linux/kdb.h
include/linux/lguest_launcher.h
include/linux/mfd/axp20x.h
include/linux/mfd/da9063/core.h
include/linux/mfd/da9150/core.h [new file with mode: 0644]
include/linux/mfd/da9150/registers.h [new file with mode: 0644]
include/linux/mfd/max77686-private.h
include/linux/mfd/max77686.h
include/linux/mfd/qcom_rpm.h [new file with mode: 0644]
include/linux/mfd/rt5033-private.h [new file with mode: 0644]
include/linux/mfd/rt5033.h [new file with mode: 0644]
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/mtd/mtd.h
include/linux/mtd/spi-nor.h
include/linux/netdevice.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/nvme.h
include/linux/platform_data/bfin_rotary.h [new file with mode: 0644]
include/linux/platform_data/dma-dw.h
include/linux/platform_data/dma-mmp_tdma.h
include/linux/rhashtable.h
include/linux/sched.h
include/linux/socket.h
include/linux/sunrpc/metrics.h
include/linux/thermal.h
include/linux/vfio.h
include/linux/virtio_mmio.h
include/net/arp.h
include/net/ax25.h
include/net/caif/cfpkt.h
include/net/dcbnl.h
include/net/dn_neigh.h
include/net/dsa.h
include/net/inet_connection_sock.h
include/net/ip_fib.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netns/ipv4.h
include/net/netns/mpls.h [new file with mode: 0644]
include/net/sch_generic.h
include/net/switchdev.h
include/net/tcp.h
include/target/iscsi/iscsi_target_core.h [new file with mode: 0644]
include/target/iscsi/iscsi_target_stat.h [new file with mode: 0644]
include/target/iscsi/iscsi_transport.h
include/target/target_core_base.h
include/uapi/linux/btrfs.h
include/uapi/linux/dcbnl.h
include/uapi/linux/if_link.h
include/uapi/linux/nvme.h
include/uapi/linux/prctl.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/tc_act/Kbuild
include/uapi/linux/tipc_netlink.h
include/uapi/linux/vfio.h
include/uapi/linux/virtio_balloon.h
include/uapi/linux/virtio_blk.h
include/uapi/linux/virtio_config.h
include/uapi/linux/virtio_net.h
include/uapi/linux/virtio_pci.h
include/uapi/rdma/ib_user_verbs.h
include/xen/xen-ops.h
init/Kconfig
kernel/bpf/core.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_io.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/gcov/Makefile
kernel/livepatch/core.c
kernel/locking/rtmutex.c
kernel/printk/printk.c
kernel/rcu/tree_plugin.h
kernel/sched/auto_group.c
kernel/sched/completion.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/sched.h
kernel/sys.c
kernel/time/ntp.c
lib/Kconfig
lib/pci_iomap.c
lib/rhashtable.c
lib/test_rhashtable.c
mm/Kconfig
mm/memcontrol.c
mm/nommu.c
mm/page_alloc.c
mm/shmem.c
net/8021q/vlan_dev.c
net/9p/trans_virtio.c
net/Kconfig
net/Makefile
net/appletalk/aarp.c
net/atm/lec.c
net/atm/signaling.c
net/ax25/ax25_ip.c
net/bluetooth/bnep/netdev.c
net/bridge/br.c
net/bridge/br_forward.c
net/bridge/br_input.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_sysfs_if.c
net/caif/cffrml.c
net/caif/cfpkt_skbuff.c
net/ceph/ceph_common.c
net/ceph/ceph_strings.c
net/ceph/debugfs.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/compat.c
net/core/dev.c
net/core/ethtool.c
net/core/gen_stats.c
net/core/neighbour.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dcb/dcbnl.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/dsa/dsa.c
net/ethernet/eth.c
net/hsr/hsr_device.c
net/hsr/hsr_main.c
net/hsr/hsr_slave.c
net/ipv4/arp.c
net/ipv4/fib_frontend.c
net/ipv4/fib_rules.c
net/ipv4/fib_trie.c
net/ipv4/inet_diag.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv6/addrconf.c
net/ipv6/ip6_output.c
net/ipv6/ndisc.c
net/irda/ircomm/ircomm_tty.c
net/irda/irnet/irnet_ppp.c
net/l2tp/l2tp_eth.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/ibss.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/rc80211_minstrel.c
net/mac80211/tx.c
net/mpls/Kconfig
net/mpls/Makefile
net/mpls/af_mpls.c [new file with mode: 0644]
net/mpls/internal.h [new file with mode: 0644]
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nft_compat.c
net/netfilter/nft_hash.c
net/netfilter/xt_recent.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/openvswitch/Kconfig
net/openvswitch/datapath.c
net/openvswitch/flow_netlink.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/rxrpc/ar-ack.c
net/sched/Kconfig
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_bpf.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/ematch.c
net/sched/sch_api.c
net/sunrpc/auth_gss/gss_rpc_upcall.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/backchannel_rqst.c
net/switchdev/Kconfig
net/switchdev/switchdev.c
net/tipc/Kconfig
net/tipc/Makefile
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/discover.c
net/tipc/msg.h
net/tipc/socket.c
net/tipc/udp_media.c [new file with mode: 0644]
net/wireless/core.c
net/wireless/ibss.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/trace.h
net/wireless/wext-sme.c
scripts/Kbuild.include
scripts/Makefile.clean
scripts/gdb/linux/__init__.py [new file with mode: 0644]
scripts/kconfig/confdata.c
scripts/kconfig/merge_config.sh
scripts/package/builddeb
security/apparmor/include/apparmor.h
security/apparmor/lsm.c
security/apparmor/path.c
security/inode.c
security/integrity/Kconfig
security/integrity/evm/Kconfig
security/selinux/hooks.c
security/smack/smack_lsm.c
security/tomoyo/file.c
sound/core/pcm_native.c
sound/core/seq/seq_midi_emul.c
sound/firewire/amdtp.c
sound/firewire/bebob/bebob.c
sound/firewire/bebob/bebob_stream.c
sound/firewire/dice/dice-stream.c
sound/firewire/dice/dice.c
sound/firewire/fireworks/fireworks.c
sound/firewire/fireworks/fireworks_stream.c
sound/firewire/oxfw/oxfw-stream.c
sound/firewire/oxfw/oxfw.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/rme9652/hdspm.c
sound/soc/intel/sst-haswell-pcm.c
sound/soc/soc-generic-dmaengine-pcm.c
sound/usb/clock.c
sound/usb/line6/driver.c
sound/usb/line6/driver.h
sound/usb/quirks.c
sound/usb/quirks.h
tools/lguest/Makefile
tools/lguest/lguest.c
tools/perf/bench/mem-memcpy.c
tools/perf/config/Makefile.arch
tools/perf/config/feature-checks/Makefile
tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
tools/perf/util/cloexec.c
tools/perf/util/evlist.h
tools/perf/util/symbol-elf.c
tools/thermal/tmon/.gitignore [new file with mode: 0644]
tools/thermal/tmon/Makefile
tools/thermal/tmon/tmon.8
tools/thermal/tmon/tmon.c
tools/thermal/tmon/tui.c

index 678819a3f8bf89e3b43e134979af2fa3f28199fd..63c1ad0212fc8624432f1691240443b9015b5d3f 100644 (file)
@@ -35,3 +35,11 @@ Contact:     Corentin Chary <corentin.chary@gmail.com>
 Description:   Use your USB ports to charge devices, even
                when your laptop is powered off.
                1 means enabled, 0 means disabled.
+
+What:          /sys/devices/platform/samsung/lid_handling
+Date:          December 11, 2014
+KernelVersion: 3.19
+Contact:       Julijonas Kikutis <julijonas.kikutis@gmail.com>
+Description:   Some Samsung laptops handle lid closing more quickly and
+               only handle lid opening with this mode enabled.
+               1 means enabled, 0 means disabled.
diff --git a/Documentation/ABI/testing/sysfs-driver-toshiba_acpi b/Documentation/ABI/testing/sysfs-driver-toshiba_acpi
new file mode 100644 (file)
index 0000000..ca9c71a
--- /dev/null
@@ -0,0 +1,114 @@
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_mode
+Date:          June 8, 2014
+KernelVersion: 3.15
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the keyboard backlight operation mode, valid
+               values are:
+                       * 0x1  -> FN-Z
+                       * 0x2  -> AUTO (also called TIMER)
+                       * 0x8  -> ON
+                       * 0x10 -> OFF
+               Note that from kernel 3.16 onwards this file accepts all listed
+               parameters; kernel 3.15 only accepts the first two (FN-Z and
+               AUTO).
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_timeout
+Date:          June 8, 2014
+KernelVersion: 3.15
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the timeout of the keyboard backlight
+               whenever the operation mode is set to AUTO (or TIMER),
+               valid values range from 0-60.
+               Note that kernel 3.15 only had support for the first
+               keyboard type; kernel 3.16 added support for the second
+               type, and the range accepted for type 2 is 1-60.
+               See the entry named "kbd_type"
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/position
+Date:          June 8, 2014
+KernelVersion: 3.15
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file shows the absolute position of the built-in
+               accelerometer.
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/touchpad
+Date:          June 8, 2014
+KernelVersion: 3.15
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the status of the touchpad and pointing
+               stick (if available), valid values are:
+                       * 0 -> OFF
+                       * 1 -> ON
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/available_kbd_modes
+Date:          August 3, 2014
+KernelVersion: 3.16
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file shows the supported keyboard backlight modes
+               the system supports, which can be:
+                       * 0x1  -> FN-Z
+                       * 0x2  -> AUTO (also called TIMER)
+                       * 0x8  -> ON
+                       * 0x10 -> OFF
+               Note that not all keyboard types support the listed modes.
+               See the entry named "kbd_type".
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_type
+Date:          August 3, 2014
+KernelVersion: 3.16
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file shows the current keyboard backlight type,
+               which can be:
+                       * 1 -> Type 1, supporting modes FN-Z and AUTO
+                       * 2 -> Type 2, supporting modes TIMER, ON and OFF
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/version
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file shows the current version of the driver
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/fan
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the state of the internal fan, valid
+               values are:
+                       * 0 -> OFF
+                       * 1 -> ON
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_function_keys
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the Special Functions (hotkeys) operation
+               mode, valid values are:
+                       * 0 -> Normal Operation
+                       * 1 -> Special Functions
+               In the "Normal Operation" mode, the F{1-12} keys are as usual
+               and the hotkeys are accessed via FN-F{1-12}.
+               In the "Special Functions" mode, the F{1-12} keys trigger the
+               hotkey and the F{1-12} keys are accessed via FN-F{1-12}.
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/panel_power_on
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls whether the laptop should turn ON whenever
+               the LID is opened, valid values are:
+                       * 0 -> Disabled
+                       * 1 -> Enabled
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/usb_three
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the USB 3 functionality, valid
+               values are:
+                       * 0 -> Disabled (Acts as a regular USB 2)
+                       * 1 -> Enabled (Full USB 3 functionality)
index 2428cc04dbc84b5f7243558296f6f36fa9621fb4..f3abca7ec53d62e4bd416da9f53395aef9269855 100644 (file)
    may be configured as a kernel built-in or a kernel loadable module.
    You can only make use of <constant>kgdbwait</constant> and early
    debugging if you build kgdboc into the kernel as a built-in.
+   </para>
    <para>Optionally you can elect to activate kms (Kernel Mode
    Setting) integration.  When you use kms with kgdboc and you have a
    video driver that has atomic mode setting hooks, it is possible to
    crashes or doing analysis of memory with kdb while allowing the
    full graphics console applications to run.
    </para>
-   </para>
    <sect2 id="kgdbocArgs">
    <title>kgdboc arguments</title>
    <para>Usage: <constant>kgdboc=[kms][[,]kbd][[,]serial_device][,baud]</constant></para>
    </listitem>
    </orderedlist>
    </para>
-   </sect3>
    <para>NOTE: Kgdboc does not support interrupting the target via the
    gdb remote protocol.  You must manually send a sysrq-g unless you
    have a proxy that splits console output to a terminal program.
     as well as on the initial connect, or to use a debugger proxy that
     allows an unmodified gdb to do the debugging.
    </para>
+   </sect3>
    </sect2>
    </sect1>
    <sect1 id="kgdbwait">
    </para>
    </listitem>
    </orderedlist>
+  </para>
    <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
    active system console.  An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
    </para>
    <para>It is possible to use this option with kgdboc on a tty that is not a system console.
    </para>
-  </para>
   </sect1>
    <sect1 id="kgdbreboot">
    <title>Run time parameter: kgdbreboot</title>
index 71daa35ec2d9201d2a4647e0a088af3ab36bbae3..eb102fb722134758a39d7d2e17c1dd00fdc0d799 100644 (file)
@@ -404,8 +404,8 @@ supported and the interface files "release_agent" and
   be understood as an underflow into the highest possible value, -2 or
   -10M etc. do not work, so it's not consistent.
 
-  memory.low, memory.high, and memory.max will use the string
-  "infinity" to indicate and set the highest possible value.
+  memory.low, memory.high, and memory.max will use the string "max" to
+  indicate and set the highest possible value.
 
 5. Planned Changes
 
index 4ff84623d5e16eb42c17f90eb1851eea19b9d82f..0e4f90aa1c136eaa40c9d93e3586bbffdd180de4 100644 (file)
@@ -73,6 +73,8 @@ the operations defined in clk.h:
                                                unsigned long *parent_rate);
                long            (*determine_rate)(struct clk_hw *hw,
                                                unsigned long rate,
+                                               unsigned long min_rate,
+                                               unsigned long max_rate,
                                                unsigned long *best_parent_rate,
                                                struct clk_hw **best_parent_clk);
                int             (*set_parent)(struct clk_hw *hw, u8 index);
index c81839b52c4dd0102d6444c464359690f395778e..ad697781f9ac478477cfed76978b047685eda2b6 100644 (file)
@@ -51,7 +51,7 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
     Otherwise #opt_params is the number of following arguments.
 
     Example of optional parameters section:
-        1 allow_discards
+        3 allow_discards same_cpu_crypt submit_from_crypt_cpus
 
 allow_discards
     Block discard requests (a.k.a. TRIM) are passed through the crypt device.
@@ -63,6 +63,19 @@ allow_discards
     used space etc.) if the discarded blocks can be located easily on the
     device later.
 
+same_cpu_crypt
+    Perform encryption using the same cpu that IO was submitted on.
+    The default is to use an unbound workqueue so that encryption work
+    is automatically balanced between available CPUs.
+
+submit_from_crypt_cpus
+    Disable offloading writes to a separate thread after encryption.
+    There are some situations where offloading write bios from the
+    encryption threads to a single thread degrades performance
+    significantly.  The default is to offload write bios to the same
+    thread because it benefits CFQ to have writes submitted using the
+    same context.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
index 6d3d5f80c1c3186ba35a7c28dd950e2f754e3027..6bf1e7493f61febb762a212d6306dcaa03fb00c4 100644 (file)
@@ -34,6 +34,8 @@ Required Properties for Clock Controller:
        - "samsung,exynos7-clock-peris"
        - "samsung,exynos7-clock-fsys0"
        - "samsung,exynos7-clock-fsys1"
+       - "samsung,exynos7-clock-mscl"
+       - "samsung,exynos7-clock-aud"
 
  - reg: physical base address of the controller and the length of
        memory mapped region.
@@ -53,6 +55,7 @@ Input clocks for top0 clock controller:
        - dout_sclk_bus1_pll
        - dout_sclk_cc_pll
        - dout_sclk_mfc_pll
+       - dout_sclk_aud_pll
 
 Input clocks for top1 clock controller:
        - fin_pll
@@ -76,6 +79,14 @@ Input clocks for peric1 clock controller:
        - sclk_uart1
        - sclk_uart2
        - sclk_uart3
+       - sclk_spi0
+       - sclk_spi1
+       - sclk_spi2
+       - sclk_spi3
+       - sclk_spi4
+       - sclk_i2s1
+       - sclk_pcm1
+       - sclk_spdif
 
 Input clocks for peris clock controller:
        - fin_pll
@@ -91,3 +102,7 @@ Input clocks for fsys1 clock controller:
        - dout_aclk_fsys1_200
        - dout_sclk_mmc0
        - dout_sclk_mmc1
+
+Input clocks for aud clock controller:
+       - fin_pll
+       - fout_aud_pll
index ded5d6212c84dfc0c6e9a5774b1f8e8ce975b0cb..c6620bc9670364315bcc8687ec81c016e7a89719 100644 (file)
@@ -1,4 +1,4 @@
-NVIDIA Tegra124 Clock And Reset Controller
+NVIDIA Tegra124 and Tegra132 Clock And Reset Controller
 
 This binding uses the common clock binding:
 Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -7,14 +7,16 @@ The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
 for muxing and gating Tegra's clocks, and setting their rates.
 
 Required properties :
-- compatible : Should be "nvidia,tegra124-car"
+- compatible : Should be "nvidia,tegra124-car" or "nvidia,tegra132-car"
 - reg : Should contain CAR registers location and length
 - clocks : Should contain phandle and clock specifiers for two clocks:
   the 32 KHz "32k_in", and the board-specific oscillator "osc".
 - #clock-cells : Should be 1.
   In clock consumers, this cell represents the clock ID exposed by the
-  CAR. The assignments may be found in header file
-  <dt-bindings/clock/tegra124-car.h>.
+  CAR. The assignments may be found in the header files
+  <dt-bindings/clock/tegra124-car-common.h> (which covers IDs common
+  to Tegra124 and Tegra132) and <dt-bindings/clock/tegra124-car.h>
+  (for Tegra124-specific clocks).
 - #reset-cells : Should be 1.
   In clock consumers, this cell represents the bit number in the CAR's
   array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
diff --git a/Documentation/devicetree/bindings/clock/qcom,lcc.txt b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
new file mode 100644 (file)
index 0000000..dd755be
--- /dev/null
@@ -0,0 +1,21 @@
+Qualcomm LPASS Clock & Reset Controller Binding
+------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+                       "qcom,lcc-msm8960"
+                       "qcom,lcc-apq8064"
+                       "qcom,lcc-ipq8064"
+
+- reg : shall contain base register location and length
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Example:
+       clock-controller@28000000 {
+               compatible = "qcom,lcc-ipq8064";
+               reg = <0x28000000 0x1000>;
+               #clock-cells = <1>;
+               #reset-cells = <1>;
+       };
index 266ff9d232293a976de9e6a851e1bd7d1f2de7d0..df4a259a6898c5d7fe62fd5ad7e3ef323b5719b6 100644 (file)
@@ -1,6 +1,6 @@
-* Clock Block on Freescale CoreNet Platforms
+* Clock Block on Freescale QorIQ Platforms
 
-Freescale CoreNet chips take primary clocking input from the external
+Freescale QorIQ chips take primary clocking input from the external
 SYSCLK signal. The SYSCLK input (frequency) is multiplied using
 multiple phase locked loops (PLL) to create a variety of frequencies
 which can then be passed to a variety of internal logic, including
@@ -29,6 +29,7 @@ Required properties:
        * "fsl,t4240-clockgen"
        * "fsl,b4420-clockgen"
        * "fsl,b4860-clockgen"
+       * "fsl,ls1021a-clockgen"
        Chassis clock strings include:
        * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
        * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
index 2e18676bd4b56503ce42ec4b4d368c32041fe1d0..0a80fa70ca265c0f2c7b7657d7270f47d6f53a45 100644 (file)
@@ -11,6 +11,7 @@ Required Properties:
 
   - compatible: Must be one of the following
     - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks
+    - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks
     - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks
     - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks
     - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks
diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt
new file mode 100644 (file)
index 0000000..ece9239
--- /dev/null
@@ -0,0 +1,33 @@
+* Renesas R8A73A4 Clock Pulse Generator (CPG)
+
+The CPG generates core clocks for the R8A73A4 SoC. It includes five PLLs
+and several fixed ratio dividers.
+
+Required Properties:
+
+  - compatible: Must be "renesas,r8a73a4-cpg-clocks"
+
+  - reg: Base address and length of the memory resource used by the CPG
+
+  - clocks: Reference to the parent clocks ("extal1" and "extal2")
+
+  - #clock-cells: Must be 1
+
+  - clock-output-names: The names of the clocks. Supported clocks are "main",
+    "pll0", "pll1", "pll2", "pll2s", "pll2h", "z", "z2", "i", "m3", "b",
+    "m1", "m2", "zx", "zs", and "hp".
+
+
+Example
+-------
+
+        cpg_clocks: cpg_clocks@e6150000 {
+                compatible = "renesas,r8a73a4-cpg-clocks";
+                reg = <0 0xe6150000 0 0x10000>;
+                clocks = <&extal1_clk>, <&extal2_clk>;
+                #clock-cells = <1>;
+                clock-output-names = "main", "pll0", "pll1", "pll2",
+                                     "pll2s", "pll2h", "z", "z2",
+                                     "i", "m3", "b", "m1", "m2",
+                                     "zx", "zs", "hp";
+        };
index e6ad35b894f919f537dbf58cbdea6fffd9cddfc9..b02944fba9de4f8696d9f6ac7845acb96937aeed 100644 (file)
@@ -8,15 +8,18 @@ Required Properties:
   - compatible: Must be one of
     - "renesas,r8a7790-cpg-clocks" for the r8a7790 CPG
     - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG
+    - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG
     - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG
     - "renesas,rcar-gen2-cpg-clocks" for the generic R-Car Gen2 CPG
 
   - reg: Base address and length of the memory resource used by the CPG
 
-  - clocks: Reference to the parent clock
+  - clocks: References to the parent clocks: first to the EXTAL clock, second
+    to the USB_EXTAL clock
   - #clock-cells: Must be 1
   - clock-output-names: The names of the clocks. Supported clocks are "main",
-    "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1" and "z"
+    "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", "rcan", and
+    "adsp"
 
 
 Example
@@ -26,8 +29,9 @@ Example
                compatible = "renesas,r8a7790-cpg-clocks",
                             "renesas,rcar-gen2-cpg-clocks";
                reg = <0 0xe6150000 0 0x1000>;
-               clocks = <&extal_clk>;
+               clocks = <&extal_clk &usb_extal_clk>;
                #clock-cells = <1>;
                clock-output-names = "main", "pll0", "pll1", "pll3",
-                                    "lb", "qspi", "sdh", "sd0", "sd1", "z";
+                                    "lb", "qspi", "sdh", "sd0", "sd1", "z",
+                                    "rcan", "adsp";
        };
index 67b2b99f2b339f0a35a8fddfd9127779542ca76b..60b44285250d3b3e9e580f9d71e51239df3b1c76 100644 (file)
@@ -26,7 +26,7 @@ Required properties:
        "allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
        "allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20
        "allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
-       "allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
+       "allwinner,sun6i-a31-ahb1-clk" - for the AHB1 clock on A31
        "allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
        "allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
        "allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80
@@ -55,9 +55,11 @@ Required properties:
        "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
        "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
        "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
-       "allwinner,sun4i-a10-mmc-output-clk" - for the MMC output clock on A10
-       "allwinner,sun4i-a10-mmc-sample-clk" - for the MMC sample clock on A10
+       "allwinner,sun4i-a10-mmc-clk" - for the MMC clock
+       "allwinner,sun9i-a80-mmc-clk" - for mmc module clocks on A80
+       "allwinner,sun9i-a80-mmc-config-clk" - for mmc gates + resets on A80
        "allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
+       "allwinner,sun9i-a80-mod0-clk" - for module 0 (storage) clocks on A80
        "allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23
        "allwinner,sun7i-a20-out-clk" - for the external output clocks
        "allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
@@ -73,7 +75,9 @@ Required properties for all clocks:
 - #clock-cells : from common clock binding; shall be set to 0 except for
        the following compatibles where it shall be set to 1:
        "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
-       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk"
+       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk",
+       "allwinner,*-usb-clk", "allwinner,*-mmc-clk",
+       "allwinner,*-mmc-config-clk"
 - clock-output-names : shall be the corresponding names of the outputs.
        If the clock module only has one output, the name shall be the
        module name.
@@ -81,6 +85,10 @@ Required properties for all clocks:
 And "allwinner,*-usb-clk" clocks also require:
 - reset-cells : shall be set to 1
 
+The "allwinner,sun9i-a80-mmc-config-clk" clock also requires:
+- #reset-cells : shall be set to 1
+- resets : shall be the reset control phandle for the mmc block.
+
 For "allwinner,sun7i-a20-gmac-clk", the parent clocks shall be fixed rate
 dummy clocks at 25 MHz and 125 MHz, respectively. See example.
 
@@ -95,6 +103,14 @@ For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output
 is the normal PLL6 output, or "pll6". The second output is rate doubled
 PLL6, or "pll6x2".
 
+The "allwinner,*-mmc-clk" clocks have three different outputs: the
+main clock, with the ID 0, and the output and sample clocks, with the
+IDs 1 and 2, respectively.
+
+The "allwinner,sun9i-a80-mmc-config-clk" clock has one clock/reset output
+per mmc controller. The number of outputs is determined by the size of
+the address block, which is related to the overall mmc block.
+
 For example:
 
 osc24M: clk@01c20050 {
@@ -138,11 +154,11 @@ cpu: cpu@01c20054 {
 };
 
 mmc0_clk: clk@01c20088 {
-       #clock-cells = <0>;
-       compatible = "allwinner,sun4i-mod0-clk";
+       #clock-cells = <1>;
+       compatible = "allwinner,sun4i-a10-mmc-clk";
        reg = <0x01c20088 0x4>;
        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-       clock-output-names = "mmc0";
+       clock-output-names = "mmc0", "mmc0_output", "mmc0_sample";
 };
 
 mii_phy_tx_clk: clk@2 {
@@ -170,3 +186,16 @@ gmac_clk: clk@01c20164 {
        clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>;
        clock-output-names = "gmac";
 };
+
+mmc_config_clk: clk@01c13000 {
+       compatible = "allwinner,sun9i-a80-mmc-config-clk";
+       reg = <0x01c13000 0x10>;
+       clocks = <&ahb0_gates 8>;
+       clock-names = "ahb";
+       resets = <&ahb0_resets 8>;
+       reset-names = "ahb";
+       #clock-cells = <1>;
+       #reset-cells = <1>;
+       clock-output-names = "mmc0_config", "mmc1_config",
+                            "mmc2_config", "mmc3_config";
+};
diff --git a/Documentation/devicetree/bindings/clock/ti,cdce706.txt b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
new file mode 100644 (file)
index 0000000..616836e
--- /dev/null
@@ -0,0 +1,42 @@
+Bindings for Texas Instruments CDCE706 programmable 3-PLL clock
+synthesizer/multiplier/divider.
+
+Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+
+I2C device node required properties:
+- compatible: shall be "ti,cdce706".
+- reg: i2c device address, shall be in range [0x68...0x6b].
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+  handles, shall be reference clock(s) connected to CLK_IN0
+  and CLK_IN1 pins.
+- clock-names: shall be clk_in0 and/or clk_in1. Use clk_in0
+  in case of crystal oscillator or differential signal input
+  configuration. Use clk_in0 and clk_in1 in case of independent
+  single-ended LVCMOS inputs configuration.
+
+Example:
+
+       clocks {
+               clk54: clk54 {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <54000000>;
+               };
+       };
+       ...
+       i2c0: i2c-master@0d090000 {
+               ...
+               cdce706: clock-synth@69 {
+                       compatible = "ti,cdce706";
+                       #clock-cells = <1>;
+                       reg = <0x69>;
+                       clocks = <&clk54>;
+                       clock-names = "clk_in0";
+               };
+       };
+       ...
+       simple-audio-card,codec {
+               ...
+               clocks = <&cdce706 4>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/fapll.txt b/Documentation/devicetree/bindings/clock/ti/fapll.txt
new file mode 100644 (file)
index 0000000..c19b3f2
--- /dev/null
@@ -0,0 +1,33 @@
+Binding for Texas Instruments FAPLL clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. It assumes a
+register-mapped FAPLL with usually two selectable input clocks
+(reference clock and bypass clock), and one or more child
+synthesizers.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "ti,dm816-fapll-clock"
+- #clock-cells : from common clock binding; shall be set to 1.
+- clocks : link phandles of parent clocks (clk-ref and clk-bypass)
+- reg : address and length of the register set for controlling the FAPLL.
+
+Examples:
+       main_fapll: main_fapll {
+               #clock-cells = <1>;
+               compatible = "ti,dm816-fapll-clock";
+               reg = <0x400 0x40>;
+               clocks = <&sys_clkin_ck &sys_clkin_ck>;
+               clock-indices = <1>, <2>, <3>, <4>, <5>,
+                               <6>, <7>;
+               clock-output-names = "main_pll_clk1",
+                                    "main_pll_clk2",
+                                    "main_pll_clk3",
+                                    "main_pll_clk4",
+                                    "main_pll_clk5",
+                                    "main_pll_clk6",
+                                    "main_pll_clk7";
+       };
diff --git a/Documentation/devicetree/bindings/dma/img-mdc-dma.txt b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt
new file mode 100644 (file)
index 0000000..28c1341
--- /dev/null
@@ -0,0 +1,57 @@
+* IMG Multi-threaded DMA Controller (MDC)
+
+Required properties:
+- compatible: Must be "img,pistachio-mdc-dma".
+- reg: Must contain the base address and length of the MDC registers.
+- interrupts: Must contain all the per-channel DMA interrupts.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clock/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - sys: MDC system interface clock.
+- img,cr-periph: Must contain a phandle to the peripheral control syscon
+  node which contains the DMA request to channel mapping registers.
+- img,max-burst-multiplier: Must be the maximum supported burst size multiplier.
+  The maximum burst size is this value multiplied by the hardware-reported bus
+  width.
+- #dma-cells: Must be 3:
+  - The first cell is the peripheral's DMA request line.
+  - The second cell is a bitmap specifying to which channels the DMA request
+    line may be mapped (i.e. bit N set indicates channel N is usable).
+  - The third cell is the thread ID to be used by the channel.
+
+Optional properties:
+- dma-channels: Number of supported DMA channels, up to 32.  If not specified
+  the number reported by the hardware is used.
+
+Example:
+
+mdc: dma-controller@18143000 {
+       compatible = "img,pistachio-mdc-dma";
+       reg = <0x18143000 0x1000>;
+       interrupts = <GIC_SHARED 27 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 28 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 30 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 31 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 32 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 33 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 34 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 35 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 36 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 37 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SHARED 38 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&system_clk>;
+       clock-names = "sys";
+
+       img,max-burst-multiplier = <16>;
+       img,cr-periph = <&cr_periph>;
+
+       #dma-cells = <3>;
+};
+
+spi@18100f00 {
+       ...
+       dmas = <&mdc 9 0xffffffff 0>, <&mdc 10 0xffffffff 0>;
+       dma-names = "tx", "rx";
+       ...
+};
index f7e21b1c2a055e04fca6fd03e6e5b94357db71ef..09daeef1ff2249d71a8e42e176b7a066447a7084 100644 (file)
@@ -5,9 +5,6 @@ controller instances named DMAC capable of serving multiple clients. Channels
 can be dedicated to specific clients or shared between a large number of
 clients.
 
-DMA clients are connected to the DMAC ports referenced by an 8-bit identifier
-called MID/RID.
-
 Each DMA client is connected to one dedicated port of the DMAC, identified by
 an 8-bit port number called the MID/RID. A DMA controller can thus serve up to
 256 clients in total. When the number of hardware channels is lower than the
index d58675ea1abf2e998af99b8d69f5a2b6b1b71e2a..c261598164a7612c929092ebf2876b478a1c70f6 100644 (file)
@@ -38,7 +38,7 @@ Example:
                chan_allocation_order = <1>;
                chan_priority = <1>;
                block_size = <0xfff>;
-               data_width = <3 3 0 0>;
+               data_width = <3 3>;
        };
 
 DMA clients connected to the Designware DMA controller must use the format
diff --git a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
new file mode 100644 (file)
index 0000000..81f982c
--- /dev/null
@@ -0,0 +1,37 @@
+Broadcom iProc I2C controller
+
+Required properties:
+
+- compatible:
+    Must be "brcm,iproc-i2c"
+
+- reg:
+    Define the base and range of the I/O address space that contain the iProc
+    I2C controller registers
+
+- interrupts:
+    Should contain the I2C interrupt
+
+- clock-frequency:
+    This is the I2C bus clock. Needs to be either 100000 or 400000
+
+- #address-cells:
+    Always 1 (for I2C addresses)
+
+- #size-cells:
+    Always 0
+
+Example:
+       i2c0: i2c@18008000 {
+               compatible = "brcm,iproc-i2c";
+               reg = <0x18008000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+
+               codec: wm8750@1a {
+                       compatible = "wlf,wm8750";
+                       reg = <0x1a>;
+               };
+       };
index 34a3fb6f8488b26a9c5def9866898d807ee7df55..cf53d5fba20a0934c631b8320e538cbc5440e155 100644 (file)
@@ -16,6 +16,9 @@ Required Properties:
 Optional Properties:
 
   - reset-gpios: Reference to the GPIO connected to the reset input.
+  - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all
+    children in idle state. This is necessary for example, if there are several
+    multiplexers on the bus and the devices behind them use the same I2C addresses.
 
 
 Example:
index 1637c298a1b337bf83612cb12129001ecd11da24..17bef9a34e507541ea3f201039b04e199e551fa3 100644 (file)
@@ -4,24 +4,60 @@ Required properties:
 - compatible      : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst"
 - reg             : bus address start and address range size of device
 - interrupts      : interrupt number
-- clock-frequency : frequency of bus clock in Hz
+- clocks          : handle to the controller clock; see the note below.
+                    Mutually exclusive with opencores,ip-clock-frequency
+- opencores,ip-clock-frequency: frequency of the controller clock in Hz;
+                    see the note below. Mutually exclusive with clocks
 - #address-cells  : should be <1>
 - #size-cells     : should be <0>
 
 Optional properties:
+- clock-frequency : frequency of bus clock in Hz; see the note below.
+                    Defaults to 100 KHz when the property is not specified
 - reg-shift       : device register offsets are shifted by this value
 - reg-io-width    : io register width in bytes (1, 2 or 4)
 - regstep         : deprecated, use reg-shift above
 
-Example:
+Note
+clock-frequency property is meant to control the bus frequency for i2c bus
+drivers, but it was incorrectly used to specify i2c controller input clock
+frequency. So the following rules are set to fix this situation:
+- if clock-frequency is present and neither opencores,ip-clock-frequency nor
+  clocks are, then clock-frequency specifies i2c controller clock frequency.
+  This is to keep backwards compatibility with setups using old DTB. i2c bus
+  frequency is fixed at 100 KHz.
+- if clocks is present it specifies i2c controller clock. clock-frequency
+  property specifies i2c bus frequency.
+- if opencores,ip-clock-frequency is present it specifies i2c controller
+  clock frequency. clock-frequency property specifies i2c bus frequency.
 
+Examples:
+
+       i2c0: ocores@a0000000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "opencores,i2c-ocores";
+               reg = <0xa0000000 0x8>;
+               interrupts = <10>;
+               opencores,ip-clock-frequency = <20000000>;
+
+               reg-shift = <0>;        /* 8 bit registers */
+               reg-io-width = <1>;     /* 8 bit read/write */
+
+               dummy@60 {
+                       compatible = "dummy";
+                       reg = <0x60>;
+               };
+       };
+or
        i2c0: ocores@a0000000 {
                #address-cells = <1>;
                #size-cells = <0>;
                compatible = "opencores,i2c-ocores";
                reg = <0xa0000000 0x8>;
                interrupts = <10>;
-               clock-frequency = <20000000>;
+               clocks = <&osc>;
+               clock-frequency = <400000>; /* i2c bus frequency 400 KHz */
 
                reg-shift = <0>;        /* 8 bit registers */
                reg-io-width = <1>;     /* 8 bit read/write */
index dde6c22ce91a13df20eaab99ebd6dbf684983768..f0d71bc52e64be39cea42a7cc04b1e05662ad641 100644 (file)
@@ -21,6 +21,17 @@ Required on RK3066, RK3188 :
 Optional properties :
 
  - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
+ - i2c-scl-rising-time-ns : Number of nanoseconds the SCL signal takes to rise
+       (t(r) in I2C specification). If not specified this is assumed to be
+       the maximum the specification allows(1000 ns for Standard-mode,
+       300 ns for Fast-mode) which might cause slightly slower communication.
+ - i2c-scl-falling-time-ns : Number of nanoseconds the SCL signal takes to fall
+       (t(f) in the I2C specification). If not specified this is assumed to
+       be the maximum the specification allows (300 ns) which might cause
+       slightly slower communication.
+ - i2c-sda-falling-time-ns : Number of nanoseconds the SDA signal takes to fall
+       (t(f) in the I2C specification). If not specified we'll use the SCL
+       value since they are the same in nearly all cases.
 
 Example:
 
@@ -39,4 +50,7 @@ i2c0: i2c@2002d000 {
 
        clock-names = "i2c";
        clocks = <&cru PCLK_I2C0>;
+
+       i2c-scl-rising-time-ns = <800>;
+       i2c-scl-falling-time-ns = <100>;
 };
index 4dcd88d5f7ca453503120236a2ee3670ac7a55e8..aaa8325004d23ae6313223594817f392c1a31359 100644 (file)
@@ -61,9 +61,8 @@ fsl,sgtl5000          SGTL5000: Ultra Low-Power Audio Codec
 gmt,g751               G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
 infineon,slb9635tt     Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
 infineon,slb9645tt     Infineon SLB9645 I2C TPM (new protocol, max 400khz)
-isl,isl12057           Intersil ISL12057 I2C RTC Chip
-isil,isl29028           (deprecated, use isl)
-isl,isl29028            Intersil ISL29028 Ambient Light and Proximity Sensor
+isil,isl12057          Intersil ISL12057 I2C RTC Chip
+isil,isl29028          Intersil ISL29028 Ambient Light and Proximity Sensor
 maxim,ds1050           5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237          Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625          9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
diff --git a/Documentation/devicetree/bindings/mfd/da9063.txt b/Documentation/devicetree/bindings/mfd/da9063.txt
new file mode 100644 (file)
index 0000000..42c6fa6
--- /dev/null
@@ -0,0 +1,93 @@
+* Dialog DA9063 Power Management Integrated Circuit (PMIC)
+
+DA9063 consists of a large and varied group of sub-devices (I2C Only):
+
+Device                   Supply Names    Description
+------                   ------------    -----------
+da9063-regulator        :               : LDOs & BUCKs
+da9063-rtc              :               : Real-Time Clock
+da9063-watchdog         :               : Watchdog
+
+======
+
+Required properties:
+
+- compatible : Should be "dlg,da9063"
+- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
+  modified to match the chip's OTP settings).
+- interrupt-parent : Specifies the reference to the interrupt controller for
+  the DA9063.
+- interrupts : IRQ line information.
+- interrupt-controller
+
+Sub-nodes:
+
+- regulators : This node defines the settings for the LDOs and BUCKs. The
+  DA9063 regulators are bound using their names listed below:
+
+    bcore1    : BUCK CORE1
+    bcore2    : BUCK CORE2
+    bpro      : BUCK PRO
+    bmem      : BUCK MEM
+    bio       : BUCK IO
+    bperi     : BUCK PERI
+    ldo1      : LDO_1
+    ldo2      : LDO_2
+    ldo3      : LDO_3
+    ldo4      : LDO_4
+    ldo5      : LDO_5
+    ldo6      : LDO_6
+    ldo7      : LDO_7
+    ldo8      : LDO_8
+    ldo9      : LDO_9
+    ldo10     : LDO_10
+    ldo11     : LDO_11
+
+  The component follows the standard regulator framework and the bindings
+  details of individual regulator device can be found in:
+  Documentation/devicetree/bindings/regulator/regulator.txt
+
+- rtc : This node defines settings for the Real-Time Clock associated with
+  the DA9063. There are currently no entries in this binding, however
+  compatible = "dlg,da9063-rtc" should be added if a node is created.
+
+- watchdog : This node defines settings for the Watchdog timer associated
+  with the DA9063. There are currently no entries in this binding, however
+  compatible = "dlg,da9063-watchdog" should be added if a node is created.
+
+
+Example:
+
+       pmic0: da9063@58 {
+               compatible = "dlg,da9063";
+               reg = <0x58>;
+               interrupt-parent = <&gpio6>;
+               interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-controller;
+
+               rtc {
+                       compatible = "dlg,da9063-rtc";
+               };
+
+               wdt {
+                       compatible = "dlg,da9063-watchdog";
+               };
+
+               regulators {
+                       DA9063_BCORE1: bcore1 {
+                               regulator-name = "BCORE1";
+                               regulator-min-microvolt = <300000>;
+                               regulator-max-microvolt = <1570000>;
+                               regulator-min-microamp = <500000>;
+                               regulator-max-microamp = <2000000>;
+                               regulator-boot-on;
+                       };
+                       DA9063_LDO11: ldo11 {
+                               regulator-name = "LDO_11";
+                               regulator-min-microvolt = <900000>;
+                               regulator-max-microvolt = <3600000>;
+                               regulator-boot-on;
+                       };
+               };
+       };
+
diff --git a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
new file mode 100644 (file)
index 0000000..85e3198
--- /dev/null
@@ -0,0 +1,70 @@
+Qualcomm Resource Power Manager (RPM)
+
+This driver is used to interface with the Resource Power Manager (RPM) found in
+various Qualcomm platforms. The RPM allows each component in the system to vote
+for state of the system resources, such as clocks, regulators and bus
+frequencies.
+
+- compatible:
+       Usage: required
+       Value type: <string>
+       Definition: must be one of:
+                   "qcom,rpm-apq8064"
+                   "qcom,rpm-msm8660"
+                   "qcom,rpm-msm8960"
+
+- reg:
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: base address and size of the RPM's message ram
+
+- interrupts:
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: three entries specifying the RPM's:
+                   1. acknowledgement interrupt
+                   2. error interrupt
+                   3. wakeup interrupt
+
+- interrupt-names:
+       Usage: required
+       Value type: <string-array>
+       Definition: must be the three strings "ack", "err" and "wakeup", in order
+
+- #address-cells:
+       Usage: required
+       Value type: <u32>
+       Definition: must be 1
+
+- #size-cells:
+       Usage: required
+       Value type: <u32>
+       Definition: must be 0
+
+- qcom,ipc:
+       Usage: required
+       Value type: <prop-encoded-array>
+
+       Definition: three entries specifying the outgoing ipc bit used for
+                   signaling the RPM:
+                   - phandle to a syscon node representing the apcs registers
+                   - u32 representing offset to the register within the syscon
+                   - u32 representing the ipc bit within the register
+
+
+= EXAMPLE
+
+       #include <dt-bindings/mfd/qcom-rpm.h>
+
+       rpm@108000 {
+               compatible = "qcom,rpm-msm8960";
+               reg = <0x108000 0x1000>;
+               qcom,ipc = <&apcs 0x8 2>;
+
+               interrupts = <0 19 0>, <0 21 0>, <0 22 0>;
+               interrupt-names = "ack", "err", "wakeup";
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+
diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt
new file mode 100644 (file)
index 0000000..f39a1aa
--- /dev/null
@@ -0,0 +1,43 @@
+* Cavium Interrupt Bus widget
+
+Properties:
+- compatible: "cavium,octeon-7130-cib"
+
+  Compatibility with cn70XX SoCs.
+
+- interrupt-controller:  This is an interrupt controller.
+
+- reg: Two elements consisting of the addresses of the RAW and EN
+  registers of the CIB block
+
+- cavium,max-bits: The index (zero based) of the highest numbered bit
+  in the CIB block.
+
+- interrupt-parent:  Always the CIU on the SoC.
+
+- interrupts: The CIU line to which the CIB block is connected.
+
+- #interrupt-cells: Must be <2>.  The first cell is the bit within the
+   CIB.  The second cell specifies the triggering semantics of the
+   line.
+
+Example:
+
+       interrupt-controller@107000000e000 {
+               compatible = "cavium,octeon-7130-cib";
+               reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */
+                     <0x10700 0x0000e100 0x0 0x8>; /* EN */
+               cavium,max-bits = <23>;
+
+               interrupt-controller;
+               interrupt-parent = <&ciu>;
+               interrupts = <1 24>;
+               /* Interrupts are specified by two parts:
+                * 1) Bit number in the CIB* registers
+                * 2) Triggering (1 - edge rising
+                *                2 - edge falling
+                *                4 - level active high
+                *                8 - level active low)
+                */
+               #interrupt-cells = <2>;
+       };
index 91b3a34671508bdf11a1f368dde9588fc34961df..4bf41d8338046ef5aef2e47e4ee1dccf727acfe2 100644 (file)
@@ -10,8 +10,8 @@ Absolute maximum transfer rate is 200MB/s
 Required properties:
  - compatible : "allwinner,sun4i-a10-mmc" or "allwinner,sun5i-a13-mmc"
  - reg : mmc controller base registers
- - clocks : a list with 2 phandle + clock specifier pairs
- - clock-names : must contain "ahb" and "mmc"
+ - clocks : a list with 4 phandle + clock specifier pairs
+ - clock-names : must contain "ahb", "mmc", "output" and "sample"
  - interrupts : mmc controller interrupt
 
 Optional properties:
@@ -25,8 +25,8 @@ Examples:
        mmc0: mmc@01c0f000 {
                compatible = "allwinner,sun5i-a13-mmc";
                reg = <0x01c0f000 0x1000>;
-               clocks = <&ahb_gates 8>, <&mmc0_clk>;
-               clock-names = "ahb", "mod";
+               clocks = <&ahb_gates 8>, <&mmc0_clk>, <&mmc0_output_clk>, <&mmc0_sample_clk>;
+               clock-names = "ahb", "mod", "output", "sample";
                interrupts = <0 32 4>;
                status = "disabled";
        };
index 1fe6dde9849942a51bb515f360489d96591365a9..7d4c8eb775a5fff5da9861b632d11e18a6786eee 100644 (file)
@@ -1,7 +1,7 @@
 Atmel NAND flash
 
 Required properties:
-- compatible : "atmel,at91rm9200-nand".
+- compatible : should be "atmel,at91rm9200-nand" or "atmel,sama5d4-nand".
 - reg : should specify localbus address and size used for the chip,
        and hardware ECC controller if available.
        If the hardware ECC is PMECC, it should contain address and size for
index 823d134121956362414c546134dde3744e03fb5e..4461dc71cb10097efc887240c9f319660d43972a 100644 (file)
@@ -1,7 +1,7 @@
 * Freescale Quad Serial Peripheral Interface(QuadSPI)
 
 Required properties:
-  - compatible : Should be "fsl,vf610-qspi"
+  - compatible : Should be "fsl,vf610-qspi" or "fsl,imx6sx-qspi"
   - reg : the first contains the register location and length,
           the second contains the memory mapping address and length
   - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
index a011fdf61dbfaeac39ab76bb48db143443243549..d02acaff3c35e98476b91c26757f30355736cb3b 100644 (file)
@@ -1,7 +1,7 @@
 * Freescale General-Purpose Media Interface (GPMI)
 
 The GPMI nand controller provides an interface to control the
-NAND flash chips. We support only one NAND chip now.
+NAND flash chips.
 
 Required properties:
   - compatible : should be "fsl,<chip>-gpmi-nand"
diff --git a/Documentation/devicetree/bindings/mtd/hisi504-nand.txt b/Documentation/devicetree/bindings/mtd/hisi504-nand.txt
new file mode 100644 (file)
index 0000000..2e35f06
--- /dev/null
@@ -0,0 +1,47 @@
+Hisilicon Hip04 Soc NAND controller DT binding
+
+Required properties:
+
+- compatible:          Should be "hisilicon,504-nfc".
+- reg:                 The first contains base physical address and size of
+                       NAND controller's registers. The second contains base
+                       physical address and size of NAND controller's buffer.
+- interrupts:          Interrupt number for nfc.
+- nand-bus-width:      See nand.txt.
+- nand-ecc-mode:       Support none and hw ecc mode.
+- #address-cells:      Partition address, should be set 1.
+- #size-cells:         Partition size, should be set 1.
+
+Optional properties:
+
+- nand-ecc-strength:   Number of bits to correct per ECC step.
+- nand-ecc-step-size:  Number of data bytes covered by a single ECC step.
+
+The following ECC strength and step size are currently supported:
+
+ - nand-ecc-strength = <16>, nand-ecc-step-size = <1024>
+
+Flash chip may optionally contain additional sub-nodes describing partitions of
+the address space. See partition.txt for more detail.
+
+Example:
+
+       nand: nand@4020000 {
+               compatible = "hisilicon,504-nfc";
+               reg = <0x4020000 0x10000>, <0x5000000 0x1000>;
+               interrupts = <0 379 4>;
+               nand-bus-width = <8>;
+               nand-ecc-mode = "hw";
+               nand-ecc-strength = <16>;
+               nand-ecc-step-size = <1024>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               partition@0 {
+                       label = "nand_text";
+                       reg = <0x00000000 0x00400000>;
+               };
+
+               ...
+
+       };
index 6b9f680cb579bd7fbca4f1ee17f610e6c0b079bf..4a0a48bf4ecb831d0fc1e2b0263208d0b42cbb36 100644 (file)
@@ -36,6 +36,11 @@ are defined:
  - vendor-id : Contains the flash chip's vendor id (1 byte).
  - device-id : Contains the flash chip's device id (1 byte).
 
+For ROM compatible devices (and ROM fallback from cfi-flash), the following
+additional (optional) property is defined:
+
+ - erase-size : The chip's physical erase block size in bytes.
+
 The device tree may optionally contain sub-nodes describing partitions of the
 address space. See partition.txt for more detail.
 
index 33df3932168e1b8941a5de1565b1cf02f96fb551..8db32384a4866e56094aa96e86ac8765ccdf519c 100644 (file)
@@ -27,6 +27,8 @@ property is used.
 - amd,serdes-cdr-rate: CDR rate speed selection
 - amd,serdes-pq-skew: PQ (data sampling) skew
 - amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
        xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@ Example:
                amd,serdes-cdr-rate = <2>, <2>, <7>;
                amd,serdes-pq-skew = <10>, <10>, <30>;
                amd,serdes-tx-amp = <15>, <15>, <10>;
+               amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+               amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
        };
index aaa696414f57a1012b5fc18586ce06da6a899309..ba19d671e8081148529368442fa39be451294608 100644 (file)
@@ -2,10 +2,13 @@
 
 Required properties:
 - compatible: Should be "cdns,[<chip>-]{macb|gem}"
-  Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs.
+  Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs or the 10/100Mbit IP
+  available on sama5d3 SoCs.
   Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
+  Use "cdns,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
+  Use "cdns,sama5d4-gem" for the Gigabit IP available on Atmel sama5d4 SoCs.
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/pwm/img-pwm.txt b/Documentation/devicetree/bindings/pwm/img-pwm.txt
new file mode 100644 (file)
index 0000000..fade5f2
--- /dev/null
@@ -0,0 +1,24 @@
+*Imagination Technologies PWM DAC driver
+
+Required properties:
+  - compatible: Should be "img,pistachio-pwm"
+  - reg: Should contain physical base address and length of pwm registers.
+  - clocks: Must contain an entry for each entry in clock-names.
+       See ../clock/clock-bindings.txt for details.
+  - clock-names: Must include the following entries.
+    - pwm: PWM operating clock.
+    - sys: PWM system interface clock.
+  - #pwm-cells: Should be 2. See pwm.txt in this directory for the
+       description of the cells format.
+  - img,cr-periph: Must contain a phandle to the peripheral control
+       syscon node which contains PWM control registers.
+
+Example:
+       pwm: pwm@18101300 {
+               compatible = "img,pistachio-pwm";
+               reg = <0x18101300 0x100>;
+               clocks = <&pwm_clk>, <&system_clk>;
+               clock-names = "pwm", "sys";
+               #pwm-cells = <2>;
+               img,cr-periph = <&cr_periph>;
+       };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
new file mode 100644 (file)
index 0000000..ae0273e
--- /dev/null
@@ -0,0 +1,20 @@
+Allwinner sun4i and sun7i SoC PWM controller
+
+Required properties:
+  - compatible: should be one of:
+    - "allwinner,sun4i-a10-pwm"
+    - "allwinner,sun7i-a20-pwm"
+  - reg: physical base address and length of the controller's registers
+  - #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+    the cells format.
+  - clocks: From common clock binding, handle to the parent clock.
+
+Example:
+
+       pwm: pwm@01c20e00 {
+               compatible = "allwinner,sun7i-a20-pwm";
+               reg = <0x01c20e00 0xc>;
+               clocks = <&osc24M>;
+               #pwm-cells = <3>;
+               status = "disabled";
+       };
index ae738f562acca6bf915ef5691dfd32da9d2e23c8..695150a4136bb3c3291e94fcbf7f7cdddfe6045e 100644 (file)
@@ -12,6 +12,7 @@
               "samsung,exynos5420-tmu-ext-triminfo" for TMU channels 2, 3 and 4
                        Exynos5420 (Must pass triminfo base and triminfo clock)
               "samsung,exynos5440-tmu"
+              "samsung,exynos7-tmu"
 - interrupt-parent : The phandle for the interrupt controller
 - reg : Address range of the thermal registers. For soc's which has multiple
        instances of TMU and some registers are shared across all TMU's like
 - clocks : The main clocks for TMU device
        -- 1. operational clock for TMU channel
        -- 2. optional clock to access the shared registers of TMU channel
+       -- 3. optional special clock for functional operation
 - clock-names : Thermal system clock name
        -- "tmu_apbif" operational clock for current TMU channel
        -- "tmu_triminfo_apbif" clock to access the shared triminfo register
                for current TMU channel
+       -- "tmu_sclk" clock for functional operation of the current TMU
+               channel
 - vtmu-supply: This entry is optional and provides the regulator node supplying
                voltage to TMU. If needed this entry can be placed inside
                board/platform specific dts file.
+Following properties are mandatory (depending on SoC):
+- samsung,tmu_gain: Gain value for internal TMU operation.
+- samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage
+- samsung,tmu_noise_cancel_mode: Mode for noise cancellation
+- samsung,tmu_efuse_value: Default level of temperature - it is needed when
+                          factory fusing produced a wrong value
+- samsung,tmu_min_efuse_value: Minimum temperature fused value
+- samsung,tmu_max_efuse_value: Maximum temperature fused value
+- samsung,tmu_first_point_trim: First point trimming value
+- samsung,tmu_second_point_trim: Second point trimming value
+- samsung,tmu_default_temp_offset: Default temperature offset
+- samsung,tmu_cal_type: Calibration type
 
 Example 1):
 
@@ -51,6 +67,7 @@ Example 1):
                clock-names = "tmu_apbif";
                status = "disabled";
                vtmu-supply = <&tmu_regulator_node>;
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
 Example 2):
@@ -61,6 +78,7 @@ Example 2):
                interrupts = <0 58 0>;
                clocks = <&clock 21>;
                clock-names = "tmu_apbif";
+               #include "exynos5440-tmu-sensor-conf.dtsi"
        };
 
 Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
@@ -70,6 +88,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
                interrupts = <0 184 0>;
                clocks = <&clock 318>, <&clock 318>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
        tmu_cpu3: tmu@1006c000 {
@@ -78,6 +97,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
                interrupts = <0 185 0>;
                clocks = <&clock 318>, <&clock 319>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
        tmu_gpu: tmu@100a0000 {
@@ -86,6 +106,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
                interrupts = <0 215 0>;
                clocks = <&clock 319>, <&clock 318>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
 Note: For multi-instance tmu each instance should have an alias correctly
index f5db6b72a36fdb78a2f3e72413e1241ae4867a66..29fe0bfae38e454975dddf145d20d6acbde8bef6 100644 (file)
@@ -251,24 +251,24 @@ ocp {
 };
 
 thermal-zones {
-       cpu-thermal: cpu-thermal {
+       cpu_thermal: cpu-thermal {
                polling-delay-passive = <250>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
                thermal-sensors = <&bandgap0>;
 
                trips {
-                       cpu-alert0: cpu-alert {
+                       cpu_alert0: cpu-alert0 {
                                temperature = <90000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "active";
                        };
-                       cpu-alert1: cpu-alert {
+                       cpu_alert1: cpu-alert1 {
                                temperature = <100000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       cpu-crit: cpu-crit {
+                       cpu_crit: cpu-crit {
                                temperature = <125000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -277,17 +277,17 @@ thermal-zones {
 
                cooling-maps {
                        map0 {
-                               trip = <&cpu-alert0>;
-                               cooling-device = <&fan0 THERMAL_NO_LIMITS 4>;
+                               trip = <&cpu_alert0>;
+                               cooling-device = <&fan0 THERMAL_NO_LIMIT 4>;
                        };
                        map1 {
-                               trip = <&cpu-alert1>;
-                               cooling-device = <&fan0 5 THERMAL_NO_LIMITS>;
+                               trip = <&cpu_alert1>;
+                               cooling-device = <&fan0 5 THERMAL_NO_LIMIT>;
                        };
                        map2 {
-                               trip = <&cpu-alert1>;
+                               trip = <&cpu_alert1>;
                                cooling-device =
-                                   <&cpu0 THERMAL_NO_LIMITS THERMAL_NO_LIMITS>;
+                                   <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
                        };
                };
        };
@@ -298,13 +298,13 @@ used to monitor the zone 'cpu-thermal' using its sole sensor. A fan
 device (fan0) is controlled via I2C bus 1, at address 0x48, and has ten
 different cooling states 0-9. It is used to remove the heat out of
 the thermal zone 'cpu-thermal' using its cooling states
-from its minimum to 4, when it reaches trip point 'cpu-alert0'
+from its minimum to 4, when it reaches trip point 'cpu_alert0'
 at 90C, as an example of active cooling. The same cooling device is used at
-'cpu-alert1', but from 5 to its maximum state. The cpu@0 device is also
+'cpu_alert1', but from 5 to its maximum state. The cpu@0 device is also
 linked to the same thermal zone, 'cpu-thermal', as a passive cooling device,
-using all its cooling states at trip point 'cpu-alert1',
+using all its cooling states at trip point 'cpu_alert1',
 which is a trip point at 100C. On the thermal zone 'cpu-thermal', at the
-temperature of 125C, represented by the trip point 'cpu-crit', the silicon
+temperature of 125C, represented by the trip point 'cpu_crit', the silicon
 is not reliable anymore.
 
 (b) - IC with several internal sensors
@@ -329,7 +329,7 @@ ocp {
 };
 
 thermal-zones {
-       cpu-thermal: cpu-thermal {
+       cpu_thermal: cpu-thermal {
                polling-delay-passive = <250>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
@@ -338,12 +338,12 @@ thermal-zones {
 
                trips {
                        /* each zone within the SoC may have its own trips */
-                       cpu-alert: cpu-alert {
+                       cpu_alert: cpu-alert {
                                temperature = <100000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       cpu-crit: cpu-crit {
+                       cpu_crit: cpu-crit {
                                temperature = <125000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -356,7 +356,7 @@ thermal-zones {
                };
        };
 
-       gpu-thermal: gpu-thermal {
+       gpu_thermal: gpu-thermal {
                polling-delay-passive = <120>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
@@ -365,12 +365,12 @@ thermal-zones {
 
                trips {
                        /* each zone within the SoC may have its own trips */
-                       gpu-alert: gpu-alert {
+                       gpu_alert: gpu-alert {
                                temperature = <90000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       gpu-crit: gpu-crit {
+                       gpu_crit: gpu-crit {
                                temperature = <105000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -383,7 +383,7 @@ thermal-zones {
                };
        };
 
-       dsp-thermal: dsp-thermal {
+       dsp_thermal: dsp-thermal {
                polling-delay-passive = <50>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
@@ -392,12 +392,12 @@ thermal-zones {
 
                trips {
                        /* each zone within the SoC may have its own trips */
-                       dsp-alert: gpu-alert {
+                       dsp_alert: dsp-alert {
                                temperature = <90000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       dsp-crit: gpu-crit {
+                       dsp_crit: dsp-crit {
                                temperature = <135000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -457,7 +457,7 @@ ocp {
 };
 
 thermal-zones {
-       cpu-thermal: cpu-thermal {
+       cpu_thermal: cpu-thermal {
                polling-delay-passive = <250>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
@@ -508,7 +508,7 @@ with many sensors and many cooling devices.
        /*
         * An IC with several temperature sensor.
         */
-       adc-dummy: sensor@0x50 {
+       adc_dummy: sensor@0x50 {
                ...
                #thermal-sensor-cells = <1>; /* sensor internal ID */
        };
@@ -520,7 +520,7 @@ thermal-zones {
                polling-delay = <2500>; /* milliseconds */
 
                                /* sensor       ID */
-               thermal-sensors = <&adc-dummy     4>;
+               thermal-sensors = <&adc_dummy     4>;
 
                trips {
                        ...
@@ -531,14 +531,14 @@ thermal-zones {
                };
        };
 
-       board-thermal: board-thermal {
+       board_thermal: board-thermal {
                polling-delay-passive = <1000>; /* milliseconds */
                polling-delay = <2500>; /* milliseconds */
 
                                /* sensor       ID */
-               thermal-sensors = <&adc-dummy     0>, /* pcb top edge */
-                                 <&adc-dummy     1>, /* lcd */
-                                 <&adc-dymmy     2>; /* back cover */
+               thermal-sensors = <&adc_dummy     0>, /* pcb top edge */
+                                 <&adc_dummy     1>, /* lcd */
+                                 <&adc_dummy     2>; /* back cover */
                /*
                 * An array of coefficients describing the sensor
                 * linear relation. E.g.:
@@ -548,22 +548,22 @@ thermal-zones {
 
                trips {
                        /* Trips are based on resulting linear equation */
-                       cpu-trip: cpu-trip {
+                       cpu_trip: cpu-trip {
                                temperature = <60000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       gpu-trip: gpu-trip {
+                       gpu_trip: gpu-trip {
                                temperature = <55000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        }
-                       lcd-trip: lcp-trip {
+                       lcd_trip: lcp-trip {
                                temperature = <53000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       crit-trip: crit-trip {
+                       crit_trip: crit-trip {
                                temperature = <68000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -572,17 +572,17 @@ thermal-zones {
 
                cooling-maps {
                        map0 {
-                               trip = <&cpu-trip>;
+                               trip = <&cpu_trip>;
                                cooling-device = <&cpu0 0 2>;
                                contribution = <55>;
                        };
                        map1 {
-                               trip = <&gpu-trip>;
+                               trip = <&gpu_trip>;
                                cooling-device = <&gpu0 0 2>;
                                contribution = <20>;
                        };
                        map2 {
-                               trip = <&lcd-trip>;
+                               trip = <&lcd_trip>;
                                cooling-device = <&lcd0 5 10>;
                                contribution = <15>;
                        };
index 37afec194949a3ca07f8ae476f1d681e4bdeedc4..198794963786bac2d46e4e9af0b39b28828669f2 100644 (file)
@@ -13,6 +13,11 @@ Required Properties:
     by the GPIO flags.
 - hw_margin_ms: Maximum time to reset watchdog circuit (milliseconds).
 
+Optional Properties:
+- always-running: If the watchdog timer cannot be disabled, add this flag to
+  have the driver keep toggling the signal without a client. It will only cease
+  to toggle the signal when the device is open and the timeout elapsed.
+
 Example:
        watchdog: watchdog {
                /* ADM706 */
diff --git a/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt b/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt
new file mode 100644 (file)
index 0000000..b2fa11f
--- /dev/null
@@ -0,0 +1,19 @@
+*ImgTec PowerDown Controller (PDC) Watchdog Timer (WDT)
+
+Required properties:
+- compatible : Should be "img,pdc-wdt"
+- reg : Should contain WDT registers location and length
+- clocks: Must contain an entry for each entry in clock-names.
+- clock-names: Should contain "wdt" and "sys"; the watchdog counter
+               clock and register interface clock respectively.
+- interrupts : Should contain WDT interrupt
+
+Examples:
+
+watchdog@18102100 {
+       compatible = "img,pdc-wdt";
+       reg = <0x18102100 0x100>;
+       clocks = <&pdc_wdt_clk>, <&sys_clk>;
+       clock-names = "wdt", "sys";
+       interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt
new file mode 100644 (file)
index 0000000..e27763e
--- /dev/null
@@ -0,0 +1,12 @@
+Ingenic Watchdog Timer (WDT) Controller for JZ4740
+
+Required properties:
+compatible: "ingenic,jz4740-watchdog"
+reg: Register address and length for watchdog registers
+
+Example:
+
+watchdog: jz4740-watchdog@10002000 {
+       compatible = "ingenic,jz4740-watchdog";
+       reg = <0x10002000 0x100>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
new file mode 100644 (file)
index 0000000..af9eb5b
--- /dev/null
@@ -0,0 +1,13 @@
+Mediatek SoCs Watchdog timer
+
+Required properties:
+
+- compatible : should be "mediatek,mt6589-wdt"
+- reg : Specifies base physical address and size of the registers.
+
+Example:
+
+wdt: watchdog@10000000 {
+       compatible = "mediatek,mt6589-wdt";
+       reg = <0x10000000 0x18>;
+};
index 766658ccf235da132ef83e9d466de03ee77a2e89..05d2280190f13d8d09e7a7597e0b7314b5d77cb0 100644 (file)
@@ -113,6 +113,31 @@ need to initialize a few fields in there:
   * channels:  should be initialized as a list using the
                INIT_LIST_HEAD macro for example
 
+  * src_addr_widths:
+    - should contain a bitmask of the supported source transfer width
+
+  * dst_addr_widths:
+    - should contain a bitmask of the supported destination transfer
+      width
+
+  * directions:
+    - should contain a bitmask of the supported slave directions
+      (i.e. excluding mem2mem transfers)
+
+  * residue_granularity:
+    - Granularity of the transfer residue reported to dma_set_residue.
+    - This can be either:
+      + Descriptor
+        -> Your device doesn't support any kind of residue
+           reporting. The framework will only know that a particular
+           transaction descriptor is done.
+      + Segment
+        -> Your device is able to report which chunks have been
+           transferred
+      + Burst
+        -> Your device is able to report which bursts have been
+           transferred
+
   * dev:       should hold the pointer to the struct device associated
                to your current driver instance.
 
@@ -274,48 +299,36 @@ supported.
        account the current period.
      - This function can be called in an interrupt context.
 
-   * device_control
-     - Used by client drivers to control and configure the channel it
-       has a handle on.
-     - Called with a command and an argument
-       + The command is one of the values listed by the enum
-         dma_ctrl_cmd. The valid commands are:
-         + DMA_PAUSE
-           + Pauses a transfer on the channel
-           + This command should operate synchronously on the channel,
-             pausing right away the work of the given channel
-         + DMA_RESUME
-           + Restarts a transfer on the channel
-           + This command should operate synchronously on the channel,
-             resuming right away the work of the given channel
-         + DMA_TERMINATE_ALL
-           + Aborts all the pending and ongoing transfers on the
-             channel
-           + This command should operate synchronously on the channel,
-             terminating right away all the channels
-         + DMA_SLAVE_CONFIG
-           + Reconfigures the channel with passed configuration
-           + This command should NOT perform synchronously, or on any
-             currently queued transfers, but only on subsequent ones
-           + In this case, the function will receive a
-             dma_slave_config structure pointer as an argument, that
-             will detail which configuration to use.
-           + Even though that structure contains a direction field,
-             this field is deprecated in favor of the direction
-             argument given to the prep_* functions
-         + FSLDMA_EXTERNAL_START
-           + TODO: Why does that even exist?
-       + The argument is an opaque unsigned long. This actually is a
-         pointer to a struct dma_slave_config that should be used only
-         in the DMA_SLAVE_CONFIG.
-
-  * device_slave_caps
-    - Called through the framework by client drivers in order to have
-      an idea of what are the properties of the channel allocated to
-      them.
-    - Such properties are the buswidth, available directions, etc.
-    - Required for every generic layer doing DMA transfers, such as
-      ASoC.
+   * device_config
+     - Reconfigures the channel with the configuration given as
+       argument
+     - This command should NOT perform synchronously, or on any
+       currently queued transfers, but only on subsequent ones
+     - In this case, the function will receive a dma_slave_config
+       structure pointer as an argument, that will detail which
+       configuration to use.
+     - Even though that structure contains a direction field, this
+       field is deprecated in favor of the direction argument given to
+       the prep_* functions
+     - This call is mandatory for slave operations only. This should NOT be
+       set or expected to be set for memcpy operations.
+       If a driver supports both, it should use this call for slave
+       operations only and not for memcpy ones.
+
+   * device_pause
+     - Pauses a transfer on the channel
+     - This command should operate synchronously on the channel,
+       pausing right away the work of the given channel
+
+   * device_resume
+     - Resumes a transfer on the channel
+     - This command should operate synchronously on the channel,
+       resuming right away the work of the given channel
+
+   * device_terminate_all
+     - Aborts all the pending and ongoing transfers on the channel
+     - This command should operate synchronously on the channel,
+       terminating right away all the channels
 
 Misc notes (stuff that should be documented, but don't really know
 where to put them)
index 2ca3d17eee56380cec075d5279c2703fa2b1ed9e..f91926f2f4824dee2c6785dcaf4cedd84df838e9 100644 (file)
@@ -164,8 +164,6 @@ the block device inode.  See there for more details.
 
 --------------------------- file_system_type ---------------------------
 prototypes:
-       int (*get_sb) (struct file_system_type *, int,
-                      const char *, void *, struct vfsmount *);
        struct dentry *(*mount) (struct file_system_type *, int,
                       const char *, void *);
        void (*kill_sb) (struct super_block *);
index 1b528b2ad809b8418cb352be8755de286b78882f..fcf4d509d1186f728b77e398c438af0850d9511c 100644 (file)
@@ -5,8 +5,8 @@ system.
 
 dlmfs is built with OCFS2 as it requires most of its infrastructure.
 
-Project web page:    http://oss.oracle.com/projects/ocfs2
-Tools web page:      http://oss.oracle.com/projects/ocfs2-tools
+Project web page:    http://ocfs2.wiki.kernel.org
+Tools web page:      https://github.com/markfasheh/ocfs2-tools
 OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
 
 All code copyright 2005 Oracle except when otherwise noted.
index 28f8c08201e29e0e90e2a489997f7a6b1d78dabb..4c49e5410595c0ee86447ed6312d58315e2e974c 100644 (file)
@@ -8,8 +8,8 @@ also make it attractive for non-clustered use.
 You'll want to install the ocfs2-tools package in order to at least
 get "mount.ocfs2" and "ocfs2_hb_ctl".
 
-Project web page:    http://oss.oracle.com/projects/ocfs2
-Tools web page:      http://oss.oracle.com/projects/ocfs2-tools
+Project web page:    http://ocfs2.wiki.kernel.org
+Tools git tree:      https://github.com/markfasheh/ocfs2-tools
 OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
 
 All code copyright 2005 Oracle except when otherwise noted.
index a27c950ece61b0d312fbba2a3757fab71c5b3616..6db0e5d1da07ee52d13f5997b84b3b10ef43e2dd 100644 (file)
@@ -159,6 +159,22 @@ overlay filesystem (though an operation on the name of the file such as
 rename or unlink will of course be noticed and handled).
 
 
+Multiple lower layers
+---------------------
+
+Multiple lower layers can now be given using the colon (":") as a
+separator character between the directory names.  For example:
+
+  mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged
+
+As the example shows, "upperdir=" and "workdir=" may be omitted.  In
+that case the overlay will be read-only.
+
+The specified lower directories will be stacked beginning from the
+rightmost one and going left.  In the above example lower1 will be the
+top, lower2 the middle and lower3 the bottom layer.
+
+
 Non-standard behavior
 ---------------------
 
@@ -196,3 +212,15 @@ Changes to the underlying filesystems while part of a mounted overlay
 filesystem are not allowed.  If the underlying filesystem is changed,
 the behavior of the overlay is undefined, though it will not result in
 a crash or deadlock.
+
+Testsuite
+---------
+
+There's a testsuite developed by David Howells at:
+
+  git://git.infradead.org/users/dhowells/unionmount-testsuite.git
+
+Run as root:
+
+  # cd unionmount-testsuite
+  # ./run --ov
index 4556a3eb87c454f3db1c9efbd9c057f454c135d6..4aae8ed15873d1d3c1cc8fc9d80d3ef9b42e5654 100644 (file)
@@ -12,7 +12,7 @@ FUNCTIONALITY CONSTANTS
 -----------------------
 
 For the most up-to-date list of functionality constants, please check
-<linux/i2c.h>!
+<uapi/linux/i2c.h>!
 
   I2C_FUNC_I2C                    Plain i2c-level commands (Pure SMBus
                                   adapters typically can not do these)
diff --git a/Documentation/ia64/paravirt_ops.txt b/Documentation/ia64/paravirt_ops.txt
deleted file mode 100644 (file)
index 39ded02..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-Paravirt_ops on IA64
-====================
-                          21 May 2008, Isaku Yamahata <yamahata@valinux.co.jp>
-
-
-Introduction
-------------
-The aim of this documentation is to help with maintainability and/or to
-encourage people to use paravirt_ops/IA64.
-
-paravirt_ops (pv_ops in short) is a way for virtualization support of
-Linux kernel on x86. Several ways for virtualization support were
-proposed, paravirt_ops is the winner.
-On the other hand, now there are also several IA64 virtualization
-technologies like kvm/IA64, xen/IA64 and many other academic IA64
-hypervisors so that it is good to add generic virtualization
-infrastructure on Linux/IA64.
-
-
-What is paravirt_ops?
----------------------
-It has been developed on x86 as virtualization support via API, not ABI.
-It allows each hypervisor to override operations which are important for
-hypervisors at API level. And it allows a single kernel binary to run on
-all supported execution environments including native machine.
-Essentially paravirt_ops is a set of function pointers which represent
-operations corresponding to low level sensitive instructions and high
-level functionalities in various area. But one significant difference
-from usual function pointer table is that it allows optimization with
-binary patch. It is because some of these operations are very
-performance sensitive and indirect call overhead is not negligible.
-With binary patch, indirect C function call can be transformed into
-direct C function call or in-place execution to eliminate the overhead.
-
-Thus, operations of paravirt_ops are classified into three categories.
-- simple indirect call
-  These operations correspond to high level functionality so that the
-  overhead of indirect call isn't very important.
-
-- indirect call which allows optimization with binary patch
-  Usually these operations correspond to low level instructions. They
-  are called frequently and performance critical. So the overhead is
-  very important.
-
-- a set of macros for hand written assembly code
-  Hand written assembly codes (.S files) also need paravirtualization
-  because they include sensitive instructions or some of code paths in
-  them are very performance critical.
-
-
-The relation to the IA64 machine vector
----------------------------------------
-Linux/IA64 has the IA64 machine vector functionality which allows the
-kernel to switch implementations (e.g. initialization, ipi, dma api...)
-depending on executing platform.
-We can replace some implementations very easily defining a new machine
-vector. Thus another approach for virtualization support would be
-enhancing the machine vector functionality.
-But paravirt_ops approach was taken because
-- virtualization support needs wider support than machine vector does.
-  e.g. low level instruction paravirtualization. It must be
-       initialized very early before platform detection.
-
-- virtualization support needs more functionality like binary patch.
-  Probably the calling overhead might not be very large compared to the
-  emulation overhead of virtualization. However in the native case, the
-  overhead should be eliminated completely.
-  A single kernel binary should run on each environment including native,
-  and the overhead of paravirt_ops on native environment should be as
-  small as possible.
-
-- for full virtualization technology, e.g. KVM/IA64 or
-  Xen/IA64 HVM domain, the result would be
-  (the emulated platform machine vector. probably dig) + (pv_ops).
-  This means that the virtualization support layer should be under
-  the machine vector layer.
-
-Possibly it might be better to move some function pointers from
-paravirt_ops to machine vector. In fact, Xen domU case utilizes both
-pv_ops and machine vector.
-
-
-IA64 paravirt_ops
------------------
-In this section, the concrete paravirt_ops will be discussed.
-Because of the architecture difference between ia64 and x86, the
-resulting set of functions is very different from x86 pv_ops.
-
-- C function pointer tables
-They are not very performance critical so that simple C indirect
-function call is acceptable. The following structures are defined at
-this moment. For details see linux/include/asm-ia64/paravirt.h
-  - struct pv_info
-    This structure describes the execution environment.
-  - struct pv_init_ops
-    This structure describes the various initialization hooks.
-  - struct pv_iosapic_ops
-    This structure describes hooks to iosapic operations.
-  - struct pv_irq_ops
-    This structure describes hooks to irq related operations
-  - struct pv_time_op
-    This structure describes hooks to steal time accounting.
-
-- a set of indirect calls which need optimization
-Currently this class of functions correspond to a subset of IA64
-intrinsics. At this moment the optimization with binary patch isn't
-implemented yet.
-struct pv_cpu_op is defined. For details see
-linux/include/asm-ia64/paravirt_privop.h
-Mostly they correspond to ia64 intrinsics 1-to-1.
-Caveat: Now they are defined as C indirect function pointers, but in
-order to support binary patch optimization, they will be changed
-using GCC extended inline assembly code.
-
-- a set of macros for hand written assembly code (.S files)
-For maintenance purpose, the taken approach for .S files is single
-source code and compile multiple times with different macros definitions.
-Each pv_ops instance must define those macros to compile.
-The important thing here is that sensitive, but non-privileged
-instructions must be paravirtualized and that some privileged
-instructions also need paravirtualization for reasonable performance.
-Developers who modify .S files must be aware of that. At this moment
-an easy checker is implemented to detect paravirtualization breakage.
-But it doesn't cover all the cases.
-
-Sometimes this set of macros is called pv_cpu_asm_op. But there is no
-corresponding structure in the source code.
-Those macros mostly 1:1 correspond to a subset of privileged
-instructions. See linux/include/asm-ia64/native/inst.h.
-And some functions written in assembly also need to be overrided so
-that each pv_ops instance have to define some macros. Again see
-linux/include/asm-ia64/native/inst.h.
-
-
-Those structures must be initialized very early before start_kernel.
-Probably initialized in head.S using multi entry point or some other trick.
-For native case implementation see linux/arch/ia64/kernel/paravirt.c.
index 90bca6f988e115e046583c13b9d157ebc38bd280..a63e5e013a8cddee63b1d3520dd1c2c73e80dc31 100644 (file)
@@ -3,8 +3,8 @@ ALPS Touchpad Protocol
 
 Introduction
 ------------
-Currently the ALPS touchpad driver supports five protocol versions in use by
-ALPS touchpads, called versions 1, 2, 3, 4 and 5.
+Currently the ALPS touchpad driver supports seven protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, 4, 5, 6 and 7.
 
 Since roughly mid-2010 several new ALPS touchpads have been released and
 integrated into a variety of laptops and netbooks.  These new touchpads
@@ -240,3 +240,67 @@ For mt, the format is:
  byte 3:    0  x23  x22   x21 x20  x19  x18   x17
  byte 4:    0   x9   x8    x7  x6   x5   x4    x3
  byte 5:    0  x16  x15   x14 x13  x12  x11   x10
+
+ALPS Absolute Mode - Protocol Version 6
+---------------------------------------
+
+For trackstick packet, the format is:
+
+ byte 0:    1    1    1    1    1    1    1    1
+ byte 1:    0   X6   X5   X4   X3   X2   X1   X0
+ byte 2:    0   Y6   Y5   Y4   Y3   Y2   Y1   Y0
+ byte 3:    ?   Y7   X7    ?    ?    M    R    L
+ byte 4:   Z7   Z6   Z5   Z4   Z3   Z2   Z1   Z0
+ byte 5:    0    1    1    1    1    1    1    1
+
+For touchpad packet, the format is:
+
+ byte 0:    1    1    1    1    1    1    1    1
+ byte 1:    0    0    0    0   x3   x2   x1   x0
+ byte 2:    0    0    0    0   y3   y2   y1   y0
+ byte 3:    ?   x7   x6   x5   x4    ?    r    l
+ byte 4:    ?   y7   y6   y5   y4    ?    ?    ?
+ byte 5:   z7   z6   z5   z4   z3   z2   z1   z0
+
+(v6 touchpad does not have middle button)
+
+ALPS Absolute Mode - Protocol Version 7
+---------------------------------------
+
+For trackstick packet, the format is:
+
+ byte 0:    0    1    0    0    1    0    0    0
+ byte 1:    1    1    *    *    1    M    R    L
+ byte 2:   X7    1   X5   X4   X3   X2   X1   X0
+ byte 3:   Z6    1   Y6   X6    1   Y2   Y1   Y0
+ byte 4:   Y7    0   Y5   Y4   Y3    1    1    0
+ byte 5:  T&P    0   Z5   Z4   Z3   Z2   Z1   Z0
+
+For touchpad packet, the format is:
+
+         packet-fmt     b7     b6     b5     b4     b3     b2     b1     b0
+ byte 0: TWO & MULTI     L      1      R      M      1   Y0-2   Y0-1   Y0-0
+ byte 0: NEW             L      1   X1-5      1      1   Y0-2   Y0-1   Y0-0
+ byte 1:             Y0-10   Y0-9   Y0-8   Y0-7   Y0-6   Y0-5   Y0-4   Y0-3
+ byte 2:             X0-11      1  X0-10   X0-9   X0-8   X0-7   X0-6   X0-5
+ byte 3:             X1-11      1   X0-4   X0-3      1   X0-2   X0-1   X0-0
+ byte 4: TWO         X1-10    TWO   X1-9   X1-8   X1-7   X1-6   X1-5   X1-4
+ byte 4: MULTI       X1-10    TWO   X1-9   X1-8   X1-7   X1-6   Y1-5      1
+ byte 4: NEW         X1-10    TWO   X1-9   X1-8   X1-7   X1-6      0      0
+ byte 5: TWO & NEW   Y1-10      0   Y1-9   Y1-8   Y1-7   Y1-6   Y1-5   Y1-4
+ byte 5: MULTI       Y1-10      0   Y1-9   Y1-8   Y1-7   Y1-6    F-1    F-0
+
+ L:         Left button
+ R / M:     Non-clickpads: Right / Middle button
+            Clickpads: When > 2 fingers are down, and some fingers
+            are in the button area, then the 2 coordinates reported
+            are for fingers outside the button area and these report
+            extra fingers being present in the right / left button
+            area. Note these fingers are not added to the F field!
+            so if a TWO packet is received and R = 1 then there are
+            3 fingers down, etc.
+ TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
+            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+               in NEW fmt
+ F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
index a311db829e9bb6a819bc293404d65d5c3584a741..74b6c6d97210902d63fc6022e02968c5dc317c02 100644 (file)
@@ -524,15 +524,16 @@ more details, with real examples.
        Example:
                #arch/x86/Makefile
                cflags-y += $(shell \
-               if [ $(call cc-version) -ge 0300 ] ; then \
+               if [ $(cc-version) -ge 0300 ] ; then \
                        echo "-mregparm=3"; fi ;)
 
        In the above example, -mregparm=3 is only used for gcc version greater
        than or equal to gcc 3.0.
 
     cc-ifversion
-       cc-ifversion tests the version of $(CC) and equals last argument if
-       version expression is true.
+       cc-ifversion tests the version of $(CC) and equals the fourth parameter
+       if version expression is true, or the fifth (if given) if the version
+       expression is false.
 
        Example:
                #fs/reiserfs/Makefile
@@ -552,7 +553,7 @@ more details, with real examples.
 
        Example:
                #arch/powerpc/Makefile
-               $(Q)if test "$(call cc-fullversion)" = "040200" ; then \
+               $(Q)if test "$(cc-fullversion)" = "040200" ; then \
                        echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
                        false ; \
                fi
@@ -751,12 +752,12 @@ generated by kbuild are deleted all over the kernel src tree when
 Additional files can be specified in kbuild makefiles by use of $(clean-files).
 
        Example:
-               #drivers/pci/Makefile
-               clean-files := devlist.h classlist.h
+               #lib/Makefile
+               clean-files := crc32table.h
 
 When executing "make clean", the file "crc32table.h" will be
 deleted. Kbuild will assume files to be in the same relative directory as the
-Makefile except if an absolute path is specified (path starting with '/').
+Makefile, except if prefixed with $(objtree).
 
 To delete a directory hierarchy use:
 
@@ -764,9 +765,8 @@ To delete a directory hierarchy use:
                #scripts/package/Makefile
                clean-dirs := $(objtree)/debian/
 
-This will delete the directory debian, including all subdirectories.
-Kbuild will assume the directories to be in the same relative path as the
-Makefile if no absolute path is specified (path does not start with '/').
+This will delete the directory debian in the toplevel directory, including all
+subdirectories.
 
 To exclude certain files from make clean, use the $(no-clean-files) variable.
 This is only a special case used in the top level Kbuild file:
index 1b8c964b0d175c7d86d86896469650e5c59fa689..4412f695a62f493230495114f6c72a87537909b2 100644 (file)
@@ -388,6 +388,16 @@ tcp_mtu_probing - INTEGER
          1 - Disabled by default, enabled when an ICMP black hole detected
          2 - Always enabled, use initial MSS of tcp_base_mss.
 
+tcp_probe_interval - INTEGER
+       Controls how often to start TCP Packetization-Layer Path MTU
+       Discovery reprobe. The default is reprobing every 10 minutes as
+       per RFC4821.
+
+tcp_probe_threshold - INTEGER
+       Controls when TCP Packetization-Layer Path MTU Discovery probing
+       will stop in respect to the width of search range in bytes. Default
+       is 8 bytes.
+
 tcp_no_metrics_save - BOOLEAN
        By default, TCP saves various connection metrics in the route cache
        when the connection closes, so that connections established in the
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
new file mode 100644 (file)
index 0000000..639ddf0
--- /dev/null
@@ -0,0 +1,20 @@
+/proc/sys/net/mpls/* Variables:
+
+platform_labels - INTEGER
+       Number of entries in the platform label table.  It is not
+       possible to configure forwarding for label values equal to or
+       greater than the number of platform labels.
+
+       A dense utilization of the entries in the platform label table
+       is possible and expected as the platform labels are locally
+       allocated.
+
+       If the number of platform label table entries is set to 0 no
+       label will be recognized by the kernel and mpls forwarding
+       will be disabled.
+
+       Reducing this value will remove all label routing entries that
+       no longer fit in the table.
+
+       Possible values: 0 - 1048575
+       Default: 0
index d2a9f43b5546684fec415e957617869e418ed140..0362a42f7cf4478b8592d02f20d3128acd560ead 100644 (file)
@@ -38,7 +38,7 @@ The corresponding adapter's LED will blink multiple times.
 
 3.     Features supported:
 a. Jumbo frames. Xframe I/II supports MTU up to 9600 bytes,
-modifiable using ifconfig command.
+modifiable using ip command.
 
 b. Offloads. Supports checksum offload(TCP/UDP/IP) on transmit
 and receive, TSO.
index bb76c667a476557df0416a43f2ad622247fbd65c..abfec245f97c6aa9cc6e4bc40dcf8c2324f496ec 100644 (file)
@@ -39,7 +39,7 @@ iii) PCI-SIG's I/O Virtualization
 
 iv)  Jumbo frames
        X3100 Series supports MTU up to 9600 bytes, modifiable using
-       ifconfig command.
+       ip command.
 
 v)   Offloads supported: (Enabled by default)
        Checksum offload (TCP/UDP/IP) on transmit and receive paths
index e952d30bbf0f8f1bed57fdaedaa4982386175160..af0d23968ee710983f8797ed41f91fc01a220010 100644 (file)
@@ -2,6 +2,9 @@ Virtualization support in the Linux kernel.
 
 00-INDEX
        - this file.
+
+paravirt_ops.txt
+       - Describes the Linux kernel pv_ops to support different hypervisors
 kvm/
        - Kernel Virtual Machine.  See also http://linux-kvm.org
 uml/
diff --git a/Documentation/virtual/paravirt_ops.txt b/Documentation/virtual/paravirt_ops.txt
new file mode 100644 (file)
index 0000000..d4881c0
--- /dev/null
@@ -0,0 +1,32 @@
+Paravirt_ops
+============
+
+Linux provides support for different hypervisor virtualization technologies.
+Historically different binary kernels would be required in order to support
+different hypervisors, this restriction was removed with pv_ops.
+Linux pv_ops is a virtualization API which enables support for different
+hypervisors. It allows each hypervisor to override critical operations and
+allows a single kernel binary to run on all supported execution environments
+including native machine -- without any hypervisors.
+
+pv_ops provides a set of function pointers which represent operations
+corresponding to low level critical instructions and high level
+functionalities in various areas. pv-ops allows for optimizations at run
+time by enabling binary patching of the low-ops critical operations
+at boot time.
+
+pv_ops operations are classified into three categories:
+
+- simple indirect call
+  These operations correspond to high level functionality where it is
+  known that the overhead of indirect call isn't very important.
+
+- indirect call which allows optimization with binary patch
+  Usually these operations correspond to low level critical instructions. They
+  are called frequently and are performance critical. The overhead is
+  very important.
+
+- a set of macros for hand written assembly code
+  Hand written assembly codes (.S files) also need paravirtualization
+  because they include sensitive instructions or some of code paths in
+  them are very performance critical.
index 199f453cb4de10016030c2dd230fb9a3a3125cee..82fbdbc1e0b0626611462132d38fc29c5ee27078 100644 (file)
@@ -3,7 +3,7 @@ protocol of kernel. These should be filled by bootloader or 16-bit
 real-mode setup code of the kernel. References/settings to it mainly
 are in:
 
-  arch/x86/include/asm/bootparam.h
+  arch/x86/include/uapi/asm/bootparam.h
 
 
 Offset Proto   Name            Meaning
diff --git a/Kbuild b/Kbuild
index b8b708ad6dc3815eb0d23bfea2c972d03b9477c0..ab8ded92e870f55290d4d8283e4cbb1b849f8a58 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -5,24 +5,23 @@
 # 2) Generate asm-offsets.h (may need bounds.h)
 # 3) Check for missing system calls
 
-#####
-# 1) Generate bounds.h
-
-bounds-file := include/generated/bounds.h
-
-always  := $(bounds-file)
-targets := $(bounds-file) kernel/bounds.s
+# Default sed regexp - multiline due to syntax constraints
+define sed-y
+       "/^->/{s:->#\(.*\):/* \1 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+       s:->::; p;}"
+endef
 
-quiet_cmd_bounds = GEN     $@
-define cmd_bounds
+quiet_cmd_offsets = GEN     $@
+define cmd_offsets
        (set -e; \
-        echo "#ifndef __LINUX_BOUNDS_H__"; \
-        echo "#define __LINUX_BOUNDS_H__"; \
+        echo "#ifndef $2"; \
+        echo "#define $2"; \
         echo "/*"; \
         echo " * DO NOT MODIFY."; \
         echo " *"; \
         echo " * This file was generated by Kbuild"; \
-        echo " *"; \
         echo " */"; \
         echo ""; \
         sed -ne $(sed-y) $<; \
@@ -30,6 +29,14 @@ define cmd_bounds
         echo "#endif" ) > $@
 endef
 
+#####
+# 1) Generate bounds.h
+
+bounds-file := include/generated/bounds.h
+
+always  := $(bounds-file)
+targets := $(bounds-file) kernel/bounds.s
+
 # We use internal kbuild rules to avoid the "is up to date" message from make
 kernel/bounds.s: kernel/bounds.c FORCE
        $(Q)mkdir -p $(dir $@)
@@ -37,7 +44,7 @@ kernel/bounds.s: kernel/bounds.c FORCE
 
 $(obj)/$(bounds-file): kernel/bounds.s Kbuild
        $(Q)mkdir -p $(dir $@)
-       $(call cmd,bounds)
+       $(call cmd,offsets,__LINUX_BOUNDS_H__)
 
 #####
 # 2) Generate asm-offsets.h
@@ -49,32 +56,6 @@ always  += $(offsets-file)
 targets += $(offsets-file)
 targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 
-
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
-       "/^->/{s:->#\(.*\):/* \1 */:; \
-       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
-       s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
-       s:->::; p;}"
-endef
-
-quiet_cmd_offsets = GEN     $@
-define cmd_offsets
-       (set -e; \
-        echo "#ifndef __ASM_OFFSETS_H__"; \
-        echo "#define __ASM_OFFSETS_H__"; \
-        echo "/*"; \
-        echo " * DO NOT MODIFY."; \
-        echo " *"; \
-        echo " * This file was generated by Kbuild"; \
-        echo " *"; \
-        echo " */"; \
-        echo ""; \
-        sed -ne $(sed-y) $<; \
-        echo ""; \
-        echo "#endif" ) > $@
-endef
-
 # We use internal kbuild rules to avoid the "is up to date" message from make
 arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
                                       $(obj)/$(bounds-file) FORCE
@@ -82,7 +63,7 @@ arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
        $(call if_changed_dep,cc_s_c)
 
 $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild
-       $(call cmd,offsets)
+       $(call cmd,offsets,__ASM_OFFSETS_H__)
 
 #####
 # 3) Check for missing system calls
index 274a0058f3f2f1db923fbe974c81363151ebca1f..a6ae6eb0c545b354cc5f4fe12264808c38c35dbc 100644 (file)
@@ -2065,7 +2065,7 @@ F:        include/net/bluetooth/
 BONDING DRIVER
 M:     Jay Vosburgh <j.vosburgh@gmail.com>
 M:     Veaceslav Falico <vfalico@gmail.com>
-M:     Andy Gospodarek <andy@greyhouse.net>
+M:     Andy Gospodarek <gospo@cumulusnetworks.com>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
@@ -2433,7 +2433,8 @@ F:        arch/powerpc/oprofile/*cell*
 F:     arch/powerpc/platforms/cell/
 
 CEPH DISTRIBUTED FILE SYSTEM CLIENT
-M:     Sage Weil <sage@inktank.com>
+M:     Yan, Zheng <zyan@redhat.com>
+M:     Sage Weil <sage@redhat.com>
 L:     ceph-devel@vger.kernel.org
 W:     http://ceph.com/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
@@ -3936,7 +3937,7 @@ S:        Maintained
 F:     drivers/staging/fbtft/
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
-M:     Robert Love <robert.w.love@intel.com>
+M:     Vasu Dev <vasu.dev@intel.com>
 L:     fcoe-devel@open-fcoe.org
 W:     www.Open-FCoE.org
 S:     Supported
@@ -4092,6 +4093,12 @@ S:       Maintained
 F:     include/linux/platform_data/video-imxfb.h
 F:     drivers/video/fbdev/imxfb.c
 
+FREESCALE QUAD SPI DRIVER
+M:     Han Xu <han.xu@freescale.com>
+L:     linux-mtd@lists.infradead.org
+S:     Maintained
+F:     drivers/mtd/spi-nor/fsl-quadspi.c
+
 FREESCALE SOC FS_ENET DRIVER
 M:     Pantelis Antoniou <pantelis.antoniou@gmail.com>
 M:     Vitaly Bordug <vbordug@ru.mvista.com>
@@ -7206,8 +7213,7 @@ ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 M:     Mark Fasheh <mfasheh@suse.com>
 M:     Joel Becker <jlbec@evilplan.org>
 L:     ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
-W:     http://oss.oracle.com/projects/ocfs2/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
+W:     http://ocfs2.wiki.kernel.org
 S:     Supported
 F:     Documentation/filesystems/ocfs2.txt
 F:     Documentation/filesystems/dlmfs.txt
@@ -7296,7 +7302,7 @@ M:        Alok Kataria <akataria@vmware.com>
 M:     Rusty Russell <rusty@rustcorp.com.au>
 L:     virtualization@lists.linux-foundation.org
 S:     Supported
-F:     Documentation/ia64/paravirt_ops.txt
+F:     Documentation/virtual/paravirt_ops.txt
 F:     arch/*/kernel/paravirt*
 F:     arch/*/include/asm/paravirt.h
 
@@ -7992,8 +7998,8 @@ S:        Supported
 F:     drivers/net/wireless/ath/wcn36xx/
 
 RADOS BLOCK DEVICE (RBD)
-M:     Yehuda Sadeh <yehuda@inktank.com>
-M:     Sage Weil <sage@inktank.com>
+M:     Ilya Dryomov <idryomov@gmail.com>
+M:     Sage Weil <sage@redhat.com>
 M:     Alex Elder <elder@kernel.org>
 M:     ceph-devel@vger.kernel.org
 W:     http://ceph.com/
@@ -8496,6 +8502,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
 M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:     Maintained
+F:     include/linux/dma/dw.h
 F:     include/linux/platform_data/dma-dw.h
 F:     drivers/dma/dw/
 
@@ -8558,7 +8565,7 @@ S:        Maintained
 F:     drivers/scsi/sr*
 
 SCSI RDMA PROTOCOL (SRP) INITIATOR
-M:     Bart Van Assche <bvanassche@acm.org>
+M:     Bart Van Assche <bart.vanassche@sandisk.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.openfabrics.org
@@ -9710,6 +9717,11 @@ L:       linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
+TI CDCE706 CLOCK DRIVER
+M:     Max Filippov <jcmvbkbc@gmail.com>
+S:     Maintained
+F:     drivers/clk/clk-cdce706.c
+
 TI CLOCK DRIVER
 M:     Tero Kristo <t-kristo@ti.com>
 L:     linux-omap@vger.kernel.org
index dd8796caa23982428676d25ed78967d3b2d0e069..e6a9b1b94656b6e70cd2735cc1fcdaea32789cf4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
-VERSION = 3
-PATCHLEVEL = 19
+VERSION = 4
+PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Diseased Newt
+EXTRAVERSION = -rc2
+NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -502,7 +502,7 @@ endif
 ifeq ($(KBUILD_EXTMOD),)
         ifneq ($(filter config %config,$(MAKECMDGOALS)),)
                 config-targets := 1
-                ifneq ($(filter-out config %config,$(MAKECMDGOALS)),)
+                ifneq ($(words $(MAKECMDGOALS)),1)
                         mixed-targets := 1
                 endif
         endif
@@ -1180,7 +1180,7 @@ CLEAN_DIRS  += $(MODVERDIR)
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS  += include/config usr/include include/generated          \
                  arch/*/include/generated .tmp_objdiff
-MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
+MRPROPER_FILES += .config .config.old .version .old_version \
                  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
                  signing_key.priv signing_key.x509 x509.genkey         \
                  extra_certificates signing_key.x509.keyid             \
index 766fdfde2b7aa0329cc42681f2fb907b08ce9064..9b0d40093c9a377ff142ffe2241c44b152e41ffd 100644 (file)
@@ -27,7 +27,7 @@
 #define get_ds()  (KERNEL_DS)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a,b)        ((a).seg == (b).seg)
+#define segment_eq(a, b)       ((a).seg == (b).seg)
 
 /*
  * Is a address valid? This does a straightforward calculation rather
  *  - AND "addr+size" doesn't have any high-bits set
  *  - OR we are in kernel mode.
  */
-#define __access_ok(addr,size,segment) \
+#define __access_ok(addr, size, segment) \
        (((segment).seg & (addr | size | (addr+size))) == 0)
 
-#define access_ok(type,addr,size)                              \
+#define access_ok(type, addr, size)                            \
 ({                                                             \
        __chk_user_ptr(addr);                                   \
-       __access_ok(((unsigned long)(addr)),(size),get_fs());   \
+       __access_ok(((unsigned long)(addr)), (size), get_fs()); \
 })
 
 /*
  * (a) re-use the arguments for side effects (sizeof/typeof is ok)
  * (b) require any knowledge of processes at this stage
  */
-#define put_user(x,ptr) \
-  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
-#define get_user(x,ptr) \
-  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
+#define put_user(x, ptr) \
+  __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
+#define get_user(x, ptr) \
+  __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
 
 /*
  * The "__xxx" versions do not do address space checking, useful when
  * doing multiple accesses to the same area (the programmer has to do the
  * checks by hand with "access_ok()")
  */
-#define __put_user(x,ptr) \
-  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define __get_user(x,ptr) \
-  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+  __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+  __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
   
 /*
  * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
@@ -84,7 +84,7 @@
 
 extern void __get_user_unknown(void);
 
-#define __get_user_nocheck(x,ptr,size)                         \
+#define __get_user_nocheck(x, ptr, size)                       \
 ({                                                             \
        long __gu_err = 0;                                      \
        unsigned long __gu_val;                                 \
@@ -96,16 +96,16 @@ extern void __get_user_unknown(void);
          case 8: __get_user_64(ptr); break;                    \
          default: __get_user_unknown(); break;                 \
        }                                                       \
-       (x) = (__typeof__(*(ptr))) __gu_val;                    \
+       (x) = (__force __typeof__(*(ptr))) __gu_val;            \
        __gu_err;                                               \
 })
 
-#define __get_user_check(x,ptr,size,segment)                           \
+#define __get_user_check(x, ptr, size, segment)                                \
 ({                                                                     \
        long __gu_err = -EFAULT;                                        \
        unsigned long __gu_val = 0;                                     \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
-       if (__access_ok((unsigned long)__gu_addr,size,segment)) {       \
+       if (__access_ok((unsigned long)__gu_addr, size, segment)) {     \
                __gu_err = 0;                                           \
                switch (size) {                                         \
                  case 1: __get_user_8(__gu_addr); break;               \
@@ -115,7 +115,7 @@ extern void __get_user_unknown(void);
                  default: __get_user_unknown(); break;                 \
                }                                                       \
        }                                                               \
-       (x) = (__typeof__(*(ptr))) __gu_val;                            \
+       (x) = (__force __typeof__(*(ptr))) __gu_val;                    \
        __gu_err;                                                       \
 })
 
@@ -201,31 +201,31 @@ struct __large_struct { unsigned long buf[100]; };
 
 extern void __put_user_unknown(void);
 
-#define __put_user_nocheck(x,ptr,size)                         \
+#define __put_user_nocheck(x, ptr, size)                       \
 ({                                                             \
        long __pu_err = 0;                                      \
        __chk_user_ptr(ptr);                                    \
        switch (size) {                                         \
-         case 1: __put_user_8(x,ptr); break;                   \
-         case 2: __put_user_16(x,ptr); break;                  \
-         case 4: __put_user_32(x,ptr); break;                  \
-         case 8: __put_user_64(x,ptr); break;                  \
+         case 1: __put_user_8(x, ptr); break;                  \
+         case 2: __put_user_16(x, ptr); break;                 \
+         case 4: __put_user_32(x, ptr); break;                 \
+         case 8: __put_user_64(x, ptr); break;                 \
          default: __put_user_unknown(); break;                 \
        }                                                       \
        __pu_err;                                               \
 })
 
-#define __put_user_check(x,ptr,size,segment)                           \
+#define __put_user_check(x, ptr, size, segment)                                \
 ({                                                                     \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
-       if (__access_ok((unsigned long)__pu_addr,size,segment)) {       \
+       if (__access_ok((unsigned long)__pu_addr, size, segment)) {     \
                __pu_err = 0;                                           \
                switch (size) {                                         \
-                 case 1: __put_user_8(x,__pu_addr); break;             \
-                 case 2: __put_user_16(x,__pu_addr); break;            \
-                 case 4: __put_user_32(x,__pu_addr); break;            \
-                 case 8: __put_user_64(x,__pu_addr); break;            \
+                 case 1: __put_user_8(x, __pu_addr); break;            \
+                 case 2: __put_user_16(x, __pu_addr); break;           \
+                 case 4: __put_user_32(x, __pu_addr); break;           \
+                 case 8: __put_user_64(x, __pu_addr); break;           \
                  default: __put_user_unknown(); break;                 \
                }                                                       \
        }                                                               \
@@ -237,7 +237,7 @@ extern void __put_user_unknown(void);
  * instead of writing: this is because they do not write to
  * any memory gcc knows about, so there are no aliasing issues
  */
-#define __put_user_64(x,addr)                                  \
+#define __put_user_64(x, addr)                                 \
 __asm__ __volatile__("1: stq %r2,%1\n"                         \
        "2:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
@@ -247,7 +247,7 @@ __asm__ __volatile__("1: stq %r2,%1\n"                              \
                : "=r"(__pu_err)                                \
                : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
 
-#define __put_user_32(x,addr)                                  \
+#define __put_user_32(x, addr)                                 \
 __asm__ __volatile__("1: stl %r2,%1\n"                         \
        "2:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
@@ -260,7 +260,7 @@ __asm__ __volatile__("1: stl %r2,%1\n"                              \
 #ifdef __alpha_bwx__
 /* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */
 
-#define __put_user_16(x,addr)                                  \
+#define __put_user_16(x, addr)                                 \
 __asm__ __volatile__("1: stw %r2,%1\n"                         \
        "2:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
@@ -270,7 +270,7 @@ __asm__ __volatile__("1: stw %r2,%1\n"                              \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 
-#define __put_user_8(x,addr)                                   \
+#define __put_user_8(x, addr)                                  \
 __asm__ __volatile__("1: stb %r2,%1\n"                         \
        "2:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
@@ -283,7 +283,7 @@ __asm__ __volatile__("1: stb %r2,%1\n"                              \
 /* Unfortunately, we can't get an unaligned access trap for the sub-word
    write, so we have to do a general unaligned operation.  */
 
-#define __put_user_16(x,addr)                                  \
+#define __put_user_16(x, addr)                                 \
 {                                                              \
        long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;        \
        __asm__ __volatile__(                                   \
@@ -308,13 +308,13 @@ __asm__ __volatile__("1: stb %r2,%1\n"                            \
        "       .long 4b - .\n"                                 \
        "       lda $31, 5b-4b(%0)\n"                           \
        ".previous"                                             \
-               : "=r"(__pu_err), "=&r"(__pu_tmp1),             \
-                 "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),           \
+               : "=r"(__pu_err), "=&r"(__pu_tmp1),             \
+                 "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),           \
                  "=&r"(__pu_tmp4)                              \
                : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
 }
 
-#define __put_user_8(x,addr)                                   \
+#define __put_user_8(x, addr)                                  \
 {                                                              \
        long __pu_tmp1, __pu_tmp2;                              \
        __asm__ __volatile__(                                   \
@@ -330,7 +330,7 @@ __asm__ __volatile__("1: stb %r2,%1\n"                              \
        "       .long 2b - .\n"                                 \
        "       lda $31, 3b-2b(%0)\n"                           \
        ".previous"                                             \
-               : "=r"(__pu_err),                               \
+               : "=r"(__pu_err),                               \
                  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)            \
                : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
 }
@@ -366,7 +366,7 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
                : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
                : __module_address(__copy_user)
                  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
-               : "$1","$2","$3","$4","$5","$28","memory");
+               : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
 
        return __cu_len;
 }
@@ -379,15 +379,15 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
        return len;
 }
 
-#define __copy_to_user(to,from,n)                                      \
+#define __copy_to_user(to, from, n)                                    \
 ({                                                                     \
        __chk_user_ptr(to);                                             \
-       __copy_tofrom_user_nocheck((__force void *)(to),(from),(n));    \
+       __copy_tofrom_user_nocheck((__force void *)(to), (from), (n));  \
 })
-#define __copy_from_user(to,from,n)                                    \
+#define __copy_from_user(to, from, n)                                  \
 ({                                                                     \
        __chk_user_ptr(from);                                           \
-       __copy_tofrom_user_nocheck((to),(__force void *)(from),(n));    \
+       __copy_tofrom_user_nocheck((to), (__force void *)(from), (n));  \
 })
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -418,7 +418,7 @@ __clear_user(void __user *to, long len)
                : "=r"(__cl_len), "=r"(__cl_to)
                : __module_address(__do_clear_user)
                  "0"(__cl_len), "1"(__cl_to)
-               : "$1","$2","$3","$4","$5","$28","memory");
+               : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
        return __cl_len;
 }
 
index a098d7c05e967461cc200fbb52d1acb51dcee966..cfb5052239a1cffdcb3f8ac3dc07537441d55bef 100644 (file)
                        chan_allocation_order = <0>;
                        chan_priority = <1>;
                        block_size = <0x7ff>;
-                       data_width = <2 0 0 0>;
+                       data_width = <2>;
                        clocks = <&ahb_clk>;
                        clock-names = "hclk";
                };
index 6cc25ed912eeff57fc9bd06cc390c6ece3e56b14..2c6248d9a9efcda29f6469987ddb2a9396b0ff82 100644 (file)
 
 &usb0 {
        status = "okay";
+       dr_mode = "peripheral";
 };
 
 &usb1 {
index f9a17e2ca8cb068dfa63034d43a1077fb0a606bb..0198f5a62b96cd8b8569d4ae5d6dfd2938fcd580 100644 (file)
                >;
        };
 
-       i2c1_pins_default: i2c1_pins_default {
-               pinctrl-single,pins = <
-                       0x15c (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_cs0.i2c1_scl */
-                       0x158 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_d1.i2c1_sda */
-               >;
-       };
-
-       i2c1_pins_sleep: i2c1_pins_sleep {
-               pinctrl-single,pins = <
-                       0x15c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_cs0.i2c1_scl */
-                       0x158 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d1.i2c1_sda */
-               >;
-       };
-
        mmc1_pins_default: pinmux_mmc1_pins_default {
                pinctrl-single,pins = <
                        0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */
        status = "okay";
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&i2c0_pins_default>;
-       pinctrl-1 = <&i2c0_pins_default>;
+       pinctrl-1 = <&i2c0_pins_sleep>;
        clock-frequency = <400000>;
 
        at24@50 {
                pagesize = <64>;
                reg = <0x50>;
        };
-};
-
-&i2c1 {
-       status = "okay";
-       pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&i2c1_pins_default>;
-       pinctrl-1 = <&i2c1_pins_default>;
-       clock-frequency = <400000>;
 
        tps: tps62362@60 {
                compatible = "ti,tps62362";
+               reg = <0x60>;
                regulator-name = "VDD_MPU";
                regulator-min-microvolt = <950000>;
                regulator-max-microvolt = <1330000>;
index 03750af3b49a41493035217c34c16984343b226c..6463f9ef2b548208bda288a78a07ab3353aa2220 100644 (file)
        pinctrl-0 = <&usb1_pins>;
 };
 
-&omap_dwc3_1 {
-       extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-       extcon = <&extcon_usb2>;
-};
-
 &usb2 {
        dr_mode = "peripheral";
 };
index fff0ee69aab495361898b503268fd300a02832c1..9f7c7376f2cfcb42f93a19128e5b652da81a1f58 100644 (file)
                        };
 
                        macb0: ethernet@fffc4000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xfffc4000 0x100>;
                                interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 1f67bb4c144eef2489891b63b64edda96402f100..340179ef6ba022f9d23c96c136cab2a088803ead 100644 (file)
                        };
 
                        macb0: ethernet@fffbc000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xfffbc000 0x100>;
                                interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index ee80aa9c0759c17f178965181fb28f2a562f6b13..586eab7b653d052c2d5e2311452022da0f4e0a9c 100644 (file)
                        };
 
                        macb0: ethernet@fffbc000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xfffbc000 0x100>;
                                interrupts = <25 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 57e89d1d03253fd64af8f4c01f807caf9611dfaf..73d7e30965badd14366540fe2cb9ce6780d67f2e 100644 (file)
@@ -53,7 +53,7 @@
                        };
 
                        macb0: ethernet@f802c000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf802c000 0x100>;
                                interrupts = <24 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 663676c02861ff2e35118f5526d36847edb904e0..d81980c40c7d05a7525fcb1b9f1b09892325dc26 100644 (file)
@@ -41,7 +41,7 @@
                        };
 
                        macb1: ethernet@f8030000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf8030000 0x100>;
                                interrupts = <27 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 5126f9e77a9883ceb4f8af0f24a46d17096e084c..ff5fb6ab0b9748dbecd27fd7432c4f8306f5221d 100644 (file)
                };
        };
 
+       i2c0: i2c@18008000 {
+               compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
+               reg = <0x18008000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+               status = "disabled";
+       };
+
+       i2c1: i2c@1800b000 {
+               compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
+               reg = <0x1800b000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+               status = "disabled";
+       };
+
        uart0: serial@18020000 {
                compatible = "snps,dw-apb-uart";
                reg = <0x18020000 0x100>;
index d2d8e94e0aa2042b80dff2d34b49bad66e16345d..f46329c8ad75c00c068e269565bdb1b931083f47 100644 (file)
@@ -66,8 +66,9 @@
                        reg = <0x1d000 0x1000>;
                        cache-unified;
                        cache-level = <2>;
-                       cache-sets = <16>;
-                       cache-size = <0x80000>;
+                       cache-size = <524288>;
+                       cache-sets = <1024>;
+                       cache-line-size = <32>;
                        interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>;
                };
 
index 857d0289ad4d7c9ed81268a2d2a20c923b2177a0..d3a29c1b841727f58200a37ac14b03e96a18cdd2 100644 (file)
                        DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0)     /* SPI_D1 */
                >;
        };
+
+       usb0_pins: pinmux_usb0_pins {
+               pinctrl-single,pins = <
+                       DM816X_IOPAD(0x0d00, MUX_MODE0)                 /* USB0_DRVVBUS */
+               >;
+       };
+
+       usb1_pins: pinmux_usb0_pins {
+               pinctrl-single,pins = <
+                       DM816X_IOPAD(0x0d04, MUX_MODE0)                 /* USB1_DRVVBUS */
+               >;
+       };
 };
 
 &i2c1 {
 &mmc1 {
        vmmc-supply = <&vmmcsd_fixed>;
 };
+
+/* At least dm8168-evm rev c won't support multipoint, later may */
+&usb0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&usb0_pins>;
+       mentor,multipoint = <0>;
+};
+
+&usb1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&usb1_pins>;
+       mentor,multipoint = <0>;
+};
index d98d0f7de380d95d956aa6826dd626c53d500be7..3c97b5f2addc12a86639ec7b8e63e92349831236 100644 (file)
 
                        /* Device Configuration Registers */
                        scm_conf: syscon@600 {
-                               compatible = "syscon";
+                               compatible = "syscon", "simple-bus";
                                reg = <0x600 0x110>;
                                #address-cells = <1>;
                                #size-cells = <1>;
+                               ranges = <0 0x600 0x110>;
+
+                               usb_phy0: usb-phy@20 {
+                                       compatible = "ti,dm8168-usb-phy";
+                                       reg = <0x20 0x8>;
+                                       reg-names = "phy";
+                                       clocks = <&main_fapll 6>;
+                                       clock-names = "refclk";
+                                       #phy-cells = <0>;
+                                       syscon = <&scm_conf>;
+                               };
+
+                               usb_phy1: usb-phy@28 {
+                                       compatible = "ti,dm8168-usb-phy";
+                                       reg = <0x28 0x8>;
+                                       reg-names = "phy";
+                                       clocks = <&main_fapll 6>;
+                                       clock-names = "refclk";
+                                       #phy-cells = <0>;
+                                       syscon = <&scm_conf>;
+                               };
                        };
 
                        scrm_clocks: clocks {
                                reg-names = "mc", "control";
                                interrupts = <18>;
                                interrupt-names = "mc";
-                               dr_mode = "otg";
+                               dr_mode = "host";
+                               interface-type = <0>;
+                               phys = <&usb_phy0>;
+                               phy-names = "usb2-phy";
                                mentor,multipoint = <1>;
                                mentor,num-eps = <16>;
                                mentor,ram-bits = <12>;
 
                        usb1: usb@47401800 {
                                compatible = "ti,musb-am33xx";
-                               status = "disabled";
                                reg = <0x47401c00 0x400
                                       0x47401800 0x200>;
                                reg-names = "mc", "control";
                                interrupts = <19>;
                                interrupt-names = "mc";
-                               dr_mode = "otg";
+                               dr_mode = "host";
+                               interface-type = <0>;
+                               phys = <&usb_phy1>;
+                               phy-names = "usb2-phy";
                                mentor,multipoint = <1>;
                                mentor,num-eps = <16>;
                                mentor,ram-bits = <12>;
index 746cddb1b8f538e1d1fed717b077998f4a46adc8..3290a96ba586a7262c2eb5ce171655cbe7124d50 100644 (file)
        };
 };
 
-&omap_dwc3_1 {
-       extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-       extcon = <&extcon_usb2>;
-};
-
 &usb1 {
        dr_mode = "peripheral";
        pinctrl-names = "default";
index 5827fedafd43d58a00169a4186cbb595d6f11234..127608d79033e32e76143b0707918134f6450283 100644 (file)
                                     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <127>;
+                       dma-channels = <32>;
+                       dma-requests = <127>;
                };
 
                gpio1: gpio@4ae10000 {
                                      <0x4A096800 0x40>; /* pll_ctrl */
                                reg-names = "phy_rx", "phy_tx", "pll_ctrl";
                                ctrl-module = <&omap_control_sata>;
-                               clocks = <&sys_clkin1>;
-                               clock-names = "sysclk";
+                               clocks = <&sys_clkin1>, <&sata_ref_clk>;
+                               clock-names = "sysclk", "refclk";
                                #phy-cells = <0>;
                        };
 
index 4d87117136108873d2410ffaa68010652fd2e356..e0264d0bf7b97118b19aa9af21611e36e832608b 100644 (file)
        phy-supply = <&ldo4_reg>;
 };
 
-&omap_dwc3_1 {
-       extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-       extcon = <&extcon_usb2>;
-};
-
 &usb1 {
        dr_mode = "peripheral";
        pinctrl-names = "default";
index 59d1c297bb30f17aaab458f94e6ba52363c635cb..578fa2a54dce35482ba2d7b7fd1d42ce67a762ac 100644 (file)
@@ -87,8 +87,8 @@
                                     <14>,
                                     <15>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <64>;
+                       dma-channels = <32>;
+                       dma-requests = <64>;
                };
 
                i2c1: i2c@48070000 {
index 60403273f83e6918483641884707c8f5bd546d1e..db80f9d376fadf569655fc9660526b16b0c10739 100644 (file)
        model = "Nokia N900";
        compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
 
+       aliases {
+               i2c0;
+               i2c1 = &i2c1;
+               i2c2 = &i2c2;
+               i2c3 = &i2c3;
+       };
+
        cpus {
                cpu@0 {
                        cpu0-supply = <&vcc>;
                compatible = "smsc,lan91c94";
                interrupt-parent = <&gpio2>;
                interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;  /* gpio54 */
-               reg = <1 0x300 0xf>;            /* 16 byte IO range at offset 0x300 */
+               reg = <1 0 0xf>;                /* 16 byte IO range */
                bank-width = <2>;
                pinctrl-names = "default";
                pinctrl-0 = <&ethernet_pins>;
index 01b71111bd558738595fde6deaa71b05c4c08e9a..f4f78c40b56450160ba566cc25bcb7fac9ca489b 100644 (file)
                                     <14>,
                                     <15>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <96>;
+                       dma-channels = <32>;
+                       dma-requests = <96>;
                };
 
                omap3_pmx_core: pinmux@48002030 {
index 074147cebae49f965b254915240b667bffcf9d21..87401d9f4d8b02314323e5a46ee38e5b0c6e4523 100644 (file)
                                     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <127>;
+                       dma-channels = <32>;
+                       dma-requests = <127>;
                };
 
                gpio1: gpio@4a310000 {
index b321fdf42c9f3c51e3a59d4b51b27a08f4fcaba4..ddff674bd05edd2df3482e35628871628286f2c9 100644 (file)
                                     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <127>;
+                       dma-channels = <32>;
+                       dma-requests = <127>;
                };
 
                gpio1: gpio@4ae10000 {
                                      <0x4A096800 0x40>; /* pll_ctrl */
                                reg-names = "phy_rx", "phy_tx", "pll_ctrl";
                                ctrl-module = <&omap_control_sata>;
-                               clocks = <&sys_clkin>;
-                               clock-names = "sysclk";
+                               clocks = <&sys_clkin>, <&sata_ref_clk>;
+                               clock-names = "sysclk", "refclk";
                                #phy-cells = <0>;
                        };
                };
index fe2af92763129beb8b4b493adbfb847684b4a264..b4544cf11bad1b08e5825dcf6690bb57acc558db 100644 (file)
@@ -41,7 +41,7 @@
                        };
 
                        macb1: ethernet@f802c000 {
-                               compatible = "cdns,at32ap7000-macb", "cdns,macb";
+                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf802c000 0x100>;
                                interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index a6eb5436d26d45f62f336b86fb0be8f06ea560bd..40accc87e3a252580c1008fa041356a40093620e 100644 (file)
                        chan_priority = <1>;
                        block_size = <0xfff>;
                        dma-masters = <2>;
-                       data_width = <3 3 0 0>;
+                       data_width = <3 3>;
                };
 
                dma@eb000000 {
                        chan_allocation_order = <1>;
                        chan_priority = <1>;
                        block_size = <0xfff>;
-                       data_width = <3 3 0 0>;
+                       data_width = <3 3>;
                };
 
                fsmc: flash@b0000000 {
index 8ca3c1a2063deeb0eb426a7b320b269f77e4b04d..5c2925831f2038318258003ae6cd3736da71c0de 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <33>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <35>;
                        status = "disabled";
                };
index 905f84d141f03213947def68787c42f905e2b54b..2fd8988f310c6e25dc2d215a35ed1e633d95f83a 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <33>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
index 4910393d1b09013bcb929dc6325d8ddbd6a7d945..f8818f1edbbef27f16adadc139b8e46ae49823ad 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
index 47e557656993fe99f1ea46667a1c1d27e8a2648f..fa2f403ccf28adf4f6aa10c08978dd3b59b6e709 100644 (file)
                        clock-output-names = "axi";
                };
 
-               ahb1_mux: ahb1_mux@01c20054 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
-                       reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
-                       clock-output-names = "ahb1_mux";
-               };
-
                ahb1: ahb1@01c20054 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-ahb-clk";
+                       compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&ahb1_mux>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
                        clock-output-names = "ahb1";
                };
 
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                spi0_clk: clk@01c200a0 {
                        #dma-cells = <1>;
 
                        /* DMA controller requires AHB1 clocked from PLL6 */
-                       assigned-clocks = <&ahb1_mux>;
+                       assigned-clocks = <&ahb1>;
                        assigned-clock-parents = <&pll6 0>;
                };
 
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb1_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 8>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb1_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 9>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb1_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 10>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb1_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 11>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
index 786d491542ac429567fd6db6142406858090f81c..3a8530b79f1c46200d2b7ee8bbe343198c7106d8 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
index dd34527293e465d4739cf4518bbd8b292a61989b..382ebd137ee4fbe97514362eee3f336d4e253047 100644 (file)
                };
 
                /* dummy clock until actually implemented */
-               pll6: pll6_clk {
+               pll5: pll5_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-clock";
-                       clock-frequency = <600000000>;
-                       clock-output-names = "pll6";
+                       clock-frequency = <0>;
+                       clock-output-names = "pll5";
+               };
+
+               pll6: clk@01c20028 {
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun6i-a31-pll6-clk";
+                       reg = <0x01c20028 0x4>;
+                       clocks = <&osc24M>;
+                       clock-output-names = "pll6", "pll6x2";
                };
 
                cpu: cpu_clk@01c20050 {
                        clock-output-names = "axi";
                };
 
-               ahb1_mux: ahb1_mux_clk@01c20054 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
-                       reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
-                       clock-output-names = "ahb1_mux";
-               };
-
                ahb1: ahb1_clk@01c20054 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-ahb-clk";
+                       compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&ahb1_mux>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
                        clock-output-names = "ahb1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
                        clock-output-names = "apb2";
                };
 
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc0";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc1";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc2";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
+               };
+
+               mbus_clk: clk@01c2015c {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun8i-a23-mbus-clk";
+                       reg = <0x01c2015c 0x4>;
+                       clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+                       clock-output-names = "mbus";
                };
        };
 
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb1_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 8>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb1_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 9>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb1_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 10>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
index e8a4c955241b88e38dfda2cc91d17b701ca6935a..b7e6b6fba5e0f0d66594ec75d36d2bd2a3d4c90e 100644 (file)
@@ -62,6 +62,17 @@ CONFIG_MACH_SPEAR1340=y
 CONFIG_ARCH_STI=y
 CONFIG_ARCH_EXYNOS=y
 CONFIG_EXYNOS5420_MCPM=y
+CONFIG_ARCH_SHMOBILE_MULTI=y
+CONFIG_ARCH_EMEV2=y
+CONFIG_ARCH_R7S72100=y
+CONFIG_ARCH_R8A73A4=y
+CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7779=y
+CONFIG_ARCH_R8A7790=y
+CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7794=y
+CONFIG_ARCH_SH73A0=y
+CONFIG_MACH_MARZEN=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_TEGRA=y
@@ -84,6 +95,8 @@ CONFIG_PCI_KEYSTONE=y
 CONFIG_PCI_MSI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_PCI_TEGRA=y
+CONFIG_PCI_RCAR_GEN2=y
+CONFIG_PCI_RCAR_GEN2_PCIE=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=8
@@ -130,6 +143,7 @@ CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_OMAP_OCP2SCP=y
+CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -157,6 +171,7 @@ CONFIG_AHCI_SUNXI=y
 CONFIG_AHCI_TEGRA=y
 CONFIG_SATA_HIGHBANK=y
 CONFIG_SATA_MV=y
+CONFIG_SATA_RCAR=y
 CONFIG_NETDEVICES=y
 CONFIG_HIX5HD2_GMAC=y
 CONFIG_SUN4I_EMAC=y
@@ -167,14 +182,17 @@ CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
 CONFIG_KS8851=y
 CONFIG_R8169=y
+CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
 CONFIG_MARVELL_PHY=y
+CONFIG_SMSC_PHY=y
 CONFIG_BROADCOM_PHY=y
 CONFIG_ICPLUS_PHY=y
+CONFIG_MICREL_PHY=y
 CONFIG_USB_PEGASUS=y
 CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
@@ -192,15 +210,18 @@ CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_ST1232=m
 CONFIG_TOUCHSCREEN_STMPE=y
 CONFIG_TOUCHSCREEN_SUN4I=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MPU3050=y
 CONFIG_INPUT_AXP20X_PEK=y
+CONFIG_INPUT_ADXL34X=m
 CONFIG_SERIO_AMBAKMI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_EM=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@@ -213,6 +234,9 @@ CONFIG_SERIAL_SIRFSOC_CONSOLE=y
 CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_IMX=y
 CONFIG_SERIAL_IMX_CONSOLE=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=20
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_VT8500=y
@@ -233,19 +257,26 @@ CONFIG_I2C_MUX_PCA954x=y
 CONFIG_I2C_MUX_PINCTRL=y
 CONFIG_I2C_CADENCE=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_GPIO=m
 CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_RIIC=y
 CONFIG_I2C_S3C2410=y
+CONFIG_I2C_SH_MOBILE=y
 CONFIG_I2C_SIRF=y
-CONFIG_I2C_TEGRA=y
 CONFIG_I2C_ST=y
-CONFIG_SPI=y
+CONFIG_I2C_TEGRA=y
 CONFIG_I2C_XILINX=y
-CONFIG_SPI_DAVINCI=y
+CONFIG_I2C_RCAR=y
+CONFIG_SPI=y
 CONFIG_SPI_CADENCE=y
+CONFIG_SPI_DAVINCI=y
 CONFIG_SPI_OMAP24XX=y
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
+CONFIG_SPI_RSPI=y
+CONFIG_SPI_SH_MSIOF=m
+CONFIG_SPI_SH_HSPI=y
 CONFIG_SPI_SIRF=y
 CONFIG_SPI_SUN4I=y
 CONFIG_SPI_SUN6I=y
@@ -259,12 +290,15 @@ CONFIG_PINCTRL_PALMAS=y
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_GENERIC_PLATFORM=y
-CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_DAVINCI=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_EM=y
+CONFIG_GPIO_RCAR=y
 CONFIG_GPIO_XILINX=y
 CONFIG_GPIO_ZYNQ=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCF857X=y
 CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
 CONFIG_GPIO_SYSCON=y
@@ -276,10 +310,12 @@ CONFIG_POWER_RESET_AS3722=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_KEYSTONE=y
 CONFIG_POWER_RESET_SUN6I=y
+CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
+CONFIG_RCAR_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_DAVINCI_WATCHDOG
 CONFIG_ST_THERMAL_SYSCFG=y
@@ -290,6 +326,7 @@ CONFIG_ARM_SP805_WATCHDOG=y
 CONFIG_ORION_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_MESON_WATCHDOG=y
+CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
 CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_AXP20X=y
@@ -304,13 +341,16 @@ CONFIG_MFD_TPS65090=y
 CONFIG_MFD_TPS6586X=y
 CONFIG_MFD_TPS65910=y
 CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_AS3711=y
 CONFIG_REGULATOR_AS3722=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_BCM590XX=y
+CONFIG_REGULATOR_DA9210=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_MFD_SYSCON=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_REGULATOR_MAX8907=y
+CONFIG_REGULATOR_MAX8973=y
 CONFIG_REGULATOR_MAX77686=y
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_S2MPS11=y
@@ -324,18 +364,32 @@ CONFIG_REGULATOR_TWL4030=y
 CONFIG_REGULATOR_VEXPRESS=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=y
 CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_RENESAS_VSP1=m
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ADV7180=m
 CONFIG_DRM=y
+CONFIG_DRM_RCAR_DU=m
 CONFIG_DRM_TEGRA=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FB_WM8505=y
+CONFIG_FB_SH_MOBILE_LCDC=y
 CONFIG_FB_SIMPLE=y
+CONFIG_FB_SH_MOBILE_MERAM=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_AS3711=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_SOUND=y
@@ -343,6 +397,8 @@ CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
+CONFIG_SND_SOC_SH4_FSI=m
+CONFIG_SND_SOC_RCAR=m
 CONFIG_SND_SOC_TEGRA=y
 CONFIG_SND_SOC_TEGRA_RT5640=y
 CONFIG_SND_SOC_TEGRA_WM8753=y
@@ -350,6 +406,8 @@ CONFIG_SND_SOC_TEGRA_WM8903=y
 CONFIG_SND_SOC_TEGRA_TRIMSLICE=y
 CONFIG_SND_SOC_TEGRA_ALC5632=y
 CONFIG_SND_SOC_TEGRA_MAX98090=y
+CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_WM8978=m
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
@@ -362,6 +420,8 @@ CONFIG_USB_ISP1760_HCD=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_RENESAS_USBHS=m
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_CHIPIDEA=y
@@ -374,6 +434,10 @@ CONFIG_SAMSUNG_USB3PHY=y
 CONFIG_USB_GPIO_VBUS=y
 CONFIG_USB_ISP1301=y
 CONFIG_USB_MXS_PHY=y
+CONFIG_USB_RCAR_PHY=m
+CONFIG_USB_RCAR_GEN2_PHY=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_ARMMMCI=y
@@ -392,12 +456,14 @@ CONFIG_MMC_SDHCI_ST=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
 CONFIG_MMC_MVSDIO=y
-CONFIG_MMC_SUNXI=y
+CONFIG_MMC_SDHI=y
 CONFIG_MMC_DW=y
 CONFIG_MMC_DW_IDMAC=y
 CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SH_MMCIF=y
+CONFIG_MMC_SUNXI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
@@ -421,10 +487,12 @@ CONFIG_RTC_DRV_AS3722=y
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_RTC_DRV_MAX8907=y
 CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_TWL4030=y
 CONFIG_RTC_DRV_TPS6586X=y
 CONFIG_RTC_DRV_TPS65910=y
+CONFIG_RTC_DRV_S35390A=m
 CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_VT8500=y
@@ -436,6 +504,9 @@ CONFIG_DMADEVICES=y
 CONFIG_DW_DMAC=y
 CONFIG_MV_XOR=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_SH_DMAE=y
+CONFIG_RCAR_AUDMAC_PP=m
+CONFIG_RCAR_DMAC=y
 CONFIG_STE_DMA40=y
 CONFIG_SIRF_DMA=y
 CONFIG_TI_EDMA=y
@@ -468,6 +539,7 @@ CONFIG_IIO=y
 CONFIG_XILINX_XADC=y
 CONFIG_AK8975=y
 CONFIG_PWM=y
+CONFIG_PWM_RENESAS_TPU=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
 CONFIG_PHY_HIX5HD2_SATA=y
index b7386524c356619ceb9889553144e1292d2e1904..a097cffa1231f55b6e7c81ddfbbdc45af3c020ee 100644 (file)
@@ -114,6 +114,7 @@ CONFIG_MTD_PHYSMAP_OF=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ECC_BCH=y
 CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_OMAP_BCH=y
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_OMAP2=y
@@ -248,6 +249,7 @@ CONFIG_TWL6040_CORE=y
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_TI_ABB=y
+CONFIG_REGULATOR_TPS62360=m
 CONFIG_REGULATOR_TPS65023=y
 CONFIG_REGULATOR_TPS6507X=y
 CONFIG_REGULATOR_TPS65217=y
@@ -374,7 +376,7 @@ CONFIG_PWM_TIEHRPWM=m
 CONFIG_PWM_TWL=m
 CONFIG_PWM_TWL_LED=m
 CONFIG_OMAP_USB2=m
-CONFIG_TI_PIPE3=m
+CONFIG_TI_PIPE3=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
index 4767eb9caa78c89fe0c6b447b0d234fb71738285..ce0786efd26c3485d28baa6b7857e35350c1336d 100644 (file)
@@ -73,7 +73,7 @@ static inline void set_fs(mm_segment_t fs)
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
 }
 
-#define segment_eq(a,b)        ((a) == (b))
+#define segment_eq(a, b)       ((a) == (b))
 
 #define __addr_ok(addr) ({ \
        unsigned long flag; \
@@ -84,7 +84,7 @@ static inline void set_fs(mm_segment_t fs)
        (flag == 0); })
 
 /* We use 33-bit arithmetic here... */
-#define __range_ok(addr,size) ({ \
+#define __range_ok(addr, size) ({ \
        unsigned long flag, roksum; \
        __chk_user_ptr(addr);   \
        __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
@@ -123,7 +123,7 @@ extern int __get_user_64t_4(void *);
 #define __GUP_CLOBBER_32t_8 "lr", "cc"
 #define __GUP_CLOBBER_8        "lr", "cc"
 
-#define __get_user_x(__r2,__p,__e,__l,__s)                             \
+#define __get_user_x(__r2, __p, __e, __l, __s)                         \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
                __asmeq("%3", "r1")                                     \
@@ -134,7 +134,7 @@ extern int __get_user_64t_4(void *);
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
-#define __get_user_x_32t(__r2, __p, __e, __l, __s)                             \
+#define __get_user_x_32t(__r2, __p, __e, __l, __s)                     \
        __get_user_x(__r2, __p, __e, __l, 32t_8)
 #else
 #define __get_user_x_32t __get_user_x
@@ -158,7 +158,7 @@ extern int __get_user_64t_4(void *);
 #endif
 
 
-#define __get_user_check(x,p)                                                  \
+#define __get_user_check(x, p)                                         \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
@@ -196,10 +196,10 @@ extern int __get_user_64t_4(void *);
                __e;                                                    \
        })
 
-#define get_user(x,p)                                                  \
+#define get_user(x, p)                                                 \
        ({                                                              \
                might_fault();                                          \
-               __get_user_check(x,p);                                  \
+               __get_user_check(x, p);                                 \
         })
 
 extern int __put_user_1(void *, unsigned int);
@@ -207,7 +207,7 @@ extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__l,__s)                             \
+#define __put_user_x(__r2, __p, __e, __l, __s)                         \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%2", "r2")                 \
                __asmeq("%3", "r1")                                     \
@@ -216,7 +216,7 @@ extern int __put_user_8(void *, unsigned long long);
                : "0" (__p), "r" (__r2), "r" (__l)                      \
                : "ip", "lr", "cc")
 
-#define __put_user_check(x,p)                                                  \
+#define __put_user_check(x, p)                                         \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                const typeof(*(p)) __user *__tmp_p = (p);               \
@@ -242,10 +242,10 @@ extern int __put_user_8(void *, unsigned long long);
                __e;                                                    \
        })
 
-#define put_user(x,p)                                                  \
+#define put_user(x, p)                                                 \
        ({                                                              \
                might_fault();                                          \
-               __put_user_check(x,p);                                  \
+               __put_user_check(x, p);                                 \
         })
 
 #else /* CONFIG_MMU */
@@ -255,21 +255,21 @@ extern int __put_user_8(void *, unsigned long long);
  */
 #define USER_DS                        KERNEL_DS
 
-#define segment_eq(a,b)                (1)
-#define __addr_ok(addr)                ((void)(addr),1)
-#define __range_ok(addr,size)  ((void)(addr),0)
+#define segment_eq(a, b)               (1)
+#define __addr_ok(addr)                ((void)(addr), 1)
+#define __range_ok(addr, size) ((void)(addr), 0)
 #define get_fs()               (KERNEL_DS)
 
 static inline void set_fs(mm_segment_t fs)
 {
 }
 
-#define get_user(x,p)  __get_user(x,p)
-#define put_user(x,p)  __put_user(x,p)
+#define get_user(x, p) __get_user(x, p)
+#define put_user(x, p) __put_user(x, p)
 
 #endif /* CONFIG_MMU */
 
-#define access_ok(type,addr,size)      (__range_ok(addr,size) == 0)
+#define access_ok(type, addr, size)    (__range_ok(addr, size) == 0)
 
 #define user_addr_max() \
        (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
@@ -283,35 +283,35 @@ static inline void set_fs(mm_segment_t fs)
  * error occurs, and leave it unchanged on success.  Note that these
  * versions are void (ie, don't return a value as such).
  */
-#define __get_user(x,ptr)                                              \
+#define __get_user(x, ptr)                                             \
 ({                                                                     \
        long __gu_err = 0;                                              \
-       __get_user_err((x),(ptr),__gu_err);                             \
+       __get_user_err((x), (ptr), __gu_err);                           \
        __gu_err;                                                       \
 })
 
-#define __get_user_error(x,ptr,err)                                    \
+#define __get_user_error(x, ptr, err)                                  \
 ({                                                                     \
-       __get_user_err((x),(ptr),err);                                  \
+       __get_user_err((x), (ptr), err);                                \
        (void) 0;                                                       \
 })
 
-#define __get_user_err(x,ptr,err)                                      \
+#define __get_user_err(x, ptr, err)                                    \
 do {                                                                   \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        switch (sizeof(*(ptr))) {                                       \
-       case 1: __get_user_asm_byte(__gu_val,__gu_addr,err);    break;  \
-       case 2: __get_user_asm_half(__gu_val,__gu_addr,err);    break;  \
-       case 4: __get_user_asm_word(__gu_val,__gu_addr,err);    break;  \
+       case 1: __get_user_asm_byte(__gu_val, __gu_addr, err);  break;  \
+       case 2: __get_user_asm_half(__gu_val, __gu_addr, err);  break;  \
+       case 4: __get_user_asm_word(__gu_val, __gu_addr, err);  break;  \
        default: (__gu_val) = __get_user_bad();                         \
        }                                                               \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
 } while (0)
 
-#define __get_user_asm_byte(x,addr,err)                                \
+#define __get_user_asm_byte(x, addr, err)                      \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(ldrb) " %1,[%2],#0\n"                   \
        "2:\n"                                                  \
@@ -330,7 +330,7 @@ do {                                                                        \
        : "cc")
 
 #ifndef __ARMEB__
-#define __get_user_asm_half(x,__gu_addr,err)                   \
+#define __get_user_asm_half(x, __gu_addr, err)                 \
 ({                                                             \
        unsigned long __b1, __b2;                               \
        __get_user_asm_byte(__b1, __gu_addr, err);              \
@@ -338,7 +338,7 @@ do {                                                                        \
        (x) = __b1 | (__b2 << 8);                               \
 })
 #else
-#define __get_user_asm_half(x,__gu_addr,err)                   \
+#define __get_user_asm_half(x, __gu_addr, err)                 \
 ({                                                             \
        unsigned long __b1, __b2;                               \
        __get_user_asm_byte(__b1, __gu_addr, err);              \
@@ -347,7 +347,7 @@ do {                                                                        \
 })
 #endif
 
-#define __get_user_asm_word(x,addr,err)                                \
+#define __get_user_asm_word(x, addr, err)                      \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(ldr) "  %1,[%2],#0\n"                   \
        "2:\n"                                                  \
@@ -365,35 +365,35 @@ do {                                                                      \
        : "r" (addr), "i" (-EFAULT)                             \
        : "cc")
 
-#define __put_user(x,ptr)                                              \
+#define __put_user(x, ptr)                                             \
 ({                                                                     \
        long __pu_err = 0;                                              \
-       __put_user_err((x),(ptr),__pu_err);                             \
+       __put_user_err((x), (ptr), __pu_err);                           \
        __pu_err;                                                       \
 })
 
-#define __put_user_error(x,ptr,err)                                    \
+#define __put_user_error(x, ptr, err)                                  \
 ({                                                                     \
-       __put_user_err((x),(ptr),err);                                  \
+       __put_user_err((x), (ptr), err);                                \
        (void) 0;                                                       \
 })
 
-#define __put_user_err(x,ptr,err)                                      \
+#define __put_user_err(x, ptr, err)                                    \
 do {                                                                   \
        unsigned long __pu_addr = (unsigned long)(ptr);                 \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        switch (sizeof(*(ptr))) {                                       \
-       case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);    break;  \
-       case 2: __put_user_asm_half(__pu_val,__pu_addr,err);    break;  \
-       case 4: __put_user_asm_word(__pu_val,__pu_addr,err);    break;  \
-       case 8: __put_user_asm_dword(__pu_val,__pu_addr,err);   break;  \
+       case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);  break;  \
+       case 2: __put_user_asm_half(__pu_val, __pu_addr, err);  break;  \
+       case 4: __put_user_asm_word(__pu_val, __pu_addr, err);  break;  \
+       case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break;  \
        default: __put_user_bad();                                      \
        }                                                               \
 } while (0)
 
-#define __put_user_asm_byte(x,__pu_addr,err)                   \
+#define __put_user_asm_byte(x, __pu_addr, err)                 \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(strb) " %1,[%2],#0\n"                   \
        "2:\n"                                                  \
@@ -411,22 +411,22 @@ do {                                                                      \
        : "cc")
 
 #ifndef __ARMEB__
-#define __put_user_asm_half(x,__pu_addr,err)                   \
+#define __put_user_asm_half(x, __pu_addr, err)                 \
 ({                                                             \
-       unsigned long __temp = (unsigned long)(x);              \
+       unsigned long __temp = (__force unsigned long)(x);      \
        __put_user_asm_byte(__temp, __pu_addr, err);            \
        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);   \
 })
 #else
-#define __put_user_asm_half(x,__pu_addr,err)                   \
+#define __put_user_asm_half(x, __pu_addr, err)                 \
 ({                                                             \
-       unsigned long __temp = (unsigned long)(x);              \
+       unsigned long __temp = (__force unsigned long)(x);      \
        __put_user_asm_byte(__temp >> 8, __pu_addr, err);       \
        __put_user_asm_byte(__temp, __pu_addr + 1, err);        \
 })
 #endif
 
-#define __put_user_asm_word(x,__pu_addr,err)                   \
+#define __put_user_asm_word(x, __pu_addr, err)                 \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(str) "  %1,[%2],#0\n"                   \
        "2:\n"                                                  \
@@ -451,7 +451,7 @@ do {                                                                        \
 #define        __reg_oper1     "%R2"
 #endif
 
-#define __put_user_asm_dword(x,__pu_addr,err)                  \
+#define __put_user_asm_dword(x, __pu_addr, err)                        \
        __asm__ __volatile__(                                   \
  ARM(  "1:     " TUSER(str) "  " __reg_oper1 ", [%1], #4\n"    ) \
  ARM(  "2:     " TUSER(str) "  " __reg_oper0 ", [%1]\n"        ) \
@@ -480,9 +480,9 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
 #else
-#define __copy_from_user(to,from,n)    (memcpy(to, (void __force *)from, n), 0)
-#define __copy_to_user(to,from,n)      (memcpy((void __force *)to, from, n), 0)
-#define __clear_user(addr,n)           (memset((void __force *)addr, 0, n), 0)
+#define __copy_from_user(to, from, n)  (memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to, from, n)    (memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr, n)          (memset((void __force *)addr, 0, n), 0)
 #endif
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
index dd9acc95ebc0ef3df443ac9e48b3aa4b06d7b73b..61b53c46edfa7556827ae552082e79ece19df909 100644 (file)
@@ -231,7 +231,7 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 /*
  * PMU platform driver and devicetree bindings.
  */
-static struct of_device_id cpu_pmu_of_device_ids[] = {
+static const struct of_device_id cpu_pmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a17-pmu",    .data = armv7_a17_pmu_init},
        {.compatible = "arm,cortex-a15-pmu",    .data = armv7_a15_pmu_init},
        {.compatible = "arm,cortex-a12-pmu",    .data = armv7_a12_pmu_init},
index 8423be76080eaa293e8f1d36f740548ddbad6973..52241207a82a3d3a802a90c1fe47e78150e8d5ba 100644 (file)
@@ -2,5 +2,7 @@ config MACH_ASM9260
        bool "Alphascale ASM9260"
        depends on ARCH_MULTI_V5
        select CPU_ARM926T
+       select ASM9260_TIMER
+       select GENERIC_CLOCKEVENTS
        help
          Support for Alphascale ASM9260 based platform.
index c6740e359a44e253abea81a8fac9549e7994804f..c74a44324e5bc3dc7cb4b5e0f31e64ca8c167f5f 100644 (file)
@@ -64,7 +64,6 @@ config SOC_SAMA5D4
        select SOC_SAMA5
        select CLKSRC_MMIO
        select CACHE_L2X0
-       select CACHE_PL310
        select HAVE_FB_ATMEL
        select HAVE_AT91_UTMI
        select HAVE_AT91_SMD
index 51761f8927b7a4959468ad0df8c4ddf86565303a..b00d09555f2b7662cb3262ace823a11ccb3fd5e5 100644 (file)
@@ -183,7 +183,7 @@ static struct clock_event_device clkevt = {
 void __iomem *at91_st_base;
 EXPORT_SYMBOL_GPL(at91_st_base);
 
-static struct of_device_id at91rm9200_st_timer_ids[] = {
+static const struct of_device_id at91rm9200_st_timer_ids[] = {
        { .compatible = "atmel,at91rm9200-st" },
        { /* sentinel */ }
 };
index a6e726a6e0b578dc0c6e6dfaa2c996d3baaa59bf..583369ffc284d5588b4c7f4c742cedbefb7614f3 100644 (file)
@@ -35,10 +35,10 @@ extern void __init at91sam9260_pm_init(void);
 extern void __init at91sam9g45_pm_init(void);
 extern void __init at91sam9x5_pm_init(void);
 #else
-void __init at91rm9200_pm_init(void) { }
-void __init at91sam9260_pm_init(void) { }
-void __init at91sam9g45_pm_init(void) { }
-void __init at91sam9x5_pm_init(void) { }
+static inline void __init at91rm9200_pm_init(void) { }
+static inline void __init at91sam9260_pm_init(void) { }
+static inline void __init at91sam9g45_pm_init(void) { }
+static inline void __init at91sam9x5_pm_init(void) { }
 #endif
 
 #endif /* _AT91_GENERIC_H */
index af8d8afc2e12f05d34a395f10408acdec2d8917f..5e34fb1433098aee3916f2b4c3019a14d2aa5ba8 100644 (file)
@@ -226,7 +226,7 @@ void at91_pm_set_standby(void (*at91_standby)(void))
        }
 }
 
-static struct of_device_id ramc_ids[] = {
+static const struct of_device_id ramc_ids[] __initconst = {
        { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
        { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
        { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
@@ -234,7 +234,7 @@ static struct of_device_id ramc_ids[] = {
        { /*sentinel*/ }
 };
 
-static void at91_dt_ramc(void)
+static __init void at91_dt_ramc(void)
 {
        struct device_node *np;
        const struct of_device_id *of_id;
index 19e5a1d9539751c8fba373a88e3f31f558ffd773..4db76a493c5a68e066aa9f664112570dddef5cf3 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <asm/mach/arch.h>
 
-static const char *axxia_dt_match[] __initconst = {
+static const char *const axxia_dt_match[] __initconst = {
        "lsi,axm5516",
        "lsi,axm5516-sim",
        "lsi,axm5516-emu",
index aaeec78c3ec4d05df74531a36245d21c4b9e2dd8..8b11f44bb36e5a3dcfe59cf331e18730e71c9ec5 100644 (file)
@@ -68,7 +68,7 @@ config ARCH_BCM_MOBILE
          This enables support for systems based on Broadcom mobile SoCs.
 
 config ARCH_BCM_281XX
-       bool "Broadcom BCM281XX SoC family"
+       bool "Broadcom BCM281XX SoC family" if ARCH_MULTI_V7
        select ARCH_BCM_MOBILE
        select HAVE_SMP
        help
@@ -77,7 +77,7 @@ config ARCH_BCM_281XX
          variants.
 
 config ARCH_BCM_21664
-       bool "Broadcom BCM21664 SoC family"
+       bool "Broadcom BCM21664 SoC family" if ARCH_MULTI_V7
        select ARCH_BCM_MOBILE
        select HAVE_SMP
        help
index 60a5afa06ed7f90428729c2daa1ef59beeeb1c0d..3a60f7ee3f0cc1583788f9cd3da81a5723354647 100644 (file)
@@ -17,7 +17,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
-static const char *brcmstb_match[] __initconst = {
+static const char *const brcmstb_match[] __initconst = {
        "brcm,bcm7445",
        "brcm,brcmstb",
        NULL
index 584e8d4e28926956bed6971713d8f2f3758f59a7..cd30f6f5f2ff15723a2abae07b471b03f65f9b11 100644 (file)
@@ -32,12 +32,14 @@ config ARCH_DAVINCI_DM646x
 
 config ARCH_DAVINCI_DA830
        bool "DA830/OMAP-L137/AM17x based system"
+       depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
        select ARCH_DAVINCI_DA8XX
        select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1
        select CP_INTC
 
 config ARCH_DAVINCI_DA850
        bool "DA850/OMAP-L138/AM18x based system"
+       depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
        select ARCH_DAVINCI_DA8XX
        select CP_INTC
 
index f703d82f08a80d22adc3e8a1f45acbd7fbfa846c..438f68547f4c79ea4a12b0bf82bd85583c02be62 100644 (file)
@@ -20,7 +20,7 @@
 
 #define DA8XX_NUM_UARTS        3
 
-static struct of_device_id da8xx_irq_match[] __initdata = {
+static const struct of_device_id da8xx_irq_match[] __initconst = {
        { .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
        { }
 };
index a8eb909a2b6ccd94dd7f66c0c11370afc9503184..6a2ff0a654a5b53efce08a40c60a1d25f603b166 100644 (file)
@@ -30,7 +30,7 @@ static void __iomem *pinmux_base;
 /*
  * Sets the DAVINCI MUX register based on the table
  */
-int __init_or_module davinci_cfg_reg(const unsigned long index)
+int davinci_cfg_reg(const unsigned long index)
 {
        static DEFINE_SPINLOCK(mux_spin_lock);
        struct davinci_soc_info *soc_info = &davinci_soc_info;
@@ -101,7 +101,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
 }
 EXPORT_SYMBOL(davinci_cfg_reg);
 
-int __init_or_module davinci_cfg_reg_list(const short pins[])
+int davinci_cfg_reg_list(const short pins[])
 {
        int i, error = -EINVAL;
 
index 2013f73797ed6c24d8c6f5f5cd850b96cb1f9869..9e9dfdfad9d77fd670fd186d61907f503815cde3 100644 (file)
@@ -227,7 +227,7 @@ static void __init exynos_dt_machine_init(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static char const *exynos_dt_compat[] __initconst = {
+static char const *const exynos_dt_compat[] __initconst = {
        "samsung,exynos3",
        "samsung,exynos3250",
        "samsung,exynos4",
index 666ec3e5b03fbc05592bf66d82b4bd8d311c41de..52e2b1a2fddbfcf7485d1c504c4e66cec3609388 100644 (file)
@@ -587,7 +587,7 @@ static struct exynos_pm_data exynos5420_pm_data = {
        .cpu_suspend    = exynos5420_cpu_suspend,
 };
 
-static struct of_device_id exynos_pmu_of_device_ids[] = {
+static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = {
        {
                .compatible = "samsung,exynos3250-pmu",
                .data = &exynos3250_pm_data,
index 07a09570175d8b034082a3639ddfa01999bf59d3..231fba0d03e5135466aa55bd765f684b8900084e 100644 (file)
@@ -169,7 +169,7 @@ static void __init highbank_init(void)
                platform_device_register(&highbank_cpuidle_device);
 }
 
-static const char *highbank_match[] __initconst = {
+static const char *const highbank_match[] __initconst = {
        "calxeda,highbank",
        "calxeda,ecx-2000",
        NULL,
index 76b907078b58d365a1acaa73ac85e98e9862833f..c6bd7c7bd4aa4949203505247f0fd9db21ea0c5b 100644 (file)
@@ -45,7 +45,7 @@ static void __init hi3620_map_io(void)
        iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc));
 }
 
-static const char *hi3xxx_compat[] __initconst = {
+static const char *const hi3xxx_compat[] __initconst = {
        "hisilicon,hi3620-hi4511",
        NULL,
 };
@@ -55,7 +55,7 @@ DT_MACHINE_START(HI3620, "Hisilicon Hi3620 (Flattened Device Tree)")
        .dt_compat      = hi3xxx_compat,
 MACHINE_END
 
-static const char *hix5hd2_compat[] __initconst = {
+static const char *const hix5hd2_compat[] __initconst = {
        "hisilicon,hix5hd2",
        NULL,
 };
@@ -64,7 +64,7 @@ DT_MACHINE_START(HIX5HD2_DT, "Hisilicon HIX5HD2 (Flattened Device Tree)")
        .dt_compat      = hix5hd2_compat,
 MACHINE_END
 
-static const char *hip04_compat[] __initconst = {
+static const char *const hip04_compat[] __initconst = {
        "hisilicon,hip04-d01",
        NULL,
 };
@@ -73,7 +73,7 @@ DT_MACHINE_START(HIP04, "Hisilicon HiP04 (Flattened Device Tree)")
        .dt_compat      = hip04_compat,
 MACHINE_END
 
-static const char *hip01_compat[] __initconst = {
+static const char *const hip01_compat[] __initconst = {
        "hisilicon,hip01",
        "hisilicon,hip01-ca9x2",
        NULL,
index a377f95033aecc6edaab43a7d012df3997b27017..0411f0664c15c0bce0469b9cd073205834ba366a 100644 (file)
@@ -68,7 +68,7 @@ int imx_mmdc_get_ddr_type(void)
        return ddr_type;
 }
 
-static struct of_device_id imx_mmdc_dt_ids[] = {
+static const struct of_device_id imx_mmdc_dt_ids[] = {
        { .compatible = "fsl,imx6q-mmdc", },
        { /* sentinel */ }
 };
index 6a722860e34dc2a4fb0d79f095656067d191bd4a..b024390199639ca31201730ccdbcfceae577be2b 100644 (file)
@@ -245,8 +245,10 @@ static inline void outb(u8 value, u32 addr)
 }
 
 #define outsb outsb
-static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count)
+static inline void outsb(u32 io_addr, const void *p, u32 count)
 {
+       const u8 *vaddr = p;
+
        while (count--)
                outb(*vaddr++, io_addr);
 }
@@ -262,8 +264,9 @@ static inline void outw(u16 value, u32 addr)
 }
 
 #define outsw outsw
-static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count)
+static inline void outsw(u32 io_addr, const void *p, u32 count)
 {
+       const u16 *vaddr = p;
        while (count--)
                outw(cpu_to_le16(*vaddr++), io_addr);
 }
@@ -275,8 +278,9 @@ static inline void outl(u32 value, u32 addr)
 }
 
 #define outsl outsl
-static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count)
+static inline void outsl(u32 io_addr, const void *p, u32 count)
 {
+       const u32 *vaddr = p;
        while (count--)
                outl(cpu_to_le32(*vaddr++), io_addr);
 }
@@ -294,8 +298,9 @@ static inline u8 inb(u32 addr)
 }
 
 #define insb insb
-static inline void insb(u32 io_addr, u8 *vaddr, u32 count)
+static inline void insb(u32 io_addr, void *p, u32 count)
 {
+       u8 *vaddr = p;
        while (count--)
                *vaddr++ = inb(io_addr);
 }
@@ -313,8 +318,9 @@ static inline u16 inw(u32 addr)
 }
 
 #define insw insw
-static inline void insw(u32 io_addr, u16 *vaddr, u32 count)
+static inline void insw(u32 io_addr, void *p, u32 count)
 {
+       u16 *vaddr = p;
        while (count--)
                *vaddr++ = le16_to_cpu(inw(io_addr));
 }
@@ -330,8 +336,9 @@ static inline u32 inl(u32 addr)
 }
 
 #define insl insl
-static inline void insl(u32 io_addr, u32 *vaddr, u32 count)
+static inline void insl(u32 io_addr, void *p, u32 count)
 {
+       u32 *vaddr = p;
        while (count--)
                *vaddr++ = le32_to_cpu(inl(io_addr));
 }
index 7f352de2609909ad6c5087aaa203f744876fd4b7..06620875813ae93c76dee1628c94d5439e94783e 100644 (file)
@@ -103,7 +103,7 @@ static void __init keystone_init_meminfo(void)
        pr_info("Switching to high address space at 0x%llx\n", (u64)offset);
 }
 
-static const char *keystone_match[] __initconst = {
+static const char *const keystone_match[] __initconst = {
        "ti,keystone",
        NULL,
 };
index ef6041e7e6754c5daf8c07d7f91309f32ce4f4b1..41bebfd296dcbacac7a9d0c60747d5b1c01c73d4 100644 (file)
@@ -61,7 +61,7 @@ static struct pm_clk_notifier_block platform_domain_notifier = {
        .pm_domain = &keystone_pm_domain,
 };
 
-static struct of_device_id of_keystone_table[] = {
+static const struct of_device_id of_keystone_table[] = {
        {.compatible = "ti,keystone"},
        { /* end of list */ },
 };
index 2756351dbb35acac76190f3342a92e006d77c9d6..10bfa03e58d4777a03377391a63f8a69ec2e3c78 100644 (file)
@@ -213,7 +213,7 @@ void __init timer_init(int irq)
 }
 
 #ifdef CONFIG_OF
-static struct of_device_id mmp_timer_dt_ids[] = {
+static const struct of_device_id mmp_timer_dt_ids[] = {
        { .compatible = "mrvl,mmp-timer", },
        {}
 };
index 61bfe584a9d7fad4a7204d3ddb2a0a42afe3a23b..fc832040c6e979f139e272c73a47d167a65d8df5 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/input.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = {
        [1] = {
                .start  = MSM_GPIO_TO_INT(49),
                .end    = MSM_GPIO_TO_INT(49),
-               .flags  = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
        },
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static struct platform_device *devices[] __initdata = {
index 4c748616ef47eb9de63d751072e8d779fc3ffe44..10016a3bc69826351830ea53f822c7a8e3c4f033 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/usb/msm_hsusb.h>
 #include <linux/err.h>
 #include <linux/clkdev.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = {
                .flags = IORESOURCE_MEM,
        },
        [1] = {
-               .flags = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
        },
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static int __init msm_init_smc91x(void)
index b5895f040caaf73a9f304f91c0fca2533a9dcc86..e46e9ea1e187ecfbbeee56c68a882c68076f28b5 100644 (file)
@@ -51,7 +51,7 @@ enum {
        COHERENCY_FABRIC_TYPE_ARMADA_380,
 };
 
-static struct of_device_id of_coherency_table[] = {
+static const struct of_device_id of_coherency_table[] = {
        {.compatible = "marvell,coherency-fabric",
         .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
        {.compatible = "marvell,armada-375-coherency-fabric",
index d8ab605a44fa7f80a87edbd2ffeadd386903ea16..8b9f5e202ccf67d34681a2e1966fc424680a417f 100644 (file)
@@ -104,7 +104,7 @@ static void __iomem *pmsu_mp_base;
 
 static void *mvebu_cpu_resume;
 
-static struct of_device_id of_pmsu_table[] = {
+static const struct of_device_id of_pmsu_table[] = {
        { .compatible = "marvell,armada-370-pmsu", },
        { .compatible = "marvell,armada-370-xp-pmsu", },
        { .compatible = "marvell,armada-380-pmsu", },
index a068cb5c2ce809c285d254a3f6fb924bb9f4ed30..c6c132acd7a61a052e27dce226d5b8bb62648307 100644 (file)
@@ -126,7 +126,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
                return -ENODEV;
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
 void mvebu_armada375_smp_wa_init(void)
 {
        u32 dev, rev;
index 3d24ebf120953d936fa5785fdcd199fcfbf8f1ba..3445a5686805e082d6464aa35df2bb0389b3e276 100644 (file)
@@ -27,7 +27,7 @@
 #include "mmio.h"
 #include "clcd.h"
 
-static const char *nspire_dt_match[] __initconst = {
+static const char *const nspire_dt_match[] __initconst = {
        "ti,nspire",
        "ti,nspire-cx",
        "ti,nspire-tp",
index 00d5d8f9f150ed8e8a475c9103bdc74da2f3cdf1..b83f18fcec9b3c693944a6a2d0aafccf8e660a75 100644 (file)
@@ -190,7 +190,7 @@ obj-$(CONFIG_SOC_OMAP2430)          += clock2430.o
 obj-$(CONFIG_ARCH_OMAP3)               += $(clock-common) clock3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += clock34xx.o clkt34xx_dpll3m2.o
 obj-$(CONFIG_ARCH_OMAP3)               += clock3517.o clock36xx.o
-obj-$(CONFIG_ARCH_OMAP3)               += dpll3xxx.o cclock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += dpll3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += clkt_iclk.o
 obj-$(CONFIG_ARCH_OMAP4)               += $(clock-common)
 obj-$(CONFIG_ARCH_OMAP4)               += dpll3xxx.o dpll44xx.o
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
deleted file mode 100644 (file)
index e79c80b..0000000
+++ /dev/null
@@ -1,3688 +0,0 @@
-/*
- * OMAP3 clock data
- *
- * Copyright (C) 2007-2012 Texas Instruments, Inc.
- * Copyright (C) 2007-2011 Nokia Corporation
- *
- * Written by Paul Walmsley
- * Updated to COMMON clk data format by Rajendra Nayak <rnayak@ti.com>
- * With many device clock fixes by Kevin Hilman and Jouni Högander
- * DPLL bypass clock support added by Roman Tereshonkov
- *
- */
-
-/*
- * Virtual clocks are introduced as convenient tools.
- * They are sources for other clocks and not supposed
- * to be requested from drivers directly.
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/clk-private.h>
-#include <linux/list.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock3xxx.h"
-#include "clock34xx.h"
-#include "clock36xx.h"
-#include "clock3517.h"
-#include "cm3xxx.h"
-#include "cm-regbits-34xx.h"
-#include "prm3xxx.h"
-#include "prm-regbits-34xx.h"
-#include "control.h"
-
-/*
- * clocks
- */
-
-#define OMAP_CM_REGADDR                OMAP34XX_CM_REGADDR
-
-/* Maximum DPLL multiplier, divider values for OMAP3 */
-#define OMAP3_MAX_DPLL_MULT            2047
-#define OMAP3630_MAX_JTYPE_DPLL_MULT   4095
-#define OMAP3_MAX_DPLL_DIV             128
-
-DEFINE_CLK_FIXED_RATE(dummy_apb_pclk, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(omap_32k_fck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(pclk_ck, CLK_IS_ROOT, 27000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(rmii_ck, CLK_IS_ROOT, 50000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(secure_32k_fck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(sys_altclk, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_12m_ck, CLK_IS_ROOT, 12000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_13m_ck, CLK_IS_ROOT, 13000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_16_8m_ck, CLK_IS_ROOT, 16800000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_38_4m_ck, CLK_IS_ROOT, 38400000, 0x0);
-
-static const char *osc_sys_ck_parent_names[] = {
-       "virt_12m_ck", "virt_13m_ck", "virt_19200000_ck", "virt_26000000_ck",
-       "virt_38_4m_ck", "virt_16_8m_ck",
-};
-
-DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0,
-              OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT,
-              OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0,
-                  OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT,
-                  OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct dpll_data dpll3_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-       .mult_mask      = OMAP3430_CORE_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_CORE_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_CORE_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_CORE_DPLL_MASK,
-       .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_CORE_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_CORE_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_CORE_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll3_ck;
-
-static const char *dpll3_ck_parent_names[] = {
-       "sys_ck",
-       "sys_ck",
-};
-
-static const struct clk_ops dpll3_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll3_ck_hw = {
-       .hw = {
-               .clk = &dpll3_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll3_dd,
-       .clkdm_name     = "dpll3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-                  OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk core_ck;
-
-static const char *core_ck_parent_names[] = {
-       "dpll3_m2_ck",
-};
-
-static const struct clk_ops core_ck_ops = {};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL);
-DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);
-
-DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0,
-                  OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk security_l4_ick2;
-
-static const char *security_l4_ick2_parent_names[] = {
-       "l4_ick",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(security_l4_ick2, NULL);
-DEFINE_STRUCT_CLK(security_l4_ick2, security_l4_ick2_parent_names, core_ck_ops);
-
-static struct clk aes1_ick;
-
-static const char *aes1_ick_parent_names[] = {
-       "security_l4_ick2",
-};
-
-static const struct clk_ops aes1_ick_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap aes1_ick_hw = {
-       .hw = {
-               .clk = &aes1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_AES1_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(aes1_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk core_l4_ick;
-
-static const struct clk_ops core_l4_ick_ops = {
-       .init           = &omap2_init_clk_clkdm,
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk aes2_ick;
-
-static const char *aes2_ick_parent_names[] = {
-       "core_l4_ick",
-};
-
-static const struct clk_ops aes2_ick_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap aes2_ick_hw = {
-       .hw = {
-               .clk = &aes2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_AES2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(aes2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk dpll1_fck;
-
-static struct dpll_data dpll1_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
-       .mult_mask      = OMAP3430_MPU_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_MPU_DPLL_DIV_MASK,
-       .clk_bypass     = &dpll1_fck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_MPU_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
-       .enable_mask    = OMAP3430_EN_MPU_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_MPU_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
-       .autoidle_mask  = OMAP3430_AUTO_MPU_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
-       .idlest_mask    = OMAP3430_ST_MPU_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll1_ck;
-
-static const struct clk_ops dpll1_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-       .set_parent     = &omap3_noncore_dpll_set_parent,
-       .set_rate_and_parent    = &omap3_noncore_dpll_set_rate_and_parent,
-       .determine_rate = &omap3_noncore_dpll_determine_rate,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll1_ck_hw = {
-       .hw = {
-               .clk = &dpll1_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll1_dd,
-       .clkdm_name     = "dpll1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 0x0, 2, 1);
-
-DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
-                  OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk mpu_ck;
-
-static const char *mpu_ck_parent_names[] = {
-       "dpll1_x2m2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm");
-DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops);
-
-DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
-                  OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH,
-                  0x0, NULL);
-
-static struct clk cam_ick;
-
-static struct clk_hw_omap cam_ick_hw = {
-       .hw = {
-               .clk = &cam_ick,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_CAM_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-DEFINE_STRUCT_CLK(cam_ick, security_l4_ick2_parent_names, aes2_ick_ops);
-
-/* DPLL4 */
-/* Supplies 96MHz, 54Mhz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
-/* Type: DPLL */
-static struct dpll_data dpll4_dd;
-
-static struct dpll_data dpll4_dd_34xx __initdata = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
-       .mult_mask      = OMAP3430_PERIPH_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_PERIPH_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_PERIPH_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_PERIPH_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_PERIPH_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct dpll_data dpll4_dd_3630 __initdata = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
-       .mult_mask      = OMAP3630_PERIPH_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_PERIPH_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_PERIPH_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_PERIPH_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_PERIPH_CLK_MASK,
-       .dco_mask       = OMAP3630_PERIPH_DPLL_DCO_SEL_MASK,
-       .sddiv_mask     = OMAP3630_PERIPH_DPLL_SD_DIV_MASK,
-       .max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-       .flags          = DPLL_J_TYPE
-};
-
-static struct clk dpll4_ck;
-
-static const struct clk_ops dpll4_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .set_rate       = &omap3_dpll4_set_rate,
-       .set_parent     = &omap3_noncore_dpll_set_parent,
-       .set_rate_and_parent    = &omap3_dpll4_set_rate_and_parent,
-       .determine_rate = &omap3_noncore_dpll_determine_rate,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll4_ck_hw = {
-       .hw = {
-               .clk = &dpll4_ck,
-       },
-       .dpll_data      = &dpll4_dd,
-       .ops            = &clkhwops_omap3_dpll,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops);
-
-static const struct clk_div_table dpll4_mx_ck_div_table[] = {
-       { .div = 1, .val = 1 },
-       { .div = 2, .val = 2 },
-       { .div = 3, .val = 3 },
-       { .div = 4, .val = 4 },
-       { .div = 5, .val = 5 },
-       { .div = 6, .val = 6 },
-       { .div = 7, .val = 7 },
-       { .div = 8, .val = 8 },
-       { .div = 9, .val = 9 },
-       { .div = 10, .val = 10 },
-       { .div = 11, .val = 11 },
-       { .div = 12, .val = 12 },
-       { .div = 13, .val = 13 },
-       { .div = 14, .val = 14 },
-       { .div = 15, .val = 15 },
-       { .div = 16, .val = 16 },
-       { .div = 17, .val = 17 },
-       { .div = 18, .val = 18 },
-       { .div = 19, .val = 19 },
-       { .div = 20, .val = 20 },
-       { .div = 21, .val = 21 },
-       { .div = 22, .val = 22 },
-       { .div = 23, .val = 23 },
-       { .div = 24, .val = 24 },
-       { .div = 25, .val = 25 },
-       { .div = 26, .val = 26 },
-       { .div = 27, .val = 27 },
-       { .div = 28, .val = 28 },
-       { .div = 29, .val = 29 },
-       { .div = 30, .val = 30 },
-       { .div = 31, .val = 31 },
-       { .div = 32, .val = 32 },
-       { .div = 0 },
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m5x2_ck;
-
-static const char *dpll4_m5x2_ck_parent_names[] = {
-       "dpll4_m5_ck",
-};
-
-static const struct clk_ops dpll4_m5x2_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .set_rate       = &omap3_clkoutx2_set_rate,
-       .recalc_rate    = &omap3_clkoutx2_recalc,
-       .round_rate     = &omap3_clkoutx2_round_rate,
-};
-
-static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap36xx_pwrdn_clk_enable_with_hsdiv_restore,
-       .disable        = &omap2_dflt_clk_disable,
-       .recalc_rate    = &omap3_clkoutx2_recalc,
-};
-
-static struct clk_hw_omap dpll4_m5x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m5x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_CAM_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
-                       dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
-
-static struct clk dpll4_m5x2_ck_3630 = {
-       .name           = "dpll4_m5x2_ck",
-       .hw             = &dpll4_m5x2_ck_hw.hw,
-       .parent_names   = dpll4_m5x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m5x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-static struct clk cam_mclk;
-
-static const char *cam_mclk_parent_names[] = {
-       "dpll4_m5x2_ck",
-};
-
-static struct clk_hw_omap cam_mclk_hw = {
-       .hw = {
-               .clk = &cam_mclk,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_CAM_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-static struct clk cam_mclk = {
-       .name           = "cam_mclk",
-       .hw             = &cam_mclk_hw.hw,
-       .parent_names   = cam_mclk_parent_names,
-       .num_parents    = ARRAY_SIZE(cam_mclk_parent_names),
-       .ops            = &aes2_ick_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-static const struct clksel_rate clkout2_src_core_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_sys_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_96m_rates[] = {
-       { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
-                  OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m2x2_ck;
-
-static const char *dpll4_m2x2_ck_parent_names[] = {
-       "dpll4_m2_ck",
-};
-
-static struct clk_hw_omap dpll4_m2x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m2x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_96M_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m2x2_ck_3630 = {
-       .name           = "dpll4_m2x2_ck",
-       .hw             = &dpll4_m2x2_ck_hw.hw,
-       .parent_names   = dpll4_m2x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m2x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-static struct clk omap_96m_alwon_fck;
-
-static const char *omap_96m_alwon_fck_parent_names[] = {
-       "dpll4_m2x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(omap_96m_alwon_fck, NULL);
-DEFINE_STRUCT_CLK(omap_96m_alwon_fck, omap_96m_alwon_fck_parent_names,
-                 core_ck_ops);
-
-static struct clk cm_96m_fck;
-
-static const char *cm_96m_fck_parent_names[] = {
-       "omap_96m_alwon_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(cm_96m_fck, NULL);
-DEFINE_STRUCT_CLK(cm_96m_fck, cm_96m_fck_parent_names, core_ck_ops);
-
-static const struct clksel_rate clkout2_src_54m_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH,
-                  0, dpll4_mx_ck_div_table, NULL);
-
-static struct clk dpll4_m3x2_ck;
-
-static const char *dpll4_m3x2_ck_parent_names[] = {
-       "dpll4_m3_ck",
-};
-
-static struct clk_hw_omap dpll4_m3x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m3x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_TV_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m3x2_ck_3630 = {
-       .name           = "dpll4_m3x2_ck",
-       .hw             = &dpll4_m3x2_ck_hw.hw,
-       .parent_names   = dpll4_m3x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m3x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-static const char *omap_54m_fck_parent_names[] = {
-       "dpll4_m3x2_ck", "sys_altclk",
-};
-
-DEFINE_CLK_MUX(omap_54m_fck, omap_54m_fck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_SOURCE_54M_SHIFT,
-              OMAP3430_SOURCE_54M_WIDTH, 0x0, NULL);
-
-static const struct clksel clkout2_src_clksel[] = {
-       { .parent = &core_ck, .rates = clkout2_src_core_rates },
-       { .parent = &sys_ck, .rates = clkout2_src_sys_rates },
-       { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
-       { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
-       { .parent = NULL },
-};
-
-static const char *clkout2_src_ck_parent_names[] = {
-       "core_ck", "sys_ck", "cm_96m_fck", "omap_54m_fck",
-};
-
-static const struct clk_ops clkout2_src_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(clkout2_src_ck, "core_clkdm",
-                        clkout2_src_clksel, OMAP3430_CM_CLKOUT_CTRL,
-                        OMAP3430_CLKOUT2SOURCE_MASK,
-                        OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_EN_SHIFT,
-                        NULL, clkout2_src_ck_parent_names, clkout2_src_ck_ops);
-
-static const struct clksel_rate omap_48m_cm96m_rates[] = {
-       { .div = 2, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate omap_48m_alt_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel omap_48m_clksel[] = {
-       { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
-       { .parent = &sys_altclk, .rates = omap_48m_alt_rates },
-       { .parent = NULL },
-};
-
-static const char *omap_48m_fck_parent_names[] = {
-       "cm_96m_fck", "sys_altclk",
-};
-
-static struct clk omap_48m_fck;
-
-static const struct clk_ops omap_48m_fck_ops = {
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-static struct clk_hw_omap omap_48m_fck_hw = {
-       .hw = {
-               .clk = &omap_48m_fck,
-       },
-       .clksel         = omap_48m_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-       .clksel_mask    = OMAP3430_SOURCE_48M_MASK,
-};
-
-DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 4);
-
-static struct clk core_12m_fck;
-
-static const char *core_12m_fck_parent_names[] = {
-       "omap_12m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_12m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_12m_fck, core_12m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk core_48m_fck;
-
-static const char *core_48m_fck_parent_names[] = {
-       "omap_48m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_48m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
-
-static const char *omap_96m_fck_parent_names[] = {
-       "cm_96m_fck", "sys_ck",
-};
-
-DEFINE_CLK_MUX(omap_96m_fck, omap_96m_fck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-              OMAP3430_SOURCE_96M_SHIFT, OMAP3430_SOURCE_96M_WIDTH, 0x0, NULL);
-
-static struct clk core_96m_fck;
-
-static const char *core_96m_fck_parent_names[] = {
-       "omap_96m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_96m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_96m_fck, core_96m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk core_l3_ick;
-
-static const char *core_l3_ick_parent_names[] = {
-       "l3_ick",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm");
-DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops);
-
-DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1);
-
-static struct clk corex2_fck;
-
-static const char *corex2_fck_parent_names[] = {
-       "dpll3_m2x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL);
-DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops);
-
-static const char *cpefuse_fck_parent_names[] = {
-       "sys_ck",
-};
-
-static struct clk cpefuse_fck;
-
-static struct clk_hw_omap cpefuse_fck_hw = {
-       .hw = {
-               .clk = &cpefuse_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_CPEFUSE_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk csi2_96m_fck;
-
-static const char *csi2_96m_fck_parent_names[] = {
-       "core_96m_fck",
-};
-
-static struct clk_hw_omap csi2_96m_fck_hw = {
-       .hw = {
-               .clk = &csi2_96m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_CSI2_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-DEFINE_STRUCT_CLK(csi2_96m_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk d2d_26m_fck;
-
-static struct clk_hw_omap d2d_26m_fck_hw = {
-       .hw = {
-               .clk = &d2d_26m_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_D2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk des1_ick;
-
-static struct clk_hw_omap des1_ick_hw = {
-       .hw = {
-               .clk = &des1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_DES1_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(des1_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk des2_ick;
-
-static struct clk_hw_omap des2_ick_hw = {
-       .hw = {
-               .clk = &des2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_DES2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
-                  OMAP3430_MPU_CLK_SRC_SHIFT, OMAP3430_MPU_CLK_SRC_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll2_fck;
-
-static struct dpll_data dpll2_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
-       .mult_mask      = OMAP3430_IVA2_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_IVA2_DPLL_DIV_MASK,
-       .clk_bypass     = &dpll2_fck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_IVA2_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
-       .enable_mask    = OMAP3430_EN_IVA2_DPLL_MASK,
-       .modes          = ((1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
-                          (1 << DPLL_LOW_POWER_BYPASS)),
-       .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
-       .autoidle_mask  = OMAP3430_AUTO_IVA2_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
-       .idlest_mask    = OMAP3430_ST_IVA2_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll2_ck;
-
-static struct clk_hw_omap dpll2_ck_hw = {
-       .hw = {
-               .clk = &dpll2_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll2_dd,
-       .clkdm_name     = "dpll2_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll2_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
-                  OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL),
-                  OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll3_m3x2_ck;
-
-static const char *dpll3_m3x2_ck_parent_names[] = {
-       "dpll3_m3_ck",
-};
-
-static struct clk_hw_omap dpll3_m3x2_ck_hw = {
-       .hw = {
-               .clk = &dpll3_m3x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_EMU_CORE_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll3_m3x2_ck_3630 = {
-       .name           = "dpll3_m3x2_ck",
-       .hw             = &dpll3_m3x2_ck_hw.hw,
-       .parent_names   = dpll3_m3x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll3_m3x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1);
-
-DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH,
-                  0, dpll4_mx_ck_div_table, NULL);
-
-static struct clk dpll4_m4x2_ck;
-
-static const char *dpll4_m4x2_ck_parent_names[] = {
-       "dpll4_m4_ck",
-};
-
-static struct clk_hw_omap dpll4_m4x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m4x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_DSS1_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names,
-               dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
-
-static struct clk dpll4_m4x2_ck_3630 = {
-       .name           = "dpll4_m4x2_ck",
-       .hw             = &dpll4_m4x2_ck_hw.hw,
-       .parent_names   = dpll4_m4x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m4x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m6x2_ck;
-
-static const char *dpll4_m6x2_ck_parent_names[] = {
-       "dpll4_m6_ck",
-};
-
-static struct clk_hw_omap dpll4_m6x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m6x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m6x2_ck_3630 = {
-       .name           = "dpll4_m6x2_ck",
-       .hw             = &dpll4_m6x2_ck_hw.hw,
-       .parent_names   = dpll4_m6x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m6x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1);
-
-static struct dpll_data dpll5_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
-       .mult_mask      = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
-       .enable_mask    = OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
-       .autoidle_mask  = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
-       .idlest_mask    = OMAP3430ES2_ST_PERIPH2_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll5_ck;
-
-static struct clk_hw_omap dpll5_ck_hw = {
-       .hw = {
-               .clk = &dpll5_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll5_dd,
-       .clkdm_name     = "dpll5_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
-                  OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dss1_alwon_fck_3430es1;
-
-static const char *dss1_alwon_fck_3430es1_parent_names[] = {
-       "dpll4_m4x2_ck",
-};
-
-static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = {
-       .hw = {
-               .clk = &dss1_alwon_fck_3430es1,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS1_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es1,
-               dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-               CLK_SET_RATE_PARENT);
-
-static struct clk dss1_alwon_fck_3430es2;
-
-static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = {
-       .hw = {
-               .clk = &dss1_alwon_fck_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS1_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es2,
-               dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-               CLK_SET_RATE_PARENT);
-
-static struct clk dss2_alwon_fck;
-
-static struct clk_hw_omap dss2_alwon_fck_hw = {
-       .hw = {
-               .clk = &dss2_alwon_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS2_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_96m_fck;
-
-static struct clk_hw_omap dss_96m_fck_hw = {
-       .hw = {
-               .clk = &dss_96m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_TV_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_96m_fck, core_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es1;
-
-static struct clk_hw_omap dss_ick_3430es1_hw = {
-       .hw = {
-               .clk = &dss_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es1, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es2;
-
-static struct clk_hw_omap dss_ick_3430es2_hw = {
-       .hw = {
-               .clk = &dss_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es2, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_tv_fck;
-
-static const char *dss_tv_fck_parent_names[] = {
-       "omap_54m_fck",
-};
-
-static struct clk_hw_omap dss_tv_fck_hw = {
-       .hw = {
-               .clk = &dss_tv_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_TV_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_tv_fck, dss_tv_fck_parent_names, aes2_ick_ops);
-
-static struct clk emac_fck;
-
-static const char *emac_fck_parent_names[] = {
-       "rmii_ck",
-};
-
-static struct clk_hw_omap emac_fck_hw = {
-       .hw = {
-               .clk = &emac_fck,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_CPGMAC_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(emac_fck, emac_fck_parent_names, aes1_ick_ops);
-
-static struct clk ipss_ick;
-
-static const char *ipss_ick_parent_names[] = {
-       "core_l3_ick",
-};
-
-static struct clk_hw_omap ipss_ick_hw = {
-       .hw = {
-               .clk = &ipss_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = AM35XX_EN_IPSS_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ipss_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk emac_ick;
-
-static const char *emac_ick_parent_names[] = {
-       "ipss_ick",
-};
-
-static struct clk_hw_omap emac_ick_hw = {
-       .hw = {
-               .clk = &emac_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_CPGMAC_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emac_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk emu_core_alwon_ck;
-
-static const char *emu_core_alwon_ck_parent_names[] = {
-       "dpll3_m3x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_core_alwon_ck, "dpll3_clkdm");
-DEFINE_STRUCT_CLK(emu_core_alwon_ck, emu_core_alwon_ck_parent_names,
-                 core_l4_ick_ops);
-
-static struct clk emu_mpu_alwon_ck;
-
-static const char *emu_mpu_alwon_ck_parent_names[] = {
-       "mpu_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_mpu_alwon_ck, NULL);
-DEFINE_STRUCT_CLK(emu_mpu_alwon_ck, emu_mpu_alwon_ck_parent_names, core_ck_ops);
-
-static struct clk emu_per_alwon_ck;
-
-static const char *emu_per_alwon_ck_parent_names[] = {
-       "dpll4_m6x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_per_alwon_ck, "dpll4_clkdm");
-DEFINE_STRUCT_CLK(emu_per_alwon_ck, emu_per_alwon_ck_parent_names,
-                 core_l4_ick_ops);
-
-static const char *emu_src_ck_parent_names[] = {
-       "sys_ck", "emu_core_alwon_ck", "emu_per_alwon_ck", "emu_mpu_alwon_ck",
-};
-
-static const struct clksel_rate emu_src_sys_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_core_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_per_rates[] = {
-       { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_mpu_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel emu_src_clksel[] = {
-       { .parent = &sys_ck,            .rates = emu_src_sys_rates },
-       { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
-       { .parent = &emu_per_alwon_ck,  .rates = emu_src_per_rates },
-       { .parent = &emu_mpu_alwon_ck,  .rates = emu_src_mpu_rates },
-       { .parent = NULL },
-};
-
-static const struct clk_ops emu_src_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-       .enable         = &omap2_clkops_enable_clkdm,
-       .disable        = &omap2_clkops_disable_clkdm,
-};
-
-static struct clk emu_src_ck;
-
-static struct clk_hw_omap emu_src_ck_hw = {
-       .hw = {
-               .clk = &emu_src_ck,
-       },
-       .clksel         = emu_src_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-       .clksel_mask    = OMAP3430_MUX_CTRL_MASK,
-       .clkdm_name     = "emu_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk fac_ick;
-
-static struct clk_hw_omap fac_ick_hw = {
-       .hw = {
-               .clk = &fac_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_FAC_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(fac_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk fshostusb_fck;
-
-static const char *fshostusb_fck_parent_names[] = {
-       "core_48m_fck",
-};
-
-static struct clk_hw_omap fshostusb_fck_hw = {
-       .hw = {
-               .clk = &fshostusb_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(fshostusb_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_l3_ck;
-
-static struct clk_hw_omap gfx_l3_ck_hw = {
-       .hw = {
-               .clk = &gfx_l3_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP_EN_GFX_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0,
-                  OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
-                  OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk gfx_cg1_ck;
-
-static const char *gfx_cg1_ck_parent_names[] = {
-       "gfx_l3_fck",
-};
-
-static struct clk_hw_omap gfx_cg1_ck_hw = {
-       .hw = {
-               .clk = &gfx_cg1_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES1_EN_2D_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_cg1_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_cg2_ck;
-
-static struct clk_hw_omap gfx_cg2_ck_hw = {
-       .hw = {
-               .clk = &gfx_cg2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES1_EN_3D_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_cg2_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_l3_ick;
-
-static const char *gfx_l3_ick_parent_names[] = {
-       "gfx_l3_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(gfx_l3_ick, "gfx_3430es1_clkdm");
-DEFINE_STRUCT_CLK(gfx_l3_ick, gfx_l3_ick_parent_names, core_l4_ick_ops);
-
-static struct clk wkup_32k_fck;
-
-static const char *wkup_32k_fck_parent_names[] = {
-       "omap_32k_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(wkup_32k_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_32k_fck, wkup_32k_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpio1_dbck;
-
-static const char *gpio1_dbck_parent_names[] = {
-       "wkup_32k_fck",
-};
-
-static struct clk_hw_omap gpio1_dbck_hw = {
-       .hw = {
-               .clk = &gpio1_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wkup_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpio1_ick;
-
-static const char *gpio1_ick_parent_names[] = {
-       "wkup_l4_ick",
-};
-
-static struct clk_hw_omap gpio1_ick_hw = {
-       .hw = {
-               .clk = &gpio1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk per_32k_alwon_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_32k_alwon_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_32k_alwon_fck, wkup_32k_fck_parent_names,
-                 core_l4_ick_ops);
-
-static struct clk gpio2_dbck;
-
-static const char *gpio2_dbck_parent_names[] = {
-       "per_32k_alwon_fck",
-};
-
-static struct clk_hw_omap gpio2_dbck_hw = {
-       .hw = {
-               .clk = &gpio2_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio2_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk per_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_l4_ick, "per_clkdm");
-DEFINE_STRUCT_CLK(per_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk gpio2_ick;
-
-static const char *gpio2_ick_parent_names[] = {
-       "per_l4_ick",
-};
-
-static struct clk_hw_omap gpio2_ick_hw = {
-       .hw = {
-               .clk = &gpio2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio3_dbck;
-
-static struct clk_hw_omap gpio3_dbck_hw = {
-       .hw = {
-               .clk = &gpio3_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio3_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio3_ick;
-
-static struct clk_hw_omap gpio3_ick_hw = {
-       .hw = {
-               .clk = &gpio3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio4_dbck;
-
-static struct clk_hw_omap gpio4_dbck_hw = {
-       .hw = {
-               .clk = &gpio4_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio4_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio4_ick;
-
-static struct clk_hw_omap gpio4_ick_hw = {
-       .hw = {
-               .clk = &gpio4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio5_dbck;
-
-static struct clk_hw_omap gpio5_dbck_hw = {
-       .hw = {
-               .clk = &gpio5_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio5_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio5_ick;
-
-static struct clk_hw_omap gpio5_ick_hw = {
-       .hw = {
-               .clk = &gpio5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio5_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio6_dbck;
-
-static struct clk_hw_omap gpio6_dbck_hw = {
-       .hw = {
-               .clk = &gpio6_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio6_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio6_ick;
-
-static struct clk_hw_omap gpio6_ick_hw = {
-       .hw = {
-               .clk = &gpio6_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio6_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpmc_fck;
-
-static struct clk_hw_omap gpmc_fck_hw = {
-       .hw = {
-               .clk = &gpmc_fck,
-       },
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpmc_fck, ipss_ick_parent_names, core_l4_ick_ops);
-
-static const struct clksel omap343x_gpt_clksel[] = {
-       { .parent = &omap_32k_fck, .rates = gpt_32k_rates },
-       { .parent = &sys_ck, .rates = gpt_sys_rates },
-       { .parent = NULL },
-};
-
-static const char *gpt10_fck_parent_names[] = {
-       "omap_32k_fck", "sys_ck",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT10_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_GPT10_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt10_ick;
-
-static struct clk_hw_omap gpt10_ick_hw = {
-       .hw = {
-               .clk = &gpt10_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_GPT10_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt10_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT11_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_GPT11_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt11_ick;
-
-static struct clk_hw_omap gpt11_ick_hw = {
-       .hw = {
-               .clk = &gpt11_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_GPT11_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt11_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpt12_fck;
-
-static const char *gpt12_fck_parent_names[] = {
-       "secure_32k_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(gpt12_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(gpt12_fck, gpt12_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpt12_ick;
-
-static struct clk_hw_omap gpt12_ick_hw = {
-       .hw = {
-               .clk = &gpt12_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT12_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt12_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "wkup_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT1_MASK,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT1_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt1_ick;
-
-static struct clk_hw_omap gpt1_ick_hw = {
-       .hw = {
-               .clk = &gpt1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT2_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT2_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt2_ick;
-
-static struct clk_hw_omap gpt2_ick_hw = {
-       .hw = {
-               .clk = &gpt2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT3_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT3_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt3_ick;
-
-static struct clk_hw_omap gpt3_ick_hw = {
-       .hw = {
-               .clk = &gpt3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT4_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT4_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt4_ick;
-
-static struct clk_hw_omap gpt4_ick_hw = {
-       .hw = {
-               .clk = &gpt4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT5_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT5_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt5_ick;
-
-static struct clk_hw_omap gpt5_ick_hw = {
-       .hw = {
-               .clk = &gpt5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt5_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT6_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT6_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt6_ick;
-
-static struct clk_hw_omap gpt6_ick_hw = {
-       .hw = {
-               .clk = &gpt6_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt6_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT7_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT7_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt7_ick;
-
-static struct clk_hw_omap gpt7_ick_hw = {
-       .hw = {
-               .clk = &gpt7_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT7_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt7_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT8_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT8_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt8_ick;
-
-static struct clk_hw_omap gpt8_ick_hw = {
-       .hw = {
-               .clk = &gpt8_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT8_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt8_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT9_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT9_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt9_ick;
-
-static struct clk_hw_omap gpt9_ick_hw = {
-       .hw = {
-               .clk = &gpt9_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT9_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt9_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk hdq_fck;
-
-static const char *hdq_fck_parent_names[] = {
-       "core_12m_fck",
-};
-
-static struct clk_hw_omap hdq_fck_hw = {
-       .hw = {
-               .clk = &hdq_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_HDQ_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hdq_fck, hdq_fck_parent_names, aes2_ick_ops);
-
-static struct clk hdq_ick;
-
-static struct clk_hw_omap hdq_ick_hw = {
-       .hw = {
-               .clk = &hdq_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HDQ_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hdq_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk hecc_ck;
-
-static struct clk_hw_omap hecc_ck_hw = {
-       .hw = {
-               .clk = &hecc_ck,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_HECC_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_fck_am35xx;
-
-static struct clk_hw_omap hsotgusb_fck_am35xx_hw = {
-       .hw = {
-               .clk = &hsotgusb_fck_am35xx,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_USBOTG_FCLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_3430es1;
-
-static struct clk_hw_omap hsotgusb_ick_3430es1_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HSOTGUSB_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_3430es1, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_3430es2;
-
-static struct clk_hw_omap hsotgusb_ick_3430es2_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_hsotgusb_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HSOTGUSB_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_3430es2, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_am35xx;
-
-static struct clk_hw_omap hsotgusb_ick_am35xx_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_am35xx,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_USBOTG_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_am35xx, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c1_fck;
-
-static struct clk_hw_omap i2c1_fck_hw = {
-       .hw = {
-               .clk = &i2c1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c1_ick;
-
-static struct clk_hw_omap i2c1_ick_hw = {
-       .hw = {
-               .clk = &i2c1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c2_fck;
-
-static struct clk_hw_omap i2c2_fck_hw = {
-       .hw = {
-               .clk = &i2c2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c2_ick;
-
-static struct clk_hw_omap i2c2_ick_hw = {
-       .hw = {
-               .clk = &i2c2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c3_fck;
-
-static struct clk_hw_omap i2c3_fck_hw = {
-       .hw = {
-               .clk = &i2c3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c3_ick;
-
-static struct clk_hw_omap i2c3_ick_hw = {
-       .hw = {
-               .clk = &i2c3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk icr_ick;
-
-static struct clk_hw_omap icr_ick_hw = {
-       .hw = {
-               .clk = &icr_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_ICR_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(icr_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk iva2_ck;
-
-static const char *iva2_ck_parent_names[] = {
-       "dpll2_m2_ck",
-};
-
-static struct clk_hw_omap iva2_ck_hw = {
-       .hw = {
-               .clk = &iva2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
-       .clkdm_name     = "iva2_clkdm",
-};
-
-DEFINE_STRUCT_CLK(iva2_ck, iva2_ck_parent_names, aes2_ick_ops);
-
-static struct clk mad2d_ick;
-
-static struct clk_hw_omap mad2d_ick_hw = {
-       .hw = {
-               .clk = &mad2d_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
-       .enable_bit     = OMAP3430_EN_MAD2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk mailboxes_ick;
-
-static struct clk_hw_omap mailboxes_ick_hw = {
-       .hw = {
-               .clk = &mailboxes_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MAILBOXES_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mailboxes_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate common_mcbsp_96m_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel mcbsp_15_clksel[] = {
-       { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
-       { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
-       { .parent = NULL },
-};
-
-static const char *mcbsp1_fck_parent_names[] = {
-       "core_96m_fck", "mcbsp_clks",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_15_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
-                        OMAP2_MCBSP1_CLKS_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_MCBSP1_SHIFT, &clkhwops_wait,
-                        mcbsp1_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp1_ick;
-
-static struct clk_hw_omap mcbsp1_ick_hw = {
-       .hw = {
-               .clk = &mcbsp1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCBSP1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk per_96m_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_96m_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_96m_fck, cm_96m_fck_parent_names, core_l4_ick_ops);
-
-static const struct clksel mcbsp_234_clksel[] = {
-       { .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates },
-       { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
-       { .parent = NULL },
-};
-
-static const char *mcbsp2_fck_parent_names[] = {
-       "per_96m_fck", "mcbsp_clks",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
-                        OMAP2_MCBSP2_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP2_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp2_ick;
-
-static struct clk_hw_omap mcbsp2_ick_hw = {
-       .hw = {
-               .clk = &mcbsp2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP3_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP3_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp3_ick;
-
-static struct clk_hw_omap mcbsp3_ick_hw = {
-       .hw = {
-               .clk = &mcbsp3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP4_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP4_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp4_ick;
-
-static struct clk_hw_omap mcbsp4_ick_hw = {
-       .hw = {
-               .clk = &mcbsp4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_15_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP5_CLKS_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_MCBSP5_SHIFT, &clkhwops_wait,
-                        mcbsp1_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp5_ick;
-
-static struct clk_hw_omap mcbsp5_ick_hw = {
-       .hw = {
-               .clk = &mcbsp5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCBSP5_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp5_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi1_fck;
-
-static struct clk_hw_omap mcspi1_fck_hw = {
-       .hw = {
-               .clk = &mcspi1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi1_ick;
-
-static struct clk_hw_omap mcspi1_ick_hw = {
-       .hw = {
-               .clk = &mcspi1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi2_fck;
-
-static struct clk_hw_omap mcspi2_fck_hw = {
-       .hw = {
-               .clk = &mcspi2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi2_ick;
-
-static struct clk_hw_omap mcspi2_ick_hw = {
-       .hw = {
-               .clk = &mcspi2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi3_fck;
-
-static struct clk_hw_omap mcspi3_fck_hw = {
-       .hw = {
-               .clk = &mcspi3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi3_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi3_ick;
-
-static struct clk_hw_omap mcspi3_ick_hw = {
-       .hw = {
-               .clk = &mcspi3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi4_fck;
-
-static struct clk_hw_omap mcspi4_fck_hw = {
-       .hw = {
-               .clk = &mcspi4_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi4_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi4_ick;
-
-static struct clk_hw_omap mcspi4_ick_hw = {
-       .hw = {
-               .clk = &mcspi4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi4_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs1_fck;
-
-static struct clk_hw_omap mmchs1_fck_hw = {
-       .hw = {
-               .clk = &mmchs1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs1_ick;
-
-static struct clk_hw_omap mmchs1_ick_hw = {
-       .hw = {
-               .clk = &mmchs1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs2_fck;
-
-static struct clk_hw_omap mmchs2_fck_hw = {
-       .hw = {
-               .clk = &mmchs2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs2_ick;
-
-static struct clk_hw_omap mmchs2_ick_hw = {
-       .hw = {
-               .clk = &mmchs2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs3_fck;
-
-static struct clk_hw_omap mmchs3_fck_hw = {
-       .hw = {
-               .clk = &mmchs3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES2_EN_MMC3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs3_ick;
-
-static struct clk_hw_omap mmchs3_ick_hw = {
-       .hw = {
-               .clk = &mmchs3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430ES2_EN_MMC3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk modem_fck;
-
-static struct clk_hw_omap modem_fck_hw = {
-       .hw = {
-               .clk = &modem_fck,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MODEM_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk mspro_fck;
-
-static struct clk_hw_omap mspro_fck_hw = {
-       .hw = {
-               .clk = &mspro_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MSPRO_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mspro_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mspro_ick;
-
-static struct clk_hw_omap mspro_ick_hw = {
-       .hw = {
-               .clk = &mspro_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MSPRO_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mspro_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk omap_192m_alwon_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(omap_192m_alwon_fck, NULL);
-DEFINE_STRUCT_CLK(omap_192m_alwon_fck, omap_96m_alwon_fck_parent_names,
-                 core_ck_ops);
-
-static struct clk omap_32ksync_ick;
-
-static struct clk_hw_omap omap_32ksync_ick_hw = {
-       .hw = {
-               .clk = &omap_32ksync_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_32KSYNC_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(omap_32ksync_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_36XX },
-       { .div = 2, .val = 2, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel omap_96m_alwon_fck_clksel[] = {
-       { .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates },
-       { .parent = NULL }
-};
-
-static struct clk omap_96m_alwon_fck_3630;
-
-static const char *omap_96m_alwon_fck_3630_parent_names[] = {
-       "omap_192m_alwon_fck",
-};
-
-static const struct clk_ops omap_96m_alwon_fck_3630_ops = {
-       .set_rate       = &omap2_clksel_set_rate,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .round_rate     = &omap2_clksel_round_rate,
-};
-
-static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = {
-       .hw = {
-               .clk = &omap_96m_alwon_fck_3630,
-       },
-       .clksel         = omap_96m_alwon_fck_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-       .clksel_mask    = OMAP3630_CLKSEL_96M_MASK,
-};
-
-static struct clk omap_96m_alwon_fck_3630 = {
-       .name   = "omap_96m_alwon_fck",
-       .hw     = &omap_96m_alwon_fck_3630_hw.hw,
-       .parent_names   = omap_96m_alwon_fck_3630_parent_names,
-       .num_parents    = ARRAY_SIZE(omap_96m_alwon_fck_3630_parent_names),
-       .ops    = &omap_96m_alwon_fck_3630_ops,
-};
-
-static struct clk omapctrl_ick;
-
-static struct clk_hw_omap omapctrl_ick_hw = {
-       .hw = {
-               .clk = &omapctrl_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_OMAPCTRL_SHIFT,
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk per_48m_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_48m_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk security_l3_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(security_l3_ick, NULL);
-DEFINE_STRUCT_CLK(security_l3_ick, core_l3_ick_parent_names, core_ck_ops);
-
-static struct clk pka_ick;
-
-static const char *pka_ick_parent_names[] = {
-       "security_l3_ick",
-};
-
-static struct clk_hw_omap pka_ick_hw = {
-       .hw = {
-               .clk = &pka_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_PKA_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0,
-                  OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk rng_ick;
-
-static struct clk_hw_omap rng_ick_hw = {
-       .hw = {
-               .clk = &rng_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_RNG_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(rng_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk sad2d_ick;
-
-static struct clk_hw_omap sad2d_ick_hw = {
-       .hw = {
-               .clk = &sad2d_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SAD2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk sdrc_ick;
-
-static struct clk_hw_omap sdrc_ick_hw = {
-       .hw = {
-               .clk = &sdrc_ick,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SDRC_SHIFT,
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate sgx_core_rates[] = {
-       { .div = 2, .val = 5, .flags = RATE_IN_36XX },
-       { .div = 3, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 6, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_96m_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_192m_rates[] = {
-       { .div = 1, .val = 4, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_corex2_rates[] = {
-       { .div = 3, .val = 6, .flags = RATE_IN_36XX },
-       { .div = 5, .val = 7, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel sgx_clksel[] = {
-       { .parent = &core_ck, .rates = sgx_core_rates },
-       { .parent = &cm_96m_fck, .rates = sgx_96m_rates },
-       { .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates },
-       { .parent = &corex2_fck, .rates = sgx_corex2_rates },
-       { .parent = NULL },
-};
-
-static const char *sgx_fck_parent_names[] = {
-       "core_ck", "cm_96m_fck", "omap_192m_alwon_fck", "corex2_fck",
-};
-
-static struct clk sgx_fck;
-
-static const struct clk_ops sgx_fck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .set_rate       = &omap2_clksel_set_rate,
-       .round_rate     = &omap2_clksel_round_rate,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(sgx_fck, "sgx_clkdm", sgx_clksel,
-                        OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
-                        OMAP3430ES2_CLKSEL_SGX_MASK,
-                        OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
-                        OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
-                        &clkhwops_wait, sgx_fck_parent_names, sgx_fck_ops);
-
-static struct clk sgx_ick;
-
-static struct clk_hw_omap sgx_ick_hw = {
-       .hw = {
-               .clk = &sgx_ick,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
-       .clkdm_name     = "sgx_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sgx_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk sha11_ick;
-
-static struct clk_hw_omap sha11_ick_hw = {
-       .hw = {
-               .clk = &sha11_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_SHA11_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(sha11_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk sha12_ick;
-
-static struct clk_hw_omap sha12_ick_hw = {
-       .hw = {
-               .clk = &sha12_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SHA12_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sha12_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk sr1_fck;
-
-static struct clk_hw_omap sr1_fck_hw = {
-       .hw = {
-               .clk = &sr1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_SR1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk sr2_fck;
-
-static struct clk_hw_omap sr2_fck_hw = {
-       .hw = {
-               .clk = &sr2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_SR2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk sr_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(sr_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(sr_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk ssi_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(ssi_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(ssi_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk ssi_ick_3430es1;
-
-static const char *ssi_ick_3430es1_parent_names[] = {
-       "ssi_l4_ick",
-};
-
-static struct clk_hw_omap ssi_ick_3430es1_hw = {
-       .hw = {
-               .clk = &ssi_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SSI_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ssi_ick_3430es1, ssi_ick_3430es1_parent_names, aes2_ick_ops);
-
-static struct clk ssi_ick_3430es2;
-
-static struct clk_hw_omap ssi_ick_3430es2_hw = {
-       .hw = {
-               .clk = &ssi_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_ssi_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SSI_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ssi_ick_3430es2, ssi_ick_3430es1_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate ssi_ssr_corex2_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
-       { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel ssi_ssr_clksel[] = {
-       { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
-       { .parent = NULL },
-};
-
-static const char *ssi_ssr_fck_3430es1_parent_names[] = {
-       "corex2_fck",
-};
-
-static const struct clk_ops ssi_ssr_fck_3430es1_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .set_rate       = &omap2_clksel_set_rate,
-       .round_rate     = &omap2_clksel_round_rate,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es1, "core_l4_clkdm",
-                        ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_SSI_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_SSI_SHIFT,
-                        NULL, ssi_ssr_fck_3430es1_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm",
-                        ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_SSI_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_SSI_SHIFT,
-                        NULL, ssi_ssr_fck_3430es1_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1",
-                       &ssi_ssr_fck_3430es1, 0x0, 1, 2);
-
-DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2",
-                       &ssi_ssr_fck_3430es2, 0x0, 1, 2);
-
-static struct clk sys_clkout1;
-
-static const char *sys_clkout1_parent_names[] = {
-       "osc_sys_ck",
-};
-
-static struct clk_hw_omap sys_clkout1_hw = {
-       .hw = {
-               .clk = &sys_clkout1,
-       },
-       .enable_reg     = OMAP3430_PRM_CLKOUT_CTRL,
-       .enable_bit     = OMAP3430_CLKOUT_EN_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0,
-                  OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT,
-                  OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
-
-DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-              OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_TRACECLK_SHIFT,
-                  OMAP3430_CLKSEL_TRACECLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk ts_fck;
-
-static struct clk_hw_omap ts_fck_hw = {
-       .hw = {
-               .clk = &ts_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_TS_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ts_fck, wkup_32k_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart1_fck;
-
-static struct clk_hw_omap uart1_fck_hw = {
-       .hw = {
-               .clk = &uart1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_UART1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart1_ick;
-
-static struct clk_hw_omap uart1_ick_hw = {
-       .hw = {
-               .clk = &uart1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_UART1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart2_fck;
-
-static struct clk_hw_omap uart2_fck_hw = {
-       .hw = {
-               .clk = &uart2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_UART2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart2_ick;
-
-static struct clk_hw_omap uart2_ick_hw = {
-       .hw = {
-               .clk = &uart2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_UART2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart3_fck;
-
-static const char *uart3_fck_parent_names[] = {
-       "per_48m_fck",
-};
-
-static struct clk_hw_omap uart3_fck_hw = {
-       .hw = {
-               .clk = &uart3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_UART3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart3_fck, uart3_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart3_ick;
-
-static struct clk_hw_omap uart3_ick_hw = {
-       .hw = {
-               .clk = &uart3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_UART3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart4_fck;
-
-static struct clk_hw_omap uart4_fck_hw = {
-       .hw = {
-               .clk = &uart4_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3630_EN_UART4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_fck, uart3_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart4_fck_am35xx;
-
-static struct clk_hw_omap uart4_fck_am35xx_hw = {
-       .hw = {
-               .clk = &uart4_fck_am35xx,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = AM35XX_EN_UART4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_fck_am35xx, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart4_ick;
-
-static struct clk_hw_omap uart4_ick_hw = {
-       .hw = {
-               .clk = &uart4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3630_EN_UART4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart4_ick_am35xx;
-
-static struct clk_hw_omap uart4_ick_am35xx_hw = {
-       .hw = {
-               .clk = &uart4_ick_am35xx,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = AM35XX_EN_UART4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_ick_am35xx, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate div2_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel usb_l4_clksel[] = {
-       { .parent = &l4_ick, .rates = div2_rates },
-       { .parent = NULL },
-};
-
-static const char *usb_l4_ick_parent_names[] = {
-       "l4_ick",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-                        OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
-                        &clkhwops_iclk_wait, usb_l4_ick_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-static struct clk usbhost_120m_fck;
-
-static const char *usbhost_120m_fck_parent_names[] = {
-       "dpll5_m2_ck",
-};
-
-static struct clk_hw_omap usbhost_120m_fck_hw = {
-       .hw = {
-               .clk = &usbhost_120m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST2_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_120m_fck, usbhost_120m_fck_parent_names,
-                 aes2_ick_ops);
-
-static struct clk usbhost_48m_fck;
-
-static struct clk_hw_omap usbhost_48m_fck_hw = {
-       .hw = {
-               .clk = &usbhost_48m_fck,
-       },
-       .ops            = &clkhwops_omap3430es2_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST1_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_48m_fck, core_48m_fck_parent_names, aes2_ick_ops);
-
-static struct clk usbhost_ick;
-
-static struct clk_hw_omap usbhost_ick_hw = {
-       .hw = {
-               .clk = &usbhost_ick,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_ick, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk usbtll_fck;
-
-static struct clk_hw_omap usbtll_fck_hw = {
-       .hw = {
-               .clk = &usbtll_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_USBTLL_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbtll_fck, usbhost_120m_fck_parent_names, aes2_ick_ops);
-
-static struct clk usbtll_ick;
-
-static struct clk_hw_omap usbtll_ick_hw = {
-       .hw = {
-               .clk = &usbtll_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_USBTLL_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbtll_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate usim_96m_rates[] = {
-       { .div = 2, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 5, .flags = RATE_IN_3XXX },
-       { .div = 10, .val = 6, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate usim_120m_rates[] = {
-       { .div = 4, .val = 7, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
-       { .div = 16, .val = 9, .flags = RATE_IN_3XXX },
-       { .div = 20, .val = 10, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel usim_clksel[] = {
-       { .parent = &omap_96m_fck, .rates = usim_96m_rates },
-       { .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
-       { .parent = &sys_ck, .rates = div2_rates },
-       { .parent = NULL },
-};
-
-static const char *usim_fck_parent_names[] = {
-       "omap_96m_fck", "dpll5_m2_ck", "sys_ck",
-};
-
-static struct clk usim_fck;
-
-static const struct clk_ops usim_fck_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(usim_fck, NULL, usim_clksel,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                        OMAP3430ES2_CLKSEL_USIMOCP_MASK,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-                        OMAP3430ES2_EN_USIMOCP_SHIFT, &clkhwops_wait,
-                        usim_fck_parent_names, usim_fck_ops);
-
-static struct clk usim_ick;
-
-static struct clk_hw_omap usim_ick_hw = {
-       .hw = {
-               .clk = &usim_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USIMOCP_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usim_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk vpfe_fck;
-
-static const char *vpfe_fck_parent_names[] = {
-       "pclk_ck",
-};
-
-static struct clk_hw_omap vpfe_fck_hw = {
-       .hw = {
-               .clk = &vpfe_fck,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_VPFE_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(vpfe_fck, vpfe_fck_parent_names, aes1_ick_ops);
-
-static struct clk vpfe_ick;
-
-static struct clk_hw_omap vpfe_ick_hw = {
-       .hw = {
-               .clk = &vpfe_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_VPFE_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(vpfe_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt1_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(wdt1_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wdt1_fck, gpt12_fck_parent_names, core_l4_ick_ops);
-
-static struct clk wdt1_ick;
-
-static struct clk_hw_omap wdt1_ick_hw = {
-       .hw = {
-               .clk = &wdt1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt2_fck;
-
-static struct clk_hw_omap wdt2_fck_hw = {
-       .hw = {
-               .clk = &wdt2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_WDT2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt2_fck, gpio1_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wdt2_ick;
-
-static struct clk_hw_omap wdt2_ick_hw = {
-       .hw = {
-               .clk = &wdt2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt2_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt3_fck;
-
-static struct clk_hw_omap wdt3_fck_hw = {
-       .hw = {
-               .clk = &wdt3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_WDT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt3_fck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wdt3_ick;
-
-static struct clk_hw_omap wdt3_ick_hw = {
-       .hw = {
-               .clk = &wdt3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-/*
- * clocks specific to omap3430es1
- */
-static struct omap_clk omap3430es1_clks[] = {
-       CLK(NULL,       "gfx_l3_ck",    &gfx_l3_ck),
-       CLK(NULL,       "gfx_l3_fck",   &gfx_l3_fck),
-       CLK(NULL,       "gfx_l3_ick",   &gfx_l3_ick),
-       CLK(NULL,       "gfx_cg1_ck",   &gfx_cg1_ck),
-       CLK(NULL,       "gfx_cg2_ck",   &gfx_cg2_ck),
-       CLK(NULL,       "d2d_26m_fck",  &d2d_26m_fck),
-       CLK(NULL,       "fshostusb_fck", &fshostusb_fck),
-       CLK(NULL,       "ssi_ssr_fck",  &ssi_ssr_fck_3430es1),
-       CLK(NULL,       "ssi_sst_fck",  &ssi_sst_fck_3430es1),
-       CLK("musb-omap2430",    "ick",  &hsotgusb_ick_3430es1),
-       CLK(NULL,       "hsotgusb_ick", &hsotgusb_ick_3430es1),
-       CLK(NULL,       "fac_ick",      &fac_ick),
-       CLK(NULL,       "ssi_ick",      &ssi_ick_3430es1),
-       CLK(NULL,       "usb_l4_ick",   &usb_l4_ick),
-       CLK(NULL,       "dss1_alwon_fck",       &dss1_alwon_fck_3430es1),
-       CLK("omapdss_dss",      "ick",          &dss_ick_3430es1),
-       CLK(NULL,       "dss_ick",              &dss_ick_3430es1),
-};
-
-/*
- * clocks specific to am35xx
- */
-static struct omap_clk am35xx_clks[] = {
-       CLK(NULL,       "ipss_ick",     &ipss_ick),
-       CLK(NULL,       "rmii_ck",      &rmii_ck),
-       CLK(NULL,       "pclk_ck",      &pclk_ck),
-       CLK(NULL,       "emac_ick",     &emac_ick),
-       CLK(NULL,       "emac_fck",     &emac_fck),
-       CLK("davinci_emac.0",   NULL,   &emac_ick),
-       CLK("davinci_mdio.0",   NULL,   &emac_fck),
-       CLK("vpfe-capture",     "master",       &vpfe_ick),
-       CLK("vpfe-capture",     "slave",        &vpfe_fck),
-       CLK(NULL,       "hsotgusb_ick",         &hsotgusb_ick_am35xx),
-       CLK(NULL,       "hsotgusb_fck",         &hsotgusb_fck_am35xx),
-       CLK(NULL,       "hecc_ck",      &hecc_ck),
-       CLK(NULL,       "uart4_ick",    &uart4_ick_am35xx),
-       CLK(NULL,       "uart4_fck",    &uart4_fck_am35xx),
-};
-
-/*
- * clocks specific to omap36xx
- */
-static struct omap_clk omap36xx_clks[] = {
-       CLK(NULL,       "omap_192m_alwon_fck", &omap_192m_alwon_fck),
-       CLK(NULL,       "uart4_fck",    &uart4_fck),
-};
-
-/*
- * clocks common to omap36xx omap34xx
- */
-static struct omap_clk omap34xx_omap36xx_clks[] = {
-       CLK(NULL,       "aes1_ick",     &aes1_ick),
-       CLK("omap_rng", "ick",          &rng_ick),
-       CLK("omap3-rom-rng",    "ick",  &rng_ick),
-       CLK(NULL,       "sha11_ick",    &sha11_ick),
-       CLK(NULL,       "des1_ick",     &des1_ick),
-       CLK(NULL,       "cam_mclk",     &cam_mclk),
-       CLK(NULL,       "cam_ick",      &cam_ick),
-       CLK(NULL,       "csi2_96m_fck", &csi2_96m_fck),
-       CLK(NULL,       "security_l3_ick", &security_l3_ick),
-       CLK(NULL,       "pka_ick",      &pka_ick),
-       CLK(NULL,       "icr_ick",      &icr_ick),
-       CLK("omap-aes", "ick",  &aes2_ick),
-       CLK("omap-sham",        "ick",  &sha12_ick),
-       CLK(NULL,       "des2_ick",     &des2_ick),
-       CLK(NULL,       "mspro_ick",    &mspro_ick),
-       CLK(NULL,       "mailboxes_ick", &mailboxes_ick),
-       CLK(NULL,       "ssi_l4_ick",   &ssi_l4_ick),
-       CLK(NULL,       "sr1_fck",      &sr1_fck),
-       CLK(NULL,       "sr2_fck",      &sr2_fck),
-       CLK(NULL,       "sr_l4_ick",    &sr_l4_ick),
-       CLK(NULL,       "security_l4_ick2", &security_l4_ick2),
-       CLK(NULL,       "wkup_l4_ick",  &wkup_l4_ick),
-       CLK(NULL,       "dpll2_fck",    &dpll2_fck),
-       CLK(NULL,       "iva2_ck",      &iva2_ck),
-       CLK(NULL,       "modem_fck",    &modem_fck),
-       CLK(NULL,       "sad2d_ick",    &sad2d_ick),
-       CLK(NULL,       "mad2d_ick",    &mad2d_ick),
-       CLK(NULL,       "mspro_fck",    &mspro_fck),
-       CLK(NULL,       "dpll2_ck",     &dpll2_ck),
-       CLK(NULL,       "dpll2_m2_ck",  &dpll2_m2_ck),
-};
-
-/*
- * clocks common to omap36xx and omap3430es2plus
- */
-static struct omap_clk omap36xx_omap3430es2plus_clks[] = {
-       CLK(NULL,       "ssi_ssr_fck",  &ssi_ssr_fck_3430es2),
-       CLK(NULL,       "ssi_sst_fck",  &ssi_sst_fck_3430es2),
-       CLK("musb-omap2430",    "ick",  &hsotgusb_ick_3430es2),
-       CLK(NULL,       "hsotgusb_ick", &hsotgusb_ick_3430es2),
-       CLK(NULL,       "ssi_ick",      &ssi_ick_3430es2),
-       CLK(NULL,       "usim_fck",     &usim_fck),
-       CLK(NULL,       "usim_ick",     &usim_ick),
-};
-
-/*
- * clocks common to am35xx omap36xx and omap3430es2plus
- */
-static struct omap_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
-       CLK(NULL,       "virt_16_8m_ck", &virt_16_8m_ck),
-       CLK(NULL,       "dpll5_ck",     &dpll5_ck),
-       CLK(NULL,       "dpll5_m2_ck",  &dpll5_m2_ck),
-       CLK(NULL,       "sgx_fck",      &sgx_fck),
-       CLK(NULL,       "sgx_ick",      &sgx_ick),
-       CLK(NULL,       "cpefuse_fck",  &cpefuse_fck),
-       CLK(NULL,       "ts_fck",       &ts_fck),
-       CLK(NULL,       "usbtll_fck",   &usbtll_fck),
-       CLK(NULL,       "usbtll_ick",   &usbtll_ick),
-       CLK("omap_hsmmc.2",     "ick",  &mmchs3_ick),
-       CLK(NULL,       "mmchs3_ick",   &mmchs3_ick),
-       CLK(NULL,       "mmchs3_fck",   &mmchs3_fck),
-       CLK(NULL,       "dss1_alwon_fck",       &dss1_alwon_fck_3430es2),
-       CLK("omapdss_dss",      "ick",          &dss_ick_3430es2),
-       CLK(NULL,       "dss_ick",              &dss_ick_3430es2),
-       CLK(NULL,       "usbhost_120m_fck", &usbhost_120m_fck),
-       CLK(NULL,       "usbhost_48m_fck", &usbhost_48m_fck),
-       CLK(NULL,       "usbhost_ick",  &usbhost_ick),
-};
-
-/*
- * common clocks
- */
-static struct omap_clk omap3xxx_clks[] = {
-       CLK(NULL,       "apb_pclk",     &dummy_apb_pclk),
-       CLK(NULL,       "omap_32k_fck", &omap_32k_fck),
-       CLK(NULL,       "virt_12m_ck",  &virt_12m_ck),
-       CLK(NULL,       "virt_13m_ck",  &virt_13m_ck),
-       CLK(NULL,       "virt_19200000_ck", &virt_19200000_ck),
-       CLK(NULL,       "virt_26000000_ck", &virt_26000000_ck),
-       CLK(NULL,       "virt_38_4m_ck", &virt_38_4m_ck),
-       CLK(NULL,       "osc_sys_ck",   &osc_sys_ck),
-       CLK("twl",      "fck",          &osc_sys_ck),
-       CLK(NULL,       "sys_ck",       &sys_ck),
-       CLK(NULL,       "omap_96m_alwon_fck", &omap_96m_alwon_fck),
-       CLK("etb",      "emu_core_alwon_ck", &emu_core_alwon_ck),
-       CLK(NULL,       "sys_altclk",   &sys_altclk),
-       CLK(NULL,       "mcbsp_clks",   &mcbsp_clks),
-       CLK(NULL,       "sys_clkout1",  &sys_clkout1),
-       CLK(NULL,       "dpll1_ck",     &dpll1_ck),
-       CLK(NULL,       "dpll1_x2_ck",  &dpll1_x2_ck),
-       CLK(NULL,       "dpll1_x2m2_ck", &dpll1_x2m2_ck),
-       CLK(NULL,       "dpll3_ck",     &dpll3_ck),
-       CLK(NULL,       "core_ck",      &core_ck),
-       CLK(NULL,       "dpll3_x2_ck",  &dpll3_x2_ck),
-       CLK(NULL,       "dpll3_m2_ck",  &dpll3_m2_ck),
-       CLK(NULL,       "dpll3_m2x2_ck", &dpll3_m2x2_ck),
-       CLK(NULL,       "dpll3_m3_ck",  &dpll3_m3_ck),
-       CLK(NULL,       "dpll3_m3x2_ck", &dpll3_m3x2_ck),
-       CLK(NULL,       "dpll4_ck",     &dpll4_ck),
-       CLK(NULL,       "dpll4_x2_ck",  &dpll4_x2_ck),
-       CLK(NULL,       "omap_96m_fck", &omap_96m_fck),
-       CLK(NULL,       "cm_96m_fck",   &cm_96m_fck),
-       CLK(NULL,       "omap_54m_fck", &omap_54m_fck),
-       CLK(NULL,       "omap_48m_fck", &omap_48m_fck),
-       CLK(NULL,       "omap_12m_fck", &omap_12m_fck),
-       CLK(NULL,       "dpll4_m2_ck",  &dpll4_m2_ck),
-       CLK(NULL,       "dpll4_m2x2_ck", &dpll4_m2x2_ck),
-       CLK(NULL,       "dpll4_m3_ck",  &dpll4_m3_ck),
-       CLK(NULL,       "dpll4_m3x2_ck", &dpll4_m3x2_ck),
-       CLK(NULL,       "dpll4_m4_ck",  &dpll4_m4_ck),
-       CLK(NULL,       "dpll4_m4x2_ck", &dpll4_m4x2_ck),
-       CLK(NULL,       "dpll4_m5_ck",  &dpll4_m5_ck),
-       CLK(NULL,       "dpll4_m5x2_ck", &dpll4_m5x2_ck),
-       CLK(NULL,       "dpll4_m6_ck",  &dpll4_m6_ck),
-       CLK(NULL,       "dpll4_m6x2_ck", &dpll4_m6x2_ck),
-       CLK("etb",      "emu_per_alwon_ck", &emu_per_alwon_ck),
-       CLK(NULL,       "clkout2_src_ck", &clkout2_src_ck),
-       CLK(NULL,       "sys_clkout2",  &sys_clkout2),
-       CLK(NULL,       "corex2_fck",   &corex2_fck),
-       CLK(NULL,       "dpll1_fck",    &dpll1_fck),
-       CLK(NULL,       "mpu_ck",       &mpu_ck),
-       CLK(NULL,       "arm_fck",      &arm_fck),
-       CLK("etb",      "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
-       CLK(NULL,       "l3_ick",       &l3_ick),
-       CLK(NULL,       "l4_ick",       &l4_ick),
-       CLK(NULL,       "rm_ick",       &rm_ick),
-       CLK(NULL,       "gpt10_fck",    &gpt10_fck),
-       CLK(NULL,       "gpt11_fck",    &gpt11_fck),
-       CLK(NULL,       "core_96m_fck", &core_96m_fck),
-       CLK(NULL,       "mmchs2_fck",   &mmchs2_fck),
-       CLK(NULL,       "mmchs1_fck",   &mmchs1_fck),
-       CLK(NULL,       "i2c3_fck",     &i2c3_fck),
-       CLK(NULL,       "i2c2_fck",     &i2c2_fck),
-       CLK(NULL,       "i2c1_fck",     &i2c1_fck),
-       CLK(NULL,       "mcbsp5_fck",   &mcbsp5_fck),
-       CLK(NULL,       "mcbsp1_fck",   &mcbsp1_fck),
-       CLK(NULL,       "core_48m_fck", &core_48m_fck),
-       CLK(NULL,       "mcspi4_fck",   &mcspi4_fck),
-       CLK(NULL,       "mcspi3_fck",   &mcspi3_fck),
-       CLK(NULL,       "mcspi2_fck",   &mcspi2_fck),
-       CLK(NULL,       "mcspi1_fck",   &mcspi1_fck),
-       CLK(NULL,       "uart2_fck",    &uart2_fck),
-       CLK(NULL,       "uart1_fck",    &uart1_fck),
-       CLK(NULL,       "core_12m_fck", &core_12m_fck),
-       CLK("omap_hdq.0",       "fck",  &hdq_fck),
-       CLK(NULL,       "hdq_fck",      &hdq_fck),
-       CLK(NULL,       "core_l3_ick",  &core_l3_ick),
-       CLK(NULL,       "sdrc_ick",     &sdrc_ick),
-       CLK(NULL,       "gpmc_fck",     &gpmc_fck),
-       CLK(NULL,       "core_l4_ick",  &core_l4_ick),
-       CLK("omap_hsmmc.1",     "ick",  &mmchs2_ick),
-       CLK("omap_hsmmc.0",     "ick",  &mmchs1_ick),
-       CLK(NULL,       "mmchs2_ick",   &mmchs2_ick),
-       CLK(NULL,       "mmchs1_ick",   &mmchs1_ick),
-       CLK("omap_hdq.0", "ick",        &hdq_ick),
-       CLK(NULL,       "hdq_ick",      &hdq_ick),
-       CLK("omap2_mcspi.4", "ick",     &mcspi4_ick),
-       CLK("omap2_mcspi.3", "ick",     &mcspi3_ick),
-       CLK("omap2_mcspi.2", "ick",     &mcspi2_ick),
-       CLK("omap2_mcspi.1", "ick",     &mcspi1_ick),
-       CLK(NULL,       "mcspi4_ick",   &mcspi4_ick),
-       CLK(NULL,       "mcspi3_ick",   &mcspi3_ick),
-       CLK(NULL,       "mcspi2_ick",   &mcspi2_ick),
-       CLK(NULL,       "mcspi1_ick",   &mcspi1_ick),
-       CLK("omap_i2c.3", "ick",        &i2c3_ick),
-       CLK("omap_i2c.2", "ick",        &i2c2_ick),
-       CLK("omap_i2c.1", "ick",        &i2c1_ick),
-       CLK(NULL,       "i2c3_ick",     &i2c3_ick),
-       CLK(NULL,       "i2c2_ick",     &i2c2_ick),
-       CLK(NULL,       "i2c1_ick",     &i2c1_ick),
-       CLK(NULL,       "uart2_ick",    &uart2_ick),
-       CLK(NULL,       "uart1_ick",    &uart1_ick),
-       CLK(NULL,       "gpt11_ick",    &gpt11_ick),
-       CLK(NULL,       "gpt10_ick",    &gpt10_ick),
-       CLK("omap-mcbsp.5", "ick",      &mcbsp5_ick),
-       CLK("omap-mcbsp.1", "ick",      &mcbsp1_ick),
-       CLK(NULL,       "mcbsp5_ick",   &mcbsp5_ick),
-       CLK(NULL,       "mcbsp1_ick",   &mcbsp1_ick),
-       CLK(NULL,       "omapctrl_ick", &omapctrl_ick),
-       CLK(NULL,       "dss_tv_fck",   &dss_tv_fck),
-       CLK(NULL,       "dss_96m_fck",  &dss_96m_fck),
-       CLK(NULL,       "dss2_alwon_fck",       &dss2_alwon_fck),
-       CLK(NULL,       "init_60m_fclk",        &dummy_ck),
-       CLK(NULL,       "gpt1_fck",     &gpt1_fck),
-       CLK(NULL,       "aes2_ick",     &aes2_ick),
-       CLK(NULL,       "wkup_32k_fck", &wkup_32k_fck),
-       CLK(NULL,       "gpio1_dbck",   &gpio1_dbck),
-       CLK(NULL,       "sha12_ick",    &sha12_ick),
-       CLK(NULL,       "wdt2_fck",             &wdt2_fck),
-       CLK("omap_wdt", "ick",          &wdt2_ick),
-       CLK(NULL,       "wdt2_ick",     &wdt2_ick),
-       CLK(NULL,       "wdt1_ick",     &wdt1_ick),
-       CLK(NULL,       "gpio1_ick",    &gpio1_ick),
-       CLK(NULL,       "omap_32ksync_ick", &omap_32ksync_ick),
-       CLK(NULL,       "gpt12_ick",    &gpt12_ick),
-       CLK(NULL,       "gpt1_ick",     &gpt1_ick),
-       CLK(NULL,       "per_96m_fck",  &per_96m_fck),
-       CLK(NULL,       "per_48m_fck",  &per_48m_fck),
-       CLK(NULL,       "uart3_fck",    &uart3_fck),
-       CLK(NULL,       "gpt2_fck",     &gpt2_fck),
-       CLK(NULL,       "gpt3_fck",     &gpt3_fck),
-       CLK(NULL,       "gpt4_fck",     &gpt4_fck),
-       CLK(NULL,       "gpt5_fck",     &gpt5_fck),
-       CLK(NULL,       "gpt6_fck",     &gpt6_fck),
-       CLK(NULL,       "gpt7_fck",     &gpt7_fck),
-       CLK(NULL,       "gpt8_fck",     &gpt8_fck),
-       CLK(NULL,       "gpt9_fck",     &gpt9_fck),
-       CLK(NULL,       "per_32k_alwon_fck", &per_32k_alwon_fck),
-       CLK(NULL,       "gpio6_dbck",   &gpio6_dbck),
-       CLK(NULL,       "gpio5_dbck",   &gpio5_dbck),
-       CLK(NULL,       "gpio4_dbck",   &gpio4_dbck),
-       CLK(NULL,       "gpio3_dbck",   &gpio3_dbck),
-       CLK(NULL,       "gpio2_dbck",   &gpio2_dbck),
-       CLK(NULL,       "wdt3_fck",     &wdt3_fck),
-       CLK(NULL,       "per_l4_ick",   &per_l4_ick),
-       CLK(NULL,       "gpio6_ick",    &gpio6_ick),
-       CLK(NULL,       "gpio5_ick",    &gpio5_ick),
-       CLK(NULL,       "gpio4_ick",    &gpio4_ick),
-       CLK(NULL,       "gpio3_ick",    &gpio3_ick),
-       CLK(NULL,       "gpio2_ick",    &gpio2_ick),
-       CLK(NULL,       "wdt3_ick",     &wdt3_ick),
-       CLK(NULL,       "uart3_ick",    &uart3_ick),
-       CLK(NULL,       "uart4_ick",    &uart4_ick),
-       CLK(NULL,       "gpt9_ick",     &gpt9_ick),
-       CLK(NULL,       "gpt8_ick",     &gpt8_ick),
-       CLK(NULL,       "gpt7_ick",     &gpt7_ick),
-       CLK(NULL,       "gpt6_ick",     &gpt6_ick),
-       CLK(NULL,       "gpt5_ick",     &gpt5_ick),
-       CLK(NULL,       "gpt4_ick",     &gpt4_ick),
-       CLK(NULL,       "gpt3_ick",     &gpt3_ick),
-       CLK(NULL,       "gpt2_ick",     &gpt2_ick),
-       CLK("omap-mcbsp.2", "ick",      &mcbsp2_ick),
-       CLK("omap-mcbsp.3", "ick",      &mcbsp3_ick),
-       CLK("omap-mcbsp.4", "ick",      &mcbsp4_ick),
-       CLK(NULL,       "mcbsp4_ick",   &mcbsp2_ick),
-       CLK(NULL,       "mcbsp3_ick",   &mcbsp3_ick),
-       CLK(NULL,       "mcbsp2_ick",   &mcbsp4_ick),
-       CLK(NULL,       "mcbsp2_fck",   &mcbsp2_fck),
-       CLK(NULL,       "mcbsp3_fck",   &mcbsp3_fck),
-       CLK(NULL,       "mcbsp4_fck",   &mcbsp4_fck),
-       CLK("etb",      "emu_src_ck",   &emu_src_ck),
-       CLK(NULL,       "emu_src_ck",   &emu_src_ck),
-       CLK(NULL,       "pclk_fck",     &pclk_fck),
-       CLK(NULL,       "pclkx2_fck",   &pclkx2_fck),
-       CLK(NULL,       "atclk_fck",    &atclk_fck),
-       CLK(NULL,       "traceclk_src_fck", &traceclk_src_fck),
-       CLK(NULL,       "traceclk_fck", &traceclk_fck),
-       CLK(NULL,       "secure_32k_fck", &secure_32k_fck),
-       CLK(NULL,       "gpt12_fck",    &gpt12_fck),
-       CLK(NULL,       "wdt1_fck",     &wdt1_fck),
-       CLK(NULL,       "timer_32k_ck", &omap_32k_fck),
-       CLK(NULL,       "timer_sys_ck", &sys_ck),
-       CLK(NULL,       "cpufreq_ck",   &dpll1_ck),
-};
-
-static const char *enable_init_clks[] = {
-       "sdrc_ick",
-       "gpmc_fck",
-       "omapctrl_ick",
-};
-
-int __init omap3xxx_clk_init(void)
-{
-       if (omap3_has_192mhz_clk())
-               omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
-
-       if (cpu_is_omap3630()) {
-               dpll3_m3x2_ck = dpll3_m3x2_ck_3630;
-               dpll4_m2x2_ck = dpll4_m2x2_ck_3630;
-               dpll4_m3x2_ck = dpll4_m3x2_ck_3630;
-               dpll4_m4x2_ck = dpll4_m4x2_ck_3630;
-               dpll4_m5x2_ck = dpll4_m5x2_ck_3630;
-               dpll4_m6x2_ck = dpll4_m6x2_ck_3630;
-       }
-
-       /*
-        * XXX This type of dynamic rewriting of the clock tree is
-        * deprecated and should be revised soon.
-        */
-       if (cpu_is_omap3630())
-               dpll4_dd = dpll4_dd_3630;
-       else
-               dpll4_dd = dpll4_dd_34xx;
-
-
-       /*
-        * 3505 must be tested before 3517, since 3517 returns true
-        * for both AM3517 chips and AM3517 family chips, which
-        * includes 3505.  Unfortunately there's no obvious family
-        * test for 3517/3505 :-(
-        */
-       if (soc_is_am35xx()) {
-               cpu_mask = RATE_IN_34XX;
-               omap_clocks_register(am35xx_clks, ARRAY_SIZE(am35xx_clks));
-               omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-               omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-       } else if (cpu_is_omap3630()) {
-               cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
-               omap_clocks_register(omap36xx_clks, ARRAY_SIZE(omap36xx_clks));
-               omap_clocks_register(omap36xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-               omap_clocks_register(omap34xx_omap36xx_clks,
-                                    ARRAY_SIZE(omap34xx_omap36xx_clks));
-               omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-               omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-       } else if (cpu_is_omap34xx()) {
-               if (omap_rev() == OMAP3430_REV_ES1_0) {
-                       cpu_mask = RATE_IN_3430ES1;
-                       omap_clocks_register(omap3430es1_clks,
-                                            ARRAY_SIZE(omap3430es1_clks));
-                       omap_clocks_register(omap34xx_omap36xx_clks,
-                                            ARRAY_SIZE(omap34xx_omap36xx_clks));
-                       omap_clocks_register(omap3xxx_clks,
-                                            ARRAY_SIZE(omap3xxx_clks));
-               } else {
-                       /*
-                        * Assume that anything that we haven't matched yet
-                        * has 3430ES2-type clocks.
-                        */
-                       cpu_mask = RATE_IN_3430ES2PLUS;
-                       omap_clocks_register(omap34xx_omap36xx_clks,
-                                            ARRAY_SIZE(omap34xx_omap36xx_clks));
-                       omap_clocks_register(omap36xx_omap3430es2plus_clks,
-                                            ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-                       omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                            ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-                       omap_clocks_register(omap3xxx_clks,
-                                            ARRAY_SIZE(omap3xxx_clks));
-               }
-       } else {
-               WARN(1, "clock: could not identify OMAP3 variant\n");
-       }
-
-               omap2_clk_disable_autoidle_all();
-
-       omap2_clk_enable_init_clocks(enable_init_clks,
-                                    ARRAY_SIZE(enable_init_clks));
-
-       pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
-               (clk_get_rate(&osc_sys_ck) / 1000000),
-               (clk_get_rate(&osc_sys_ck) / 100000) % 10,
-               (clk_get_rate(&core_ck) / 1000000),
-               (clk_get_rate(&arm_fck) / 1000000));
-
-       /*
-        * Lock DPLL5 -- here only until other device init code can
-        * handle this
-        */
-       if (omap_rev() >= OMAP3430_REV_ES2_0)
-               omap3_clk_lock_dpll5();
-
-       /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
-       sdrc_ick_p = clk_get(NULL, "sdrc_ick");
-       arm_fck_p = clk_get(NULL, "arm_fck");
-
-       return 0;
-}
index 4ae4ccebced285e0598282028a3ad909fe3ed26c..6124db5c37aebf5d3092b66c355c326bdf6815f9 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
-#include <linux/clk-private.h>
 #include <asm/cpu.h>
 
 #include <trace/events/power.h>
@@ -632,21 +631,6 @@ const struct clk_hw_omap_ops clkhwops_wait = {
        .find_companion = omap2_clk_dflt_find_companion,
 };
 
-/**
- * omap_clocks_register - register an array of omap_clk
- * @ocs: pointer to an array of omap_clk to register
- */
-void __init omap_clocks_register(struct omap_clk oclks[], int cnt)
-{
-       struct omap_clk *c;
-
-       for (c = oclks; c < oclks + cnt; c++) {
-               clkdev_add(&c->lk);
-               if (!__clk_init(NULL, c->lk.clk))
-                       omap2_init_clk_hw_omap_clocks(c->lk.clk);
-       }
-}
-
 /**
  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
  * @mpurate_ck_name: clk name of the clock to change rate
index 1cf9dd85248abbe6511ac8c59133f9f290327de7..a56742f96000a64eb125010903304fb0d7e37414 100644 (file)
@@ -40,23 +40,29 @@ struct omap_clk {
 struct clockdomain;
 
 #define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name)     \
-       static struct clk _name = {                             \
+       static struct clk_core _name##_core = {                 \
                .name = #_name,                                 \
                .hw = &_name##_hw.hw,                           \
                .parent_names = _parent_array_name,             \
                .num_parents = ARRAY_SIZE(_parent_array_name),  \
                .ops = &_clkops_name,                           \
+       };                                                      \
+       static struct clk _name = {                             \
+               .core = &_name##_core,                          \
        };
 
 #define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name,     \
                                _clkops_name, _flags)           \
-       static struct clk _name = {                             \
+       static struct clk_core _name##_core = {                 \
                .name = #_name,                                 \
                .hw = &_name##_hw.hw,                           \
                .parent_names = _parent_array_name,             \
                .num_parents = ARRAY_SIZE(_parent_array_name),  \
                .ops = &_clkops_name,                           \
                .flags = _flags,                                \
+       };                                                      \
+       static struct clk _name = {                             \
+               .core = &_name##_core,                          \
        };
 
 #define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name)          \
@@ -238,7 +244,6 @@ struct ti_clk_features {
 extern struct ti_clk_features ti_clk_features;
 
 extern const struct clkops clkops_omap2_dflt_wait;
-extern const struct clkops clkops_dummy;
 extern const struct clkops clkops_omap2_dflt;
 
 extern struct clk_functions omap2_clk_functions;
@@ -247,7 +252,6 @@ extern const struct clksel_rate gpt_32k_rates[];
 extern const struct clksel_rate gpt_sys_rates[];
 extern const struct clksel_rate gfx_l3_rates[];
 extern const struct clksel_rate dsp_ick_rates[];
-extern struct clk dummy_ck;
 
 extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
 extern const struct clk_hw_omap_ops clkhwops_wait;
@@ -272,7 +276,5 @@ extern void __iomem *clk_memmaps[];
 extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
 extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
 
-extern void omap_clocks_register(struct omap_clk *oclks, int cnt);
-
 void __init ti_clk_init_features(void);
 #endif
index ef4d21bfb96478da0b9ef681c931303aa9fb1bf4..61b60dfb14ce8a69e7d316aa17d831c7469d0222 100644 (file)
@@ -16,7 +16,6 @@
  * OMAP3xxx clock definition files.
  */
 
-#include <linux/clk-private.h>
 #include "clock.h"
 
 /* clksel_rate data common to 24xx/343x */
@@ -114,13 +113,3 @@ const struct clksel_rate div31_1to31_rates[] = {
        { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
        { .div = 0 },
 };
-
-/* Clocks shared between various OMAP SoCs */
-
-static struct clk_ops dummy_ck_ops = {};
-
-struct clk dummy_ck = {
-       .name = "dummy_clk",
-       .ops = &dummy_ck_ops,
-       .flags = CLK_IS_BASIC,
-};
index c2da2a0fe5ad64df80d6290f45658c697cfd4c96..44e57ec225d4401c1e2a81fcbfd929e499e65d58 100644 (file)
@@ -410,7 +410,7 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        int r;
        struct dpll_data *dd;
-       struct clk *parent;
+       struct clk_hw *parent;
 
        dd = clk->dpll_data;
        if (!dd)
@@ -427,13 +427,13 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
                }
        }
 
-       parent = __clk_get_parent(hw->clk);
+       parent = __clk_get_hw(__clk_get_parent(hw->clk));
 
        if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
-               WARN_ON(parent != dd->clk_bypass);
+               WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
                r = _omap3_noncore_dpll_bypass(clk);
        } else {
-               WARN_ON(parent != dd->clk_ref);
+               WARN_ON(parent != __clk_get_hw(dd->clk_ref));
                r = _omap3_noncore_dpll_lock(clk);
        }
 
@@ -473,6 +473,8 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw)
  * in failure.
  */
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_clk)
 {
@@ -549,7 +551,8 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
        if (!dd)
                return -EINVAL;
 
-       if (__clk_get_parent(hw->clk) != dd->clk_ref)
+       if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
+           __clk_get_hw(dd->clk_ref))
                return -EINVAL;
 
        if (dd->last_rounded_rate == 0)
index fc712240e5fd9d5173daca2df199d46e3ae521d9..f231be05b9a638de8e52cfe03d78765433815764 100644 (file)
@@ -202,6 +202,8 @@ out:
  * in failure.
  */
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
index e60780f0537492fb570e646e9bc4897101cab022..c4871c55bd8b641544a86a281121cad4c9219e42 100644 (file)
@@ -461,7 +461,17 @@ void __init omap3_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_soc_init = omap3xxx_clk_init;
+       if (!of_have_populated_dt()) {
+               omap3_prcm_legacy_iomaps_init();
+               if (soc_is_am35xx())
+                       omap_clk_soc_init = am35xx_clk_legacy_init;
+               else if (cpu_is_omap3630())
+                       omap_clk_soc_init = omap36xx_clk_legacy_init;
+               else if (omap_rev() == OMAP3430_REV_ES1_0)
+                       omap_clk_soc_init = omap3430es1_clk_legacy_init;
+               else
+                       omap_clk_soc_init = omap3430_clk_legacy_init;
+       }
 }
 
 void __init omap3430_init_early(void)
@@ -753,15 +763,17 @@ int __init omap_clk_init(void)
 
        ti_clk_init_features();
 
-       ret = of_prcm_init();
-       if (ret)
-               return ret;
+       if (of_have_populated_dt()) {
+               ret = of_prcm_init();
+               if (ret)
+                       return ret;
 
-       of_clk_init(NULL);
+               of_clk_init(NULL);
 
-       ti_dt_clk_init_retry_clks();
+               ti_dt_clk_init_retry_clks();
 
-       ti_dt_clockdomains_setup();
+               ti_dt_clockdomains_setup();
+       }
 
        ret = omap_clk_soc_init();
 
index 2418bdf28ca271599ae108824abe3b6493d0bbf4..cee0fe1ee6ffb0d3e5026a7328458feb34dc2732 100644 (file)
@@ -242,7 +242,7 @@ static int __init omap4_sar_ram_init(void)
 }
 omap_early_initcall(omap4_sar_ram_init);
 
-static struct of_device_id gic_match[] = {
+static const struct of_device_id gic_match[] = {
        { .compatible = "arm,cortex-a9-gic", },
        { .compatible = "arm,cortex-a15-gic", },
        { },
index 77752e49d8d4c666a2f48fb816546ccfbf53b212..b9061a6a2db8998314cf83dc0aa98dab2899ce43 100644 (file)
@@ -20,6 +20,7 @@ extern void __iomem *prm_base;
 extern u16 prm_features;
 extern void omap2_set_globals_prm(void __iomem *prm);
 int of_prcm_init(void);
+void omap3_prcm_legacy_iomaps_init(void);
 # endif
 
 /*
index c5e00c6714b1d99fc5afaabe3f41985e1fce4bad..5713bbdf83bc57ac7314f6e27455e3851772bc09 100644 (file)
@@ -674,7 +674,7 @@ int __init omap3xxx_prm_init(void)
        return prm_register(&omap3xxx_prm_ll_data);
 }
 
-static struct of_device_id omap3_prm_dt_match_table[] = {
+static const struct of_device_id omap3_prm_dt_match_table[] = {
        { .compatible = "ti,omap3-prm" },
        { }
 };
index 408c64efb80700868fa4c8b0138a2763a78bc161..a08a617a6c110365cf20ce9c5df54edef19c20c5 100644 (file)
@@ -712,7 +712,7 @@ int __init omap44xx_prm_init(void)
        return prm_register(&omap44xx_prm_ll_data);
 }
 
-static struct of_device_id omap_prm_dt_match_table[] = {
+static const struct of_device_id omap_prm_dt_match_table[] = {
        { .compatible = "ti,omap4-prm" },
        { .compatible = "ti,omap5-prm" },
        { .compatible = "ti,dra7-prm" },
index 264b5e29404d0eded3c9eca764384e8d93a5e563..bfaa7ba595cc832ec7783e759db4425c5e1e58c0 100644 (file)
@@ -35,6 +35,8 @@
 #include "prm44xx.h"
 #include "common.h"
 #include "clock.h"
+#include "cm.h"
+#include "control.h"
 
 /*
  * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
@@ -641,6 +643,15 @@ int __init of_prcm_init(void)
        return 0;
 }
 
+void __init omap3_prcm_legacy_iomaps_init(void)
+{
+       ti_clk_ll_ops = &omap_clk_ll_ops;
+
+       clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD;
+       clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD;
+       clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get();
+}
+
 static int __init prm_late_init(void)
 {
        if (prm_ll_data->late_init)
index a219dc310d5de545527030fe169b88a494c0578a..e03d8b5c9ad0aa174b46c2e54cf2ade518d4cfdc 100644 (file)
@@ -27,7 +27,6 @@ config ARCH_ATLAS7
        select CPU_V7
        select HAVE_ARM_SCU if SMP
        select HAVE_SMP
-       select SMP_ON_UP if SMP
        help
           Support for CSR SiRFSoC ARM Cortex A7 Platform
 
index 0c819bb88418369bab10f336766c9974c89f48dc..8cadb302a7d2f54a3bbcddaf7296606293d69e4e 100644 (file)
@@ -21,7 +21,7 @@ static void __init sirfsoc_init_late(void)
 }
 
 #ifdef CONFIG_ARCH_ATLAS6
-static const char *atlas6_dt_match[] __initconst = {
+static const char *const atlas6_dt_match[] __initconst = {
        "sirf,atlas6",
        NULL
 };
@@ -36,7 +36,7 @@ MACHINE_END
 #endif
 
 #ifdef CONFIG_ARCH_PRIMA2
-static const char *prima2_dt_match[] __initconst = {
+static const char *const prima2_dt_match[] __initconst = {
        "sirf,prima2",
        NULL
 };
@@ -52,7 +52,7 @@ MACHINE_END
 #endif
 
 #ifdef CONFIG_ARCH_ATLAS7
-static const char *atlas7_dt_match[] __initdata = {
+static const char *const atlas7_dt_match[] __initconst = {
        "sirf,atlas7",
        NULL
 };
index fc2b03c81e5f57f2f6be7146afa7022f3f22d476..e46c91094dde3c66065b4d7e040ef7a057d9d04a 100644 (file)
@@ -40,7 +40,7 @@ static void sirfsoc_secondary_init(unsigned int cpu)
        spin_unlock(&boot_lock);
 }
 
-static struct of_device_id clk_ids[]  = {
+static const struct of_device_id clk_ids[]  = {
        { .compatible = "sirf,atlas7-clkc" },
        {},
 };
index 343c4e3a7c5d1aceb136a196dcae946a7c8f5a69..7d8eab857a930b34ce9f02d4f48b83b05eaee52c 100644 (file)
@@ -81,11 +81,16 @@ static struct resource smc91x_resources[] = {
        }
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static void idp_backlight_power(int on)
index ad777b353bd5234797d93031ab6815747b65e363..28da319d389f2036e8e551b629054db546daecbe 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pwm_backlight.h>
+#include <linux/smc91x.h>
 
 #include <asm/types.h>
 #include <asm/setup.h>
@@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = {
        [1] = {
                .start  = LPD270_ETHERNET_IRQ,
                .end    = LPD270_ETHERNET_IRQ,
-               .flags  = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
        },
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static struct resource lpd270_flash_resources[] = {
index 850e506926dfb8adbc137f652353d8d84cc40197..c309593abdb223e9c9499c0469f080344ea40fd7 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/platform_data/video-clcd-versatile.h>
 #include <linux/io.h>
 #include <linux/smsc911x.h>
+#include <linux/smc91x.h>
 #include <linux/ata_platform.h>
 #include <linux/amba/mmci.h>
 #include <linux/gfp.h>
@@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = {
        .phy_interface  = PHY_INTERFACE_MODE_MII,
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device realview_eth_device = {
        .name           = "smsc911x",
        .id             = 0,
@@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res)
        realview_eth_device.resource = res;
        if (strcmp(realview_eth_device.name, "smsc911x") == 0)
                realview_eth_device.dev.platform_data = &smsc911x_config;
+       else
+               realview_eth_device.dev.platform_data = &smc91x_platdata;
 
        return platform_device_register(&realview_eth_device);
 }
index 64c88d657f9efc6360600380910248a51fd3c73b..b3869cbbcc6858c5ddb6b8ab9808773cde4dfae6 100644 (file)
@@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = {
        [1] = {
                .start          = IRQ_EB_ETH,
                .end            = IRQ_EB_ETH,
-               .flags          = IORESOURCE_IRQ,
+               .flags          = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
        },
 };
 
index 5078932c1683278c45cb57a491483427132679c1..ae4eb7cc4bcc5a1c5dffa8a26f98e5c58b6cf3f8 100644 (file)
@@ -11,6 +11,7 @@ config ARCH_ROCKCHIP
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if SMP
        select DW_APB_TIMER_OF
+       select REGULATOR if PM
        select ROCKCHIP_TIMER
        select ARM_GLOBAL_TIMER
        select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
index 7d752ff39f91f4a7d737e460f3d09e089e1383f6..7c889c04604b5ec0d9faaec30d0b612275c4cda4 100644 (file)
@@ -24,7 +24,13 @@ extern unsigned long rkpm_bootdata_ddr_data;
 extern unsigned long rk3288_bootram_sz;
 
 void rockchip_slp_cpu_resume(void);
+#ifdef CONFIG_PM_SLEEP
 void __init rockchip_suspend_init(void);
+#else
+static inline void rockchip_suspend_init(void)
+{
+}
+#endif
 
 /****** following is rk3288 defined **********/
 #define RK3288_PMU_WAKEUP_CFG0         0x00
index 43eb1eaea0c927f8e24e94f27831d92931172ab1..83e656ea95ae13f1ed23003d6872479370e43abc 100644 (file)
@@ -63,7 +63,7 @@ static void __init s5pv210_dt_init_late(void)
        s5pv210_pm_init();
 }
 
-static char const *s5pv210_dt_compat[] __initconst = {
+static char const *const s5pv210_dt_compat[] __initconst = {
        "samsung,s5pc110",
        "samsung,s5pv210",
        NULL
index 169262e3040dd77b25ae268bc39880929805f63c..7b0cd3172354dfcfb4b8d710c15949e6dae356ac 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/pm.h>
 #include <linux/serial_core.h>
 #include <linux/slab.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev)
                        0x02000000, "smc91x-attrib"),
                { .flags = IORESOURCE_IRQ },
        };
+       struct smc91x_platdata smc91x_platdata = {
+               .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
+       };
        struct platform_device_info smc91x_devinfo = {
                .parent = &dev->dev,
                .name = "smc91x",
                .id = 0,
                .res = smc91x_resources,
                .num_res = ARRAY_SIZE(smc91x_resources),
+               .data = &smc91x_platdata,
+               .size_data = sizeof(smc91x_platdata),
        };
        int ret, irq;
 
index 091261878effde2e56d1b4f81a157f7d696de765..696fd0fe48062590d69a1d9b08e7d85a9f21b143 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/mtd/partitions.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/setup.h>
@@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = {
 #endif
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
 
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev = {
+               .platform_data  = &smc91x_platdata,
+       },
 };
 
 static struct platform_device *devices[] __initdata = {
index aad97be9cbe1b0fabe069136e2e264cc51de8a24..37f7b15c01bc073678b49189c4b9d4a5a9aacd63 100644 (file)
@@ -37,7 +37,7 @@ static void __init emev2_map_io(void)
        iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc));
 }
 
-static const char *emev2_boards_compat_dt[] __initconst = {
+static const char *const emev2_boards_compat_dt[] __initconst = {
        "renesas,emev2",
        NULL,
 };
index 8825bc9e2553057145a9d2839bf4001e71960bd8..3b1ac463a4947f21f3e82de66d8853a902367fa4 100644 (file)
@@ -13,6 +13,7 @@ menuconfig ARCH_STI
        select ARM_ERRATA_775420
        select PL310_ERRATA_753970 if CACHE_L2X0
        select PL310_ERRATA_769419 if CACHE_L2X0
+       select RESET_CONTROLLER
        help
          Include support for STiH41x SOCs like STiH415/416 using the device tree
          for discovery
index ef016af1c9e769176378e2930f24dc2f060adc09..914341bcef25faf08631113ce98b90228d728d9a 100644 (file)
@@ -91,8 +91,6 @@ static void __init tegra_dt_init(void)
        struct soc_device *soc_dev;
        struct device *parent = NULL;
 
-       tegra_clocks_apply_init_table();
-
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                goto out;
index 0d4b5b46f15b551f191bedaa6f0b9394da84dad1..4d71c90f801caf6ac5a6e4d150c7751e6066f2da 100644 (file)
@@ -49,7 +49,7 @@ static struct generic_pm_domain *ux500_pm_domains[NR_DOMAINS] = {
        [DOMAIN_VAPE] = &ux500_pm_domain_vape,
 };
 
-static struct of_device_id ux500_pm_domain_matches[] = {
+static const struct of_device_id ux500_pm_domain_matches[] __initconst = {
        { .compatible = "stericsson,ux500-pm-domains", },
        { },
 };
index 9f9bc61ca64bc6af4ddf2e7bfe7e2d6ccac3fe55..7de3e92a13b0ef8896c56a9101e21ea7a8db6411 100644 (file)
@@ -35,7 +35,7 @@ static void __init versatile_dt_init(void)
                             versatile_auxdata_lookup, NULL);
 }
 
-static const char *versatile_dt_match[] __initconst = {
+static const char *const versatile_dt_match[] __initconst = {
        "arm,versatile-ab",
        "arm,versatile-pb",
        NULL,
index d6b16d9a78380e78ff7d33855f1c6caa557d2e83..3c2509b4b6946bfcfd9b4e7a325b3945ba6c244c 100644 (file)
@@ -73,6 +73,7 @@ config ARCH_VEXPRESS_TC2_PM
        depends on MCPM
        select ARM_CCI
        select ARCH_VEXPRESS_SPC
+       select ARM_CPU_SUSPEND
        help
          Support for CPU and cluster power management on Versatile Express
          with a TC2 (A15x2 A7x3) big.LITTLE core tile.
index c43c714555661337048b72a5a21a6b5357659567..9b4f29e595a423f6d00540a9a169decae0f3b4ea 100644 (file)
@@ -892,13 +892,6 @@ config CACHE_L2X0
 
 if CACHE_L2X0
 
-config CACHE_PL310
-       bool
-       default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
-       help
-         This option enables optimisations for the PL310 cache
-         controller.
-
 config PL310_ERRATA_588369
        bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
        help
index 903dba064a034c7e5d9fff950d3fa334301130d9..170a116d1b298c1befb81efdeaee49735362fd26 100644 (file)
@@ -1106,7 +1106,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
        int i = 0;
 
        if (array_size <= PAGE_SIZE)
-               pages = kzalloc(array_size, gfp);
+               pages = kzalloc(array_size, GFP_KERNEL);
        else
                pages = vzalloc(array_size);
        if (!pages)
index 27f32962e55c60f8b0e28407f12746513473926f..4eac8dcea423e2ff50b0101555fa475507769c91 100644 (file)
@@ -34,6 +34,7 @@
                        reg = <0x0 0x0>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@1 {
                        device_type = "cpu";
@@ -41,6 +42,7 @@
                        reg = <0x0 0x1>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@2 {
                        device_type = "cpu";
@@ -48,6 +50,7 @@
                        reg = <0x0 0x2>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@3 {
                        device_type = "cpu";
                        reg = <0x0 0x3>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
+               };
+
+               L2_0: l2-cache0 {
+                       compatible = "cache";
                };
        };
 
index d429129ecb3d03fe3a7460ecd3ed9d02950cb193..133ee59de2d70672db3ca648952006cdfae7c7cb 100644 (file)
@@ -39,6 +39,7 @@
                        reg = <0x0 0x0>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A57_L2>;
                };
 
                A57_1: cpu@1 {
@@ -46,6 +47,7 @@
                        reg = <0x0 0x1>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A57_L2>;
                };
 
                A53_0: cpu@100 {
@@ -53,6 +55,7 @@
                        reg = <0x0 0x100>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A53_L2>;
                };
 
                A53_1: cpu@101 {
@@ -60,6 +63,7 @@
                        reg = <0x0 0x101>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A53_L2>;
                };
 
                A53_2: cpu@102 {
@@ -67,6 +71,7 @@
                        reg = <0x0 0x102>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A53_L2>;
                };
 
                A53_3: cpu@103 {
                        reg = <0x0 0x103>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A53_L2>;
+               };
+
+               A57_L2: l2-cache0 {
+                       compatible = "cache";
+               };
+
+               A53_L2: l2-cache1 {
+                       compatible = "cache";
                };
        };
 
index efc59b3baf63fb0eb374383a3eb9ebbd73dcbdb8..20addabbd127c89acf70399c5605dfedccb55203 100644 (file)
@@ -37,6 +37,7 @@
                        reg = <0x0 0x0>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@1 {
                        device_type = "cpu";
@@ -44,6 +45,7 @@
                        reg = <0x0 0x1>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@2 {
                        device_type = "cpu";
@@ -51,6 +53,7 @@
                        reg = <0x0 0x2>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@3 {
                        device_type = "cpu";
                        reg = <0x0 0x3>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
+               };
+
+               L2_0: l2-cache0 {
+                       compatible = "cache";
                };
        };
 
index 5720608c50b1b7f969f881b8695a78742868f379..abb79b3cfcfea158cdcaa8ac1ffcbd32699da9b0 100644 (file)
@@ -29,7 +29,7 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
 obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
 aes-neon-blk-y := aes-glue-neon.o aes-neon.o
 
-AFLAGS_aes-ce.o                := -DINTERLEAVE=2 -DINTERLEAVE_INLINE
+AFLAGS_aes-ce.o                := -DINTERLEAVE=4
 AFLAGS_aes-neon.o      := -DINTERLEAVE=4
 
 CFLAGS_aes-glue-ce.o   := -DUSE_V8_CRYPTO_EXTENSIONS
index 5901480bfdcaf1cd65aeb5b8ffd31c337ea73aed..750bac4e637e5323f29bd4859b6e655b3a035d95 100644 (file)
@@ -20,6 +20,9 @@
 #error "Only include this from assembly code"
 #endif
 
+#ifndef __ASM_ASSEMBLER_H
+#define __ASM_ASSEMBLER_H
+
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
@@ -155,3 +158,5 @@ lr  .req    x30             // link register
 #endif
        orr     \rd, \lbits, \hbits, lsl #32
        .endm
+
+#endif /* __ASM_ASSEMBLER_H */
index 0710654631e789121f7b0d2247b485ebf585685a..c60643f14cda97e7ba6dd9676f10b133af4b0627 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __ASM_CPUIDLE_H
 #define __ASM_CPUIDLE_H
 
+#include <asm/proc-fns.h>
+
 #ifdef CONFIG_CPU_IDLE
 extern int cpu_init_idle(unsigned int cpu);
 extern int cpu_suspend(unsigned long arg);
index e2ff32a93b5cefc2c6fc2866d4d7befed27259fc..d2f49423c5dcbad70f63cbe777d522edbd41da75 100644 (file)
@@ -264,8 +264,10 @@ __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000)
 __AARCH64_INSN_FUNCS(bics,     0x7F200000, 0x6A200000)
 __AARCH64_INSN_FUNCS(b,                0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,       0xFC000000, 0x94000000)
-__AARCH64_INSN_FUNCS(cbz,      0xFE000000, 0x34000000)
-__AARCH64_INSN_FUNCS(cbnz,     0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(cbz,      0x7F000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz,     0x7F000000, 0x35000000)
+__AARCH64_INSN_FUNCS(tbz,      0x7F000000, 0x36000000)
+__AARCH64_INSN_FUNCS(tbnz,     0x7F000000, 0x37000000)
 __AARCH64_INSN_FUNCS(bcond,    0xFF000010, 0x54000000)
 __AARCH64_INSN_FUNCS(svc,      0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc,      0xFFE0001F, 0xD4000002)
index 16449c535e50fdcf358df430d67f38cce3893df3..800ec0e87ed955bbd38e93e448b19b6b4401625b 100644 (file)
@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-                             PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+                             PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
 }
index f9be30ea1cbd8bc5b00cf2627c2e0be47ab54d98..20e9591a60cff97c5ef3c93cbadab1aa1312fe09 100644 (file)
@@ -45,7 +45,8 @@
 #define STACK_TOP              STACK_TOP_MAX
 #endif /* CONFIG_COMPAT */
 
-#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
+extern phys_addr_t arm64_dma_phys_limit;
+#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
 #endif /* __KERNEL__ */
 
 struct debug_info {
index 73f0ce570fb31caa23fe7da9b99edd68d4ef6679..4abe9b945f77726ae0a505b9abffbb471db6adf8 100644 (file)
 #include <linux/sched.h>
 #include <asm/cputype.h>
 
-extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
-extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
-
-extern struct cpu_tlb_fns cpu_tlb;
-
 /*
  *     TLB Management
  *     ==============
index 3bf8f4e99a511c67a3a2d9c4a739929cedd5889f..07e1ba449bf1e0bd1b928646861363dd3bf3714a 100644 (file)
@@ -63,7 +63,7 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a,b)        ((a) == (b))
+#define segment_eq(a, b)       ((a) == (b))
 
 /*
  * Return 1 if addr < current->addr_limit, 0 otherwise.
@@ -147,7 +147,7 @@ do {                                                                        \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
 } while (0)
 
 #define __get_user(x, ptr)                                             \
index bef04afd603190135972256e52981b0889b047bf..5ee07eee80c2b0c1f5ba2b1292420eb396103be0 100644 (file)
@@ -15,8 +15,9 @@ CFLAGS_REMOVE_return_address.o = -pg
 arm64-obj-y            := cputable.o debug-monitors.o entry.o irq.o fpsimd.o   \
                           entry-fpsimd.o process.o ptrace.o setup.o signal.o   \
                           sys.o stacktrace.o time.o traps.o io.o vdso.o        \
-                          hyp-stub.o psci.o cpu_ops.o insn.o return_address.o  \
-                          cpuinfo.o cpu_errata.o alternative.o cacheinfo.o
+                          hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o       \
+                          return_address.o cpuinfo.o cpu_errata.o              \
+                          alternative.o cacheinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)             += sys32.o kuser32.o signal32.o         \
                                           sys_compat.o entry32.o               \
index cf8556ae09d04ad0c81855870aa052cef746f8e7..c851be795080336938f4826cc0608234b0e34bfa 100644 (file)
@@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable)
 
        branch = aarch64_insn_gen_branch_imm(pc,
                                             (unsigned long)ftrace_graph_caller,
-                                            AARCH64_INSN_BRANCH_LINK);
+                                            AARCH64_INSN_BRANCH_NOLINK);
        nop = aarch64_insn_gen_nop();
 
        if (enable)
index 27d4864577e5d47cca67f626ea3638b9f3f8b4ec..c8eca88f12e6b2702df24bc758ac99815226827c 100644 (file)
@@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap)
 
        if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
                page = vmalloc_to_page(addr);
-       else
+       else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
                page = virt_to_page(addr);
+       else
+               return addr;
 
        BUG_ON(!page);
        set_fixmap(fixmap, page_to_phys(page));
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S
new file mode 100644 (file)
index 0000000..cf83e61
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+
+/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_hvc)
+       hvc     #0
+       ret
+ENDPROC(__invoke_psci_fn_hvc)
+
+/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_smc)
+       smc     #0
+       ret
+ENDPROC(__invoke_psci_fn_smc)
index 3425f311c49ed99588998d3a9b42e035bf592d84..9b8a70ae64a187d2647a10576a789ebba213788c 100644 (file)
@@ -57,6 +57,9 @@ static struct psci_operations psci_ops;
 static int (*invoke_psci_fn)(u64, u64, u64, u64);
 typedef int (*psci_initcall_t)(const struct device_node *);
 
+asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+
 enum psci_function {
        PSCI_FN_CPU_SUSPEND,
        PSCI_FN_CPU_ON,
@@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state,
                        PSCI_0_2_POWER_STATE_AFFL_SHIFT;
 }
 
-/*
- * The following two functions are invoked via the invoke_psci_fn pointer
- * and will not be inlined, allowing us to piggyback on the AAPCS.
- */
-static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
-                                        u64 arg2)
-{
-       asm volatile(
-                       __asmeq("%0", "x0")
-                       __asmeq("%1", "x1")
-                       __asmeq("%2", "x2")
-                       __asmeq("%3", "x3")
-                       "hvc    #0\n"
-               : "+r" (function_id)
-               : "r" (arg0), "r" (arg1), "r" (arg2));
-
-       return function_id;
-}
-
-static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
-                                        u64 arg2)
-{
-       asm volatile(
-                       __asmeq("%0", "x0")
-                       __asmeq("%1", "x1")
-                       __asmeq("%2", "x2")
-                       __asmeq("%3", "x3")
-                       "smc    #0\n"
-               : "+r" (function_id)
-               : "r" (arg0), "r" (arg1), "r" (arg2));
-
-       return function_id;
-}
-
 static int psci_get_version(void)
 {
        int err;
index c20a300e22137f741a236cb8385864b90fac9df4..d26fcd4cd6e6219cae5213b4b329d53d90a86a9b 100644 (file)
@@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
        case __SI_TIMER:
                 err |= __put_user(from->si_tid, &to->si_tid);
                 err |= __put_user(from->si_overrun, &to->si_overrun);
-                err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
-                                  &to->si_ptr);
+                err |= __put_user(from->si_int, &to->si_int);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
@@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
        case __SI_MESGQ: /* But this is */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
-               err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
+               err |= __put_user(from->si_int, &to->si_int);
                break;
        case __SI_SYS:
                err |= __put_user((compat_uptr_t)(unsigned long)
index fe652ffd34c28090076b8d8358c6e40f7d77034d..efa79e8d4196d01318779cd76f2e926c40765ae9 100644 (file)
@@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime)
 /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
 ENTRY(__kernel_clock_getres)
        .cfi_startproc
-       cbz     w1, 3f
-
        cmp     w0, #CLOCK_REALTIME
        ccmp    w0, #CLOCK_MONOTONIC, #0x4, ne
        b.ne    1f
@@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres)
        b.ne    4f
        ldr     x2, 6f
 2:
+       cbz     w1, 3f
        stp     xzr, x2, [x1]
 
 3:     /* res == NULL. */
index 0a24b9b8c6982ddc675bbbd40b7baaecc247f1f6..58e0c2bdde04221cfd66e6c0ba5f6cb47051f544 100644 (file)
@@ -348,8 +348,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
        .mapping_error = swiotlb_dma_mapping_error,
 };
 
-extern int swiotlb_late_init_with_default_size(size_t default_size);
-
 static int __init atomic_pool_init(void)
 {
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -411,21 +409,13 @@ out:
        return -ENOMEM;
 }
 
-static int __init swiotlb_late_init(void)
+static int __init arm64_dma_init(void)
 {
-       size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+       int ret;
 
        dma_ops = &swiotlb_dma_ops;
 
-       return swiotlb_late_init_with_default_size(swiotlb_size);
-}
-
-static int __init arm64_dma_init(void)
-{
-       int ret = 0;
-
-       ret |= swiotlb_late_init();
-       ret |= atomic_pool_init();
+       ret = atomic_pool_init();
 
        return ret;
 }
index 71145f952070ebcd6067fcb49c5ad29e5c2ec11f..ae85da6307bb921e286bb7a99a218cab5dbe137e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/efi.h>
+#include <linux/swiotlb.h>
 
 #include <asm/fixmap.h>
 #include <asm/memory.h>
@@ -45,6 +46,7 @@
 #include "mm.h"
 
 phys_addr_t memstart_addr __read_mostly = 0;
+phys_addr_t arm64_dma_phys_limit __read_mostly;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 static int __init early_initrd(char *p)
@@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-               max_dma = PFN_DOWN(max_zone_dma_phys());
+               max_dma = PFN_DOWN(arm64_dma_phys_limit);
                zone_size[ZONE_DMA] = max_dma - min;
        }
        zone_size[ZONE_NORMAL] = max - max_dma;
@@ -156,8 +158,6 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-       phys_addr_t dma_phys_limit = 0;
-
        memblock_enforce_memory_limit(memory_limit);
 
        /*
@@ -174,8 +174,10 @@ void __init arm64_memblock_init(void)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
-               dma_phys_limit = max_zone_dma_phys();
-       dma_contiguous_reserve(dma_phys_limit);
+               arm64_dma_phys_limit = max_zone_dma_phys();
+       else
+               arm64_dma_phys_limit = PHYS_MASK + 1;
+       dma_contiguous_reserve(arm64_dma_phys_limit);
 
        memblock_allow_resize();
        memblock_dump_all();
@@ -276,6 +278,8 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
+       swiotlb_init(1);
+
        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
index 245b2ee213c915df2fc353950c3839be93daee88..a46f7cf3e1eab23d4cdfc224d21fe571917ef413 100644 (file)
@@ -26,7 +26,7 @@ typedef struct {
  * For historical reasons (Data Segment Register?), these macros are misnamed.
  */
 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-#define segment_eq(a,b)        ((a).is_user_space == (b).is_user_space)
+#define segment_eq(a, b)       ((a).is_user_space == (b).is_user_space)
 
 #define USER_ADDR_LIMIT 0x80000000
 
@@ -108,8 +108,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  *
  * Returns zero on success, or -EFAULT on error.
  */
-#define put_user(x,ptr)        \
-       __put_user_check((x),(ptr),sizeof(*(ptr)))
+#define put_user(x, ptr)       \
+       __put_user_check((x), (ptr), sizeof(*(ptr)))
 
 /*
  * get_user: - Get a simple variable from user space.
@@ -128,8 +128,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * Returns zero on success, or -EFAULT on error.
  * On error, the variable @x is set to zero.
  */
-#define get_user(x,ptr) \
-       __get_user_check((x),(ptr),sizeof(*(ptr)))
+#define get_user(x, ptr) \
+       __get_user_check((x), (ptr), sizeof(*(ptr)))
 
 /*
  * __put_user: - Write a simple value into user space, with less checking.
@@ -150,8 +150,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  *
  * Returns zero on success, or -EFAULT on error.
  */
-#define __put_user(x,ptr) \
-       __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+       __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
 /*
  * __get_user: - Get a simple variable from user space, with less checking.
@@ -173,8 +173,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * Returns zero on success, or -EFAULT on error.
  * On error, the variable @x is set to zero.
  */
-#define __get_user(x,ptr) \
-       __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+       __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
 extern int __get_user_bad(void);
 extern int __put_user_bad(void);
@@ -191,7 +191,7 @@ extern int __put_user_bad(void);
        default: __gu_err = __get_user_bad(); break;                    \
        }                                                               \
                                                                        \
-       x = (typeof(*(ptr)))__gu_val;                                   \
+       x = (__force typeof(*(ptr)))__gu_val;                           \
        __gu_err;                                                       \
 })
 
@@ -222,7 +222,7 @@ extern int __put_user_bad(void);
        } else {                                                        \
                __gu_err = -EFAULT;                                     \
        }                                                               \
-       x = (typeof(*(ptr)))__gu_val;                                   \
+       x = (__force typeof(*(ptr)))__gu_val;                           \
        __gu_err;                                                       \
 })
 
@@ -278,7 +278,7 @@ extern int __put_user_bad(void);
                                       __pu_err);                       \
                        break;                                          \
                case 8:                                                 \
-                       __put_user_asm("d", __pu_addr, __pu_val,                \
+                       __put_user_asm("d", __pu_addr, __pu_val,        \
                                       __pu_err);                       \
                        break;                                          \
                default:                                                \
index cc92cdb9994c8850f36c146d2a77f68b60d1e8a5..1d8b147282cf789498843c7817552d9959a46544 100644 (file)
@@ -607,7 +607,7 @@ static struct dw_dma_platform_data dw_dmac0_data = {
        .nr_channels    = 3,
        .block_size     = 4095U,
        .nr_masters     = 2,
-       .data_width     = { 2, 2, 0, 0 },
+       .data_width     = { 2, 2 },
 };
 
 static struct resource dw_dmac0_resource[] = {
diff --git a/arch/blackfin/include/asm/bfin_rotary.h b/arch/blackfin/include/asm/bfin_rotary.h
deleted file mode 100644 (file)
index 8895a75..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * board initialization should put one of these structures into platform_data
- * and place the bfin-rotary onto platform_bus named "bfin-rotary".
- *
- * Copyright 2008-2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _BFIN_ROTARY_H
-#define _BFIN_ROTARY_H
-
-/* mode bitmasks */
-#define ROT_QUAD_ENC   CNTMODE_QUADENC /* quadrature/grey code encoder mode */
-#define ROT_BIN_ENC    CNTMODE_BINENC  /* binary encoder mode */
-#define ROT_UD_CNT     CNTMODE_UDCNT   /* rotary counter mode */
-#define ROT_DIR_CNT    CNTMODE_DIRCNT  /* direction counter mode */
-
-#define ROT_DEBE       DEBE            /* Debounce Enable */
-
-#define ROT_CDGINV     CDGINV          /* CDG Pin Polarity Invert */
-#define ROT_CUDINV     CUDINV          /* CUD Pin Polarity Invert */
-#define ROT_CZMINV     CZMINV          /* CZM Pin Polarity Invert */
-
-struct bfin_rotary_platform_data {
-       /* set rotary UP KEY_### or BTN_### in case you prefer
-        * bfin-rotary to send EV_KEY otherwise set 0
-        */
-       unsigned int rotary_up_key;
-       /* set rotary DOWN KEY_### or BTN_### in case you prefer
-        * bfin-rotary to send EV_KEY otherwise set 0
-        */
-       unsigned int rotary_down_key;
-       /* set rotary BUTTON KEY_### or BTN_### */
-       unsigned int rotary_button_key;
-       /* set rotary Relative Axis REL_### in case you prefer
-        * bfin-rotary to send EV_REL otherwise set 0
-        */
-       unsigned int rotary_rel_code;
-       unsigned short debounce;        /* 0..17 */
-       unsigned short mode;
-       unsigned short pm_wakeup;
-};
-
-/* CNT_CONFIG bitmasks */
-#define CNTE           (1 << 0)        /* Counter Enable */
-#define DEBE           (1 << 1)        /* Debounce Enable */
-#define CDGINV         (1 << 4)        /* CDG Pin Polarity Invert */
-#define CUDINV         (1 << 5)        /* CUD Pin Polarity Invert */
-#define CZMINV         (1 << 6)        /* CZM Pin Polarity Invert */
-#define CNTMODE_SHIFT  8
-#define CNTMODE                (0x7 << CNTMODE_SHIFT)  /* Counter Operating Mode */
-#define ZMZC           (1 << 1)        /* CZM Zeroes Counter Enable */
-#define BNDMODE_SHIFT  12
-#define BNDMODE                (0x3 << BNDMODE_SHIFT)  /* Boundary register Mode */
-#define INPDIS         (1 << 15)       /* CUG and CDG Input Disable */
-
-#define CNTMODE_QUADENC        (0 << CNTMODE_SHIFT)    /* quadrature encoder mode */
-#define CNTMODE_BINENC (1 << CNTMODE_SHIFT)    /* binary encoder mode */
-#define CNTMODE_UDCNT  (2 << CNTMODE_SHIFT)    /* up/down counter mode */
-#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT)    /* direction counter mode */
-#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT)    /* direction timer mode */
-
-#define BNDMODE_COMP   (0 << BNDMODE_SHIFT)    /* boundary compare mode */
-#define BNDMODE_ZERO   (1 << BNDMODE_SHIFT)    /* boundary compare and zero mode */
-#define BNDMODE_CAPT   (2 << BNDMODE_SHIFT)    /* boundary capture mode */
-#define BNDMODE_AEXT   (3 << BNDMODE_SHIFT)    /* boundary auto-extend mode */
-
-/* CNT_IMASK bitmasks */
-#define ICIE           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Enable */
-#define UCIE           (1 << 1)        /* Up count Interrupt Enable */
-#define DCIE           (1 << 2)        /* Down count Interrupt Enable */
-#define MINCIE         (1 << 3)        /* Min Count Interrupt Enable */
-#define MAXCIE         (1 << 4)        /* Max Count Interrupt Enable */
-#define COV31IE                (1 << 5)        /* Bit 31 Overflow Interrupt Enable */
-#define COV15IE                (1 << 6)        /* Bit 15 Overflow Interrupt Enable */
-#define CZEROIE                (1 << 7)        /* Count to Zero Interrupt Enable */
-#define CZMIE          (1 << 8)        /* CZM Pin Interrupt Enable */
-#define CZMEIE         (1 << 9)        /* CZM Error Interrupt Enable */
-#define CZMZIE         (1 << 10)       /* CZM Zeroes Counter Interrupt Enable */
-
-/* CNT_STATUS bitmasks */
-#define ICII           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Identifier */
-#define UCII           (1 << 1)        /* Up count Interrupt Identifier */
-#define DCII           (1 << 2)        /* Down count Interrupt Identifier */
-#define MINCII         (1 << 3)        /* Min Count Interrupt Identifier */
-#define MAXCII         (1 << 4)        /* Max Count Interrupt Identifier */
-#define COV31II                (1 << 5)        /* Bit 31 Overflow Interrupt Identifier */
-#define COV15II                (1 << 6)        /* Bit 15 Overflow Interrupt Identifier */
-#define CZEROII                (1 << 7)        /* Count to Zero Interrupt Identifier */
-#define CZMII          (1 << 8)        /* CZM Pin Interrupt Identifier */
-#define CZMEII         (1 << 9)        /* CZM Error Interrupt Identifier */
-#define CZMZII         (1 << 10)       /* CZM Zeroes Counter Interrupt Identifier */
-
-/* CNT_COMMAND bitmasks */
-#define W1LCNT         0xf             /* Load Counter Register */
-#define W1LMIN         0xf0            /* Load Min Register */
-#define W1LMAX         0xf00           /* Load Max Register */
-#define W1ZMONCE       (1 << 12)       /* Enable CZM Clear Counter Once */
-
-#define W1LCNT_ZERO    (1 << 0)        /* write 1 to load CNT_COUNTER with zero */
-#define W1LCNT_MIN     (1 << 2)        /* write 1 to load CNT_COUNTER from CNT_MIN */
-#define W1LCNT_MAX     (1 << 3)        /* write 1 to load CNT_COUNTER from CNT_MAX */
-
-#define W1LMIN_ZERO    (1 << 4)        /* write 1 to load CNT_MIN with zero */
-#define W1LMIN_CNT     (1 << 5)        /* write 1 to load CNT_MIN from CNT_COUNTER */
-#define W1LMIN_MAX     (1 << 7)        /* write 1 to load CNT_MIN from CNT_MAX */
-
-#define W1LMAX_ZERO    (1 << 8)        /* write 1 to load CNT_MAX with zero */
-#define W1LMAX_CNT     (1 << 9)        /* write 1 to load CNT_MAX from CNT_COUNTER */
-#define W1LMAX_MIN     (1 << 10)       /* write 1 to load CNT_MAX from CNT_MIN */
-
-/* CNT_DEBOUNCE bitmasks */
-#define DPRESCALE      0xf             /* Load Counter Register */
-
-#endif
index 57701c3b8a591b35b60d88807c1d1f479b018423..90612a7f2cf32f0872af2651b608cd56eb0fff26 100644 (file)
@@ -27,7 +27,7 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a,b) ((a) == (b))
+#define segment_eq(a, b) ((a) == (b))
 
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
@@ -68,11 +68,11 @@ struct exception_table_entry {
  * use the right size if we just have the right pointer type.
  */
 
-#define put_user(x,p)                                          \
+#define put_user(x, p)                                         \
        ({                                                      \
                int _err = 0;                                   \
                typeof(*(p)) _x = (x);                          \
-               typeof(*(p)) __user *_p = (p);                          \
+               typeof(*(p)) __user *_p = (p);                  \
                if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\
                        _err = -EFAULT;                         \
                }                                               \
@@ -89,10 +89,10 @@ struct exception_table_entry {
                        break;                                  \
                case 8: {                                       \
                        long _xl, _xh;                          \
-                       _xl = ((long *)&_x)[0];                 \
-                       _xh = ((long *)&_x)[1];                 \
-                       __put_user_asm(_xl, ((long __user *)_p)+0, );   \
-                       __put_user_asm(_xh, ((long __user *)_p)+1, );   \
+                       _xl = ((__force long *)&_x)[0];         \
+                       _xh = ((__force long *)&_x)[1];         \
+                       __put_user_asm(_xl, ((__force long __user *)_p)+0, );\
+                       __put_user_asm(_xh, ((__force long __user *)_p)+1, );\
                } break;                                        \
                default:                                        \
                        _err = __put_user_bad();                \
@@ -102,7 +102,7 @@ struct exception_table_entry {
                _err;                                           \
        })
 
-#define __put_user(x,p) put_user(x,p)
+#define __put_user(x, p) put_user(x, p)
 static inline int bad_user_access_length(void)
 {
        panic("bad_user_access_length");
@@ -121,10 +121,10 @@ static inline int bad_user_access_length(void)
 
 #define __ptr(x) ((unsigned long __force *)(x))
 
-#define __put_user_asm(x,p,bhw)                                \
+#define __put_user_asm(x, p, bhw)                      \
        __asm__ (#bhw"[%1] = %0;\n\t"                   \
                 : /* no outputs */                     \
-                :"d" (x),"a" (__ptr(p)) : "memory")
+                :"d" (x), "a" (__ptr(p)) : "memory")
 
 #define get_user(x, ptr)                                       \
 ({                                                             \
@@ -136,10 +136,10 @@ static inline int bad_user_access_length(void)
                BUILD_BUG_ON(ptr_size >= 8);                    \
                switch (ptr_size) {                             \
                case 1:                                         \
-                       __get_user_asm(_val, _p, B,(Z));        \
+                       __get_user_asm(_val, _p, B, (Z));       \
                        break;                                  \
                case 2:                                         \
-                       __get_user_asm(_val, _p, W,(Z));        \
+                       __get_user_asm(_val, _p, W, (Z));       \
                        break;                                  \
                case 4:                                         \
                        __get_user_asm(_val, _p,  , );          \
@@ -147,11 +147,11 @@ static inline int bad_user_access_length(void)
                }                                               \
        } else                                                  \
                _err = -EFAULT;                                 \
-       x = (typeof(*(ptr)))_val;                               \
+       x = (__force typeof(*(ptr)))_val;                       \
        _err;                                                   \
 })
 
-#define __get_user(x,p) get_user(x,p)
+#define __get_user(x, p) get_user(x, p)
 
 #define __get_user_bad() (bad_user_access_length(), (-EFAULT))
 
@@ -168,10 +168,10 @@ static inline int bad_user_access_length(void)
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n))\
+#define copy_to_user_ret(to, from, n, retval) ({ if (copy_to_user(to, from, n))\
                                                 return retval; })
 
-#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\
+#define copy_from_user_ret(to, from, n, retval) ({ if (copy_from_user(to, from, n))\
                                                    return retval; })
 
 static inline unsigned long __must_check
index 9501bd8d9cd193e94f5a7a1896fb3ab8b1cb27fe..68f2a8a806ead46ab1567d0c724c5109378da264 100644 (file)
@@ -666,7 +666,14 @@ static struct platform_device bfin_sport1_uart_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
+
+static const u16 per_cnt[] = {
+       P_CNT_CUD,
+       P_CNT_CDG,
+       P_CNT_CZM,
+       0
+};
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -676,9 +683,15 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
        .debounce          = 10,        /* 0..17 */
        .mode              = ROT_QUAD_ENC | ROT_DEBE,
        .pm_wakeup         = 1,
+       .pin_list          = per_cnt,
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index d64f565dc2a0aaafcbaf53af5f33b1738d7ef2f7..d4219e8e5ab865fb196d4689ed315bdf2d255e85 100644 (file)
@@ -1092,7 +1092,14 @@ static struct platform_device bfin_device_gpiokeys = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
+
+static const u16 per_cnt[] = {
+       P_CNT_CUD,
+       P_CNT_CDG,
+       P_CNT_CZM,
+       0
+};
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -1102,9 +1109,15 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
        .debounce          = 10,        /* 0..17 */
        .mode              = ROT_QUAD_ENC | ROT_DEBE,
        .pm_wakeup         = 1,
+       .pin_list          = per_cnt,
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index 1fe7ff286619f693c113faeac592ce3167d8de72..4204b9842532134e7a657bd0d7f9b5c8f1aeb7d7 100644 (file)
@@ -159,7 +159,7 @@ static struct platform_device bf54x_kpad_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -172,6 +172,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index e2c0b024ce88f2593f4551b711b3821d8ae3879e..7f9fc272ec30576827ad62922afb0ff799ab89c3 100644 (file)
@@ -75,7 +75,7 @@ static struct platform_device bfin_isp1760_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -87,6 +87,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index 93bcf2abd1a15d4df394ed2a6b2d203dcbea620a..07d7a7ef8bd59c7387328e6add1b6426b0b51a52 100644 (file)
@@ -123,12 +123,14 @@ extern unsigned long empty_zero_page;
 #define PGDIR_MASK             (~(PGDIR_SIZE - 1))
 #define PTRS_PER_PGD           64
 
+#define __PAGETABLE_PUD_FOLDED
 #define PUD_SHIFT              26
 #define PTRS_PER_PUD           1
 #define PUD_SIZE               (1UL << PUD_SHIFT)
 #define PUD_MASK               (~(PUD_SIZE - 1))
 #define PUE_SIZE               256
 
+#define __PAGETABLE_PMD_FOLDED
 #define PMD_SHIFT              26
 #define PMD_SIZE               (1UL << PMD_SHIFT)
 #define PMD_MASK               (~(PMD_SIZE - 1))
index a2320a4a00424a7448fe9a27767a07e37c27defe..4377c89a57f5a66062c956773cb538ffbcb2475c 100644 (file)
@@ -31,7 +31,7 @@ typedef struct {
 
 #define get_ds()               (KERNEL_DS)
 #define get_fs()               (__current_thread_info->addr_limit)
-#define segment_eq(a,b)                ((a).seg == (b).seg)
+#define segment_eq(a, b)       ((a).seg == (b).seg)
 #define __kernel_ds_p()                segment_eq(get_fs(), KERNEL_DS)
 #define get_addr_limit()       (get_fs().seg)
 
index 103bedc59644a65a90d76d51b85c586478ef0e01..4f3fb6ccbf2139b3e23798df6615e34ec5e4d09a 100644 (file)
@@ -169,10 +169,11 @@ do {                                                                      \
        (err) = ia64_getreg(_IA64_REG_R8);                              \
        (val) = ia64_getreg(_IA64_REG_R9);                              \
 } while (0)
-# define __put_user_size(val, addr, n, err)                                                    \
-do {                                                                                           \
-       __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val));    \
-       (err) = ia64_getreg(_IA64_REG_R8);                                                      \
+# define __put_user_size(val, addr, n, err)                            \
+do {                                                                   \
+       __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE,    \
+                 (__force unsigned long) (val));                       \
+       (err) = ia64_getreg(_IA64_REG_R8);                              \
 } while (0)
 #endif /* !ASM_SUPPORTED */
 
@@ -197,7 +198,7 @@ extern void __get_user_unknown (void);
                      case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;  \
                      default: __get_user_unknown(); break;                             \
                }                                                                       \
-       (x) = (__typeof__(*(__gu_ptr))) __gu_val;                                       \
+       (x) = (__force __typeof__(*(__gu_ptr))) __gu_val;                               \
        __gu_err;                                                                       \
 })
 
index 8fd8ee70266a13cb61646b3b59530ee4181291cf..421e6ba3a173794d5a9cc8ba3b0465f2056a328f 100644 (file)
@@ -13,6 +13,7 @@
  * the M32R is two-level, so we don't really have any
  * PMD directory physically.
  */
+#define __PAGETABLE_PMD_FOLDED
 #define PMD_SHIFT      22
 #define PTRS_PER_PMD   1
 
index 84fe7ba53035dd3ec5b72c417fcab940b0427106..71adff209405e15b052e96d832ba5a26295bb98e 100644 (file)
@@ -54,7 +54,7 @@ static inline void set_fs(mm_segment_t s)
 
 #endif /* not CONFIG_MMU */
 
-#define segment_eq(a,b)        ((a).seg == (b).seg)
+#define segment_eq(a, b)       ((a).seg == (b).seg)
 
 #define __addr_ok(addr) \
        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
@@ -68,7 +68,7 @@ static inline void set_fs(mm_segment_t s)
  *
  * This needs 33-bit arithmetic. We have a carry...
  */
-#define __range_ok(addr,size) ({                                       \
+#define __range_ok(addr, size) ({                                      \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm (                                                           \
@@ -103,7 +103,7 @@ static inline void set_fs(mm_segment_t s)
  * this function, memory access functions may still return -EFAULT.
  */
 #ifdef CONFIG_MMU
-#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
+#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
 #else
 static inline int access_ok(int type, const void *addr, unsigned long size)
 {
@@ -167,8 +167,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * Returns zero on success, or -EFAULT on error.
  * On error, the variable @x is set to zero.
  */
-#define get_user(x,ptr)                                                        \
-       __get_user_check((x),(ptr),sizeof(*(ptr)))
+#define get_user(x, ptr)                                                       \
+       __get_user_check((x), (ptr), sizeof(*(ptr)))
 
 /**
  * put_user: - Write a simple value into user space.
@@ -186,8 +186,8 @@ extern int fixup_exception(struct pt_regs *regs);
  *
  * Returns zero on success, or -EFAULT on error.
  */
-#define put_user(x,ptr)                                                        \
-       __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define put_user(x, ptr)                                                       \
+       __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
@@ -209,41 +209,41 @@ extern int fixup_exception(struct pt_regs *regs);
  * Returns zero on success, or -EFAULT on error.
  * On error, the variable @x is set to zero.
  */
-#define __get_user(x,ptr) \
-       __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+       __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
-#define __get_user_nocheck(x,ptr,size)                                 \
+#define __get_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
        long __gu_err = 0;                                              \
        unsigned long __gu_val;                                         \
        might_fault();                                                  \
-       __get_user_size(__gu_val,(ptr),(size),__gu_err);                \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       __get_user_size(__gu_val, (ptr), (size), __gu_err);             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
 })
 
-#define __get_user_check(x,ptr,size)                                   \
+#define __get_user_check(x, ptr, size)                                 \
 ({                                                                     \
        long __gu_err = -EFAULT;                                        \
        unsigned long __gu_val = 0;                                     \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
        might_fault();                                                  \
-       if (access_ok(VERIFY_READ,__gu_addr,size))                      \
-               __get_user_size(__gu_val,__gu_addr,(size),__gu_err);    \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       if (access_ok(VERIFY_READ, __gu_addr, size))                    \
+               __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
 })
 
 extern long __get_user_bad(void);
 
-#define __get_user_size(x,ptr,size,retval)                             \
+#define __get_user_size(x, ptr, size, retval)                          \
 do {                                                                   \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
-         case 1: __get_user_asm(x,ptr,retval,"ub"); break;             \
-         case 2: __get_user_asm(x,ptr,retval,"uh"); break;             \
-         case 4: __get_user_asm(x,ptr,retval,""); break;               \
+         case 1: __get_user_asm(x, ptr, retval, "ub"); break;          \
+         case 2: __get_user_asm(x, ptr, retval, "uh"); break;          \
+         case 4: __get_user_asm(x, ptr, retval, ""); break;            \
          default: (x) = __get_user_bad();                              \
        }                                                               \
 } while (0)
@@ -288,26 +288,26 @@ do {                                                                      \
  *
  * Returns zero on success, or -EFAULT on error.
  */
-#define __put_user(x,ptr) \
-       __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
 
-#define __put_user_nocheck(x,ptr,size)                                 \
+#define __put_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
        long __pu_err;                                                  \
        might_fault();                                                  \
-       __put_user_size((x),(ptr),(size),__pu_err);                     \
+       __put_user_size((x), (ptr), (size), __pu_err);                  \
        __pu_err;                                                       \
 })
 
 
-#define __put_user_check(x,ptr,size)                                   \
+#define __put_user_check(x, ptr, size)                                 \
 ({                                                                     \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        might_fault();                                                  \
-       if (access_ok(VERIFY_WRITE,__pu_addr,size))                     \
-               __put_user_size((x),__pu_addr,(size),__pu_err);         \
+       if (access_ok(VERIFY_WRITE, __pu_addr, size))                   \
+               __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
 })
 
@@ -366,15 +366,15 @@ do {                                                                      \
 
 extern void __put_user_bad(void);
 
-#define __put_user_size(x,ptr,size,retval)                             \
+#define __put_user_size(x, ptr, size, retval)                          \
 do {                                                                   \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
-         case 1: __put_user_asm(x,ptr,retval,"b"); break;              \
-         case 2: __put_user_asm(x,ptr,retval,"h"); break;              \
-         case 4: __put_user_asm(x,ptr,retval,""); break;               \
-         case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
+         case 1: __put_user_asm(x, ptr, retval, "b"); break;           \
+         case 2: __put_user_asm(x, ptr, retval, "h"); break;           \
+         case 4: __put_user_asm(x, ptr, retval, ""); break;            \
+         case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\
          default: __put_user_bad();                                    \
        }                                                               \
 } while (0)
@@ -421,7 +421,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 /* Generic arbitrary sized copy.  */
 /* Return the number of bytes NOT copied.  */
-#define __copy_user(to,from,size)                                      \
+#define __copy_user(to, from, size)                                    \
 do {                                                                   \
        unsigned long __dst, __src, __c;                                \
        __asm__ __volatile__ (                                          \
@@ -478,7 +478,7 @@ do {                                                                        \
                : "r14", "memory");                                     \
 } while (0)
 
-#define __copy_user_zeroing(to,from,size)                              \
+#define __copy_user_zeroing(to, from, size)                            \
 do {                                                                   \
        unsigned long __dst, __src, __c;                                \
        __asm__ __volatile__ (                                          \
@@ -548,14 +548,14 @@ do {                                                                      \
 static inline unsigned long __generic_copy_from_user_nocheck(void *to,
        const void __user *from, unsigned long n)
 {
-       __copy_user_zeroing(to,from,n);
+       __copy_user_zeroing(to, from, n);
        return n;
 }
 
 static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
        const void *from, unsigned long n)
 {
-       __copy_user(to,from,n);
+       __copy_user(to, from, n);
        return n;
 }
 
@@ -576,8 +576,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-#define __copy_to_user(to,from,n)                      \
-       __generic_copy_to_user_nocheck((to),(from),(n))
+#define __copy_to_user(to, from, n)                    \
+       __generic_copy_to_user_nocheck((to), (from), (n))
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
@@ -595,10 +595,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-#define copy_to_user(to,from,n)                                \
+#define copy_to_user(to, from, n)                      \
 ({                                                     \
        might_fault();                                  \
-       __generic_copy_to_user((to),(from),(n));        \
+       __generic_copy_to_user((to), (from), (n));      \
 })
 
 /**
@@ -617,8 +617,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-#define __copy_from_user(to,from,n)                    \
-       __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_from_user(to, from, n)                  \
+       __generic_copy_from_user_nocheck((to), (from), (n))
 
 /**
  * copy_from_user: - Copy a block of data from user space.
@@ -636,10 +636,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-#define copy_from_user(to,from,n)                      \
+#define copy_from_user(to, from, n)                    \
 ({                                                     \
        might_fault();                                  \
-       __generic_copy_from_user((to),(from),(n));      \
+       __generic_copy_from_user((to), (from), (n));    \
 })
 
 long __must_check strncpy_from_user(char *dst, const char __user *src,
index 28a145bfbb7151a567dd825026eac10f07845e74..35ed4a9981aefb627ed785ce311198c59000c396 100644 (file)
  */
 #ifdef CONFIG_SUN3
 #define PTRS_PER_PTE   16
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
 #elif defined(CONFIG_COLDFIRE)
 #define PTRS_PER_PTE   512
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   1024
 #else
index 0fa80e97ed2de8ca503e3cb1e92f0f33d15dc0cb..98216b8111f08b764abcff1a472c26c54064b6bb 100644 (file)
@@ -58,7 +58,7 @@ static inline mm_segment_t get_ds(void)
 #define set_fs(x)      (current_thread_info()->addr_limit = (x))
 #endif
 
-#define segment_eq(a,b)        ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)
 
 #endif /* __ASSEMBLY__ */
 
index 15901db435b90675dd5ab9f931dc331632c04705..d228601b3afce762cd94360240e6126647fdf6a8 100644 (file)
@@ -128,25 +128,25 @@ asm volatile ("\n"                                        \
 #define put_user(x, ptr)       __put_user(x, ptr)
 
 
-#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({    \
-       type __gu_val;                                          \
-       asm volatile ("\n"                                      \
-               "1:     "MOVES"."#bwl"  %2,%1\n"                \
-               "2:\n"                                          \
-               "       .section .fixup,\"ax\"\n"               \
-               "       .even\n"                                \
-               "10:    move.l  %3,%0\n"                        \
-               "       sub.l   %1,%1\n"                        \
-               "       jra     2b\n"                           \
-               "       .previous\n"                            \
-               "\n"                                            \
-               "       .section __ex_table,\"a\"\n"            \
-               "       .align  4\n"                            \
-               "       .long   1b,10b\n"                       \
-               "       .previous"                              \
-               : "+d" (res), "=&" #reg (__gu_val)              \
-               : "m" (*(ptr)), "i" (err));                     \
-       (x) = (typeof(*(ptr)))(unsigned long)__gu_val;          \
+#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({            \
+       type __gu_val;                                                  \
+       asm volatile ("\n"                                              \
+               "1:     "MOVES"."#bwl"  %2,%1\n"                        \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .even\n"                                        \
+               "10:    move.l  %3,%0\n"                                \
+               "       sub.l   %1,%1\n"                                \
+               "       jra     2b\n"                                   \
+               "       .previous\n"                                    \
+               "\n"                                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  4\n"                                    \
+               "       .long   1b,10b\n"                               \
+               "       .previous"                                      \
+               : "+d" (res), "=&" #reg (__gu_val)                      \
+               : "m" (*(ptr)), "i" (err));                             \
+       (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;  \
 })
 
 #define __get_user(x, ptr)                                             \
@@ -188,7 +188,7 @@ asm volatile ("\n"                                  \
                          "+a" (__gu_ptr)                               \
                        : "i" (-EFAULT)                                 \
                        : "memory");                                    \
-               (x) = (typeof(*(ptr)))__gu_val;                         \
+               (x) = (__force typeof(*(ptr)))__gu_val;                 \
                break;                                                  \
            }   */                                                      \
        default:                                                        \
index 881071c0794221475308604464cd7bd441b95945..13272fd5a5baec8e3b1a4de778a6982abf0adae7 100644 (file)
@@ -149,8 +149,8 @@ extern void exit_thread(void);
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define        KSTK_EIP(tsk)   ((tsk)->thread.kernel_context->CurrPC)
-#define        KSTK_ESP(tsk)   ((tsk)->thread.kernel_context->AX[0].U0)
+#define        KSTK_EIP(tsk)   (task_pt_regs(tsk)->ctx.CurrPC)
+#define        KSTK_ESP(tsk)   (task_pt_regs(tsk)->ctx.AX[0].U0)
 
 #define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
 
index 0748b0a9798684c40eb02f5782472862245d9b78..8282cbce7e399a84488e675af0751341d24f0205 100644 (file)
@@ -107,18 +107,23 @@ extern long __put_user_asm_w(unsigned int x, void __user *addr);
 extern long __put_user_asm_d(unsigned int x, void __user *addr);
 extern long __put_user_asm_l(unsigned long long x, void __user *addr);
 
-#define __put_user_size(x, ptr, size, retval)                  \
-do {                                                            \
-       retval = 0;                                             \
-       switch (size) {                                         \
+#define __put_user_size(x, ptr, size, retval)                          \
+do {                                                                    \
+       retval = 0;                                                     \
+       switch (size) {                                                 \
        case 1:                                                         \
-               retval = __put_user_asm_b((unsigned int)x, ptr); break; \
+               retval = __put_user_asm_b((__force unsigned int)x, ptr);\
+               break;                                                  \
        case 2:                                                         \
-               retval = __put_user_asm_w((unsigned int)x, ptr); break; \
+               retval = __put_user_asm_w((__force unsigned int)x, ptr);\
+               break;                                                  \
        case 4:                                                         \
-               retval = __put_user_asm_d((unsigned int)x, ptr); break; \
+               retval = __put_user_asm_d((__force unsigned int)x, ptr);\
+               break;                                                  \
        case 8:                                                         \
-               retval = __put_user_asm_l((unsigned long long)x, ptr); break; \
+               retval = __put_user_asm_l((__force unsigned long long)x,\
+                                         ptr);                         \
+               break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
@@ -135,7 +140,7 @@ extern long __get_user_bad(void);
 ({                                                              \
        long __gu_err, __gu_val;                                \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
-       (x) = (__typeof__(*(ptr)))__gu_val;                     \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
 })
 
@@ -145,7 +150,7 @@ extern long __get_user_bad(void);
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
        if (access_ok(VERIFY_READ, __gu_addr, size))                    \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
 })
 
index 843713c05b79fe69f8bbc057a17a5e200dfc54da..c7a16904cd03c705333f645419ec07888fc6fd87 100644 (file)
@@ -54,6 +54,7 @@ config MIPS
        select CPU_PM if CPU_IDLE
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_BINFMT_ELF_STATE
+       select SYSCTL_EXCEPTION_TRACE
 
 menu "Machine selection"
 
@@ -376,8 +377,10 @@ config MIPS_MALTA
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
        select SYS_HAS_CPU_MIPS32_R3_5
+       select SYS_HAS_CPU_MIPS32_R6
        select SYS_HAS_CPU_MIPS64_R1
        select SYS_HAS_CPU_MIPS64_R2
+       select SYS_HAS_CPU_MIPS64_R6
        select SYS_HAS_CPU_NEVADA
        select SYS_HAS_CPU_RM7000
        select SYS_SUPPORTS_32BIT_KERNEL
@@ -1033,6 +1036,9 @@ config MIPS_MACHINE
 config NO_IOPORT_MAP
        def_bool n
 
+config GENERIC_CSUM
+       bool
+
 config GENERIC_ISA_DMA
        bool
        select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
@@ -1146,6 +1152,9 @@ config SOC_PNX8335
        bool
        select SOC_PNX833X
 
+config MIPS_SPRAM
+       bool
+
 config SWAP_IO_SPACE
        bool
 
@@ -1304,6 +1313,22 @@ config CPU_MIPS32_R2
          specific type of processor in your system, choose those that one
          otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
+config CPU_MIPS32_R6
+       bool "MIPS32 Release 6 (EXPERIMENTAL)"
+       depends on SYS_HAS_CPU_MIPS32_R6
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       select HAVE_KVM
+       select MIPS_O32_FP64_SUPPORT
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS32 architecture.  New MIPS processors, starting with the Warrior
+         family, are based on a MIPS32r6 processor. If you own an older
+         processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
 config CPU_MIPS64_R1
        bool "MIPS64 Release 1"
        depends on SYS_HAS_CPU_MIPS64_R1
@@ -1339,6 +1364,21 @@ config CPU_MIPS64_R2
          specific type of processor in your system, choose those that one
          otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
+config CPU_MIPS64_R6
+       bool "MIPS64 Release 6 (EXPERIMENTAL)"
+       depends on SYS_HAS_CPU_MIPS64_R6
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_64BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS64 architecture.  New MIPS processors, starting with the Warrior
+         family, are based on a MIPS64r6 processor. If you own an older
+         processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
+
 config CPU_R3000
        bool "R3000"
        depends on SYS_HAS_CPU_R3000
@@ -1539,7 +1579,7 @@ endchoice
 config CPU_MIPS32_3_5_FEATURES
        bool "MIPS32 Release 3.5 Features"
        depends on SYS_HAS_CPU_MIPS32_R3_5
-       depends on CPU_MIPS32_R2
+       depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS32 architecture including features from the 3.5 release such as
@@ -1659,12 +1699,18 @@ config SYS_HAS_CPU_MIPS32_R2
 config SYS_HAS_CPU_MIPS32_R3_5
        bool
 
+config SYS_HAS_CPU_MIPS32_R6
+       bool
+
 config SYS_HAS_CPU_MIPS64_R1
        bool
 
 config SYS_HAS_CPU_MIPS64_R2
        bool
 
+config SYS_HAS_CPU_MIPS64_R6
+       bool
+
 config SYS_HAS_CPU_R3000
        bool
 
@@ -1764,11 +1810,11 @@ endmenu
 #
 config CPU_MIPS32
        bool
-       default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+       default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
 
 config CPU_MIPS64
        bool
-       default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+       default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
 
 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1780,6 +1826,12 @@ config CPU_MIPSR1
 config CPU_MIPSR2
        bool
        default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+       select MIPS_SPRAM
+
+config CPU_MIPSR6
+       bool
+       default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+       select MIPS_SPRAM
 
 config EVA
        bool
@@ -2013,6 +2065,19 @@ config MIPS_MT_FPAFF
        default y
        depends on MIPS_MT_SMP
 
+config MIPSR2_TO_R6_EMULATOR
+       bool "MIPS R2-to-R6 emulator"
+       depends on CPU_MIPSR6 && !SMP
+       default y
+       help
+         Choose this option if you want to run non-R6 MIPS userland code.
+         Even if you say 'Y' here, the emulator will still be disabled by
+         default. You can enable it using the 'mipsr2emul' kernel option.
+         The only reason this is a build-time option is to save ~14K from the
+         final kernel image.
+comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+       depends on SMP && CPU_MIPSR6
+
 config MIPS_VPE_LOADER
        bool "VPE loader support."
        depends on SYS_SUPPORTS_MULTITHREADING && MODULES
@@ -2148,7 +2213,7 @@ config CPU_HAS_SMARTMIPS
          here.
 
 config CPU_MICROMIPS
-       depends on 32BIT && SYS_SUPPORTS_MICROMIPS
+       depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
        bool "microMIPS"
        help
          When this option is enabled the kernel will be built using the
index 88a9f433f6fc3ca7affc6c9a011622d26e10ada9..3a2b775e845893513e2ab187ca95955e0be17848 100644 (file)
@@ -122,17 +122,4 @@ config SPINLOCK_TEST
        help
          Add several files to the debugfs to test spinlock speed.
 
-config FP32XX_HYBRID_FPRS
-       bool "Run FP32 & FPXX code with hybrid FPRs"
-       depends on MIPS_O32_FP64_SUPPORT
-       help
-         The hybrid FPR scheme is normally used only when a program needs to
-         execute a mix of FP32 & FP64A code, since the trapping & emulation
-         that it entails is expensive. When enabled, this option will lead
-         to the kernel running programs which use the FP32 & FPXX FP ABIs
-         using the hybrid FPR scheme, which can be useful for debugging
-         purposes.
-
-         If unsure, say N.
-
 endmenu
index 2563a088d3b867037fa4f46332ca010f6984547d..8f57fc72d62c8334d35e91ca48f1aa6ead7d08d4 100644 (file)
@@ -122,26 +122,8 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
-# For smartmips configurations, there are hundreds of warnings due to ISA overrides
-# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
-# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
-# similar directives in the kernel will spam the build logs with the following warnings:
-# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
-# or
-# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
-# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
-# been fixed properly.
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
-
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
-
-ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa  := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
-cflags-$(toolchain-msa)                += -DTOOLCHAIN_SUPPORTS_MSA
-endif
-
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
@@ -156,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1)    += $(call cc-option,-march=mips32,-mips32 -U_MIPS
                        -Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
                        -Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
                        -Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
                        -Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)     += -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)     += $(call cc-option,-march=r5400,-march=r5000) \
                        -Wa,--trap
@@ -182,6 +166,16 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
 endif
 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
 cflags-$(CONFIG_CPU_BMIPS)     += -march=mips32 -Wa,-mips32 -Wa,--trap
+#
+# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+# as MIPS64 R1; older versions as just R1.  This leaves the possibility open
+# that GCC might generate R2 code for -march=loongson3a which then is rejected
+# by GAS.  The cc-option can't probe for this behaviour so -march=loongson3a
+# can't easily be used safely within the kbuild framework.
+#
+cflags-$(CONFIG_CPU_LOONGSON3)  +=                                     \
+       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+       -Wa,-mips64r2 -Wa,--trap
 
 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
@@ -194,6 +188,23 @@ KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
 endif
 endif
 
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+mips-cflags                            := $(cflags-y)
+ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
+smartmips-ase                          := $(call cc-option-yn,$(mips-cflags) -msmartmips)
+cflags-$(smartmips-ase)                        += -msmartmips -Wa,--no-warn
+endif
+ifeq ($(CONFIG_CPU_MICROMIPS),y)
+micromips-ase                          := $(call cc-option-yn,$(mips-cflags) -mmicromips)
+cflags-$(micromips-ase)                        += -mmicromips
+endif
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa                          := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
+cflags-$(toolchain-msa)                        += -DTOOLCHAIN_SUPPORTS_MSA
+endif
+
 #
 # Firmware support
 #
@@ -287,7 +298,11 @@ boot-y                     += vmlinux.ecoff
 boot-y                 += vmlinux.srec
 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
 boot-y                 += uImage
+boot-y                 += uImage.bin
+boot-y                 += uImage.bz2
 boot-y                 += uImage.gz
+boot-y                 += uImage.lzma
+boot-y                 += uImage.lzo
 endif
 
 # compressed boot image targets (arch/mips/boot/compressed/)
@@ -386,7 +401,11 @@ define archhelp
        echo '  vmlinuz.bin          - Raw binary zboot image'
        echo '  vmlinuz.srec         - SREC zboot image'
        echo '  uImage               - U-Boot image'
+       echo '  uImage.bin           - U-Boot image (uncompressed)'
+       echo '  uImage.bz2           - U-Boot image (bz2)'
        echo '  uImage.gz            - U-Boot image (gzip)'
+       echo '  uImage.lzma          - U-Boot image (lzma)'
+       echo '  uImage.lzo           - U-Boot image (lzo)'
        echo '  dtbs                 - Device-tree blobs for enabled boards'
        echo
        echo '  These will be default as appropriate for a configured platform.'
index 48a9dfc55b51aa4a3819bae6c686b0cd5f38a7f0..6a98d2cb402ccb9c69ad505d819ed3f82f6e4932 100644 (file)
@@ -127,12 +127,20 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
                t = 396000000;
        else {
                t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
+               if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
+                       t &= 0x3f;
                t *= parent_rate;
        }
 
        return t;
 }
 
+void __init alchemy_set_lpj(void)
+{
+       preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
+       preset_lpj /= 2 * HZ;
+}
+
 static struct clk_ops alchemy_clkops_cpu = {
        .recalc_rate    = alchemy_clk_cpu_recalc,
 };
@@ -315,17 +323,26 @@ static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
 
 /* lrclk: external synchronous static bus clock ***********************/
 
-static struct clk __init *alchemy_clk_setup_lrclk(const char *pn)
+static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
 {
-       /* MEM_STCFG0[15:13] = divisor.
+       /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
+        * otherwise lrclk=pclk/4.
+        * All other variants: MEM_STCFG0[15:13] = divisor.
         * L/RCLK = periph_clk / (divisor + 1)
         * On Au1000, Au1500, Au1100 it's called LCLK,
         * on later models it's called RCLK, but it's the same thing.
         */
        struct clk *c;
-       unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13;
+       unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
 
-       v = (v & 7) + 1;
+       switch (t) {
+       case ALCHEMY_CPU_AU1000:
+       case ALCHEMY_CPU_AU1500:
+               v = 4 + ((v >> 11) & 1);
+               break;
+       default:        /* all other models */
+               v = ((v >> 13) & 7) + 1;
+       }
        c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
                                      pn, 0, 1, v);
        if (!IS_ERR(c))
@@ -546,6 +563,8 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
 }
 
 static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -678,6 +697,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
 }
 
 static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -897,6 +918,8 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
 }
 
 static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -1060,7 +1083,7 @@ static int __init alchemy_clk_init(void)
        ERRCK(c)
 
        /* L/RCLK: external static bus clock for synchronous mode */
-       c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK);
+       c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
        ERRCK(c)
 
        /* Frequency dividers 0-5 */
index 4e72daf12c325063a62da8c9255cee504a4e5fd4..2902138b3e0f56f639896c571e5bc87172f74d2f 100644 (file)
 #include <au1000.h>
 
 extern void __init board_setup(void);
-extern void set_cpuspec(void);
+extern void __init alchemy_set_lpj(void);
 
 void __init plat_mem_setup(void)
 {
+       alchemy_set_lpj();
+
        if (au1xxx_cpu_needs_config_od())
                /* Various early Au1xx0 errata corrected by this */
                set_c0_config(1 << 19); /* Set Config[OD] */
index 0fb5134fb83247a2395162ffc3b93597cc7d7614..fd94fe849af680f3edc24a3dcb184c308fa0613e 100644 (file)
@@ -180,7 +180,7 @@ static int __init intc_of_init(struct device_node *node,
 
 static struct of_device_id of_irq_ids[] __initdata = {
        { .compatible = "mti,cpu-interrupt-controller",
-         .data = mips_cpu_intc_init },
+         .data = mips_cpu_irq_of_init },
        { .compatible = "brcm,bcm3384-intc",
          .data = intc_of_init },
        {},
index 1466c00260936c7e387877c8c9e296d430cd6240..acb1988f354edc58072399a076656c0f2ffd149e 100644 (file)
@@ -23,6 +23,12 @@ strip-flags   := $(addprefix --remove-section=,$(drop-sections))
 
 hostprogs-y := elf2ecoff
 
+suffix-y                       := bin
+suffix-$(CONFIG_KERNEL_BZIP2)  := bz2
+suffix-$(CONFIG_KERNEL_GZIP)   := gz
+suffix-$(CONFIG_KERNEL_LZMA)   := lzma
+suffix-$(CONFIG_KERNEL_LZO)    := lzo
+
 targets := vmlinux.ecoff
 quiet_cmd_ecoff = ECOFF          $@
       cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
@@ -44,14 +50,53 @@ $(obj)/vmlinux.srec: $(VMLINUX) FORCE
 UIMAGE_LOADADDR  = $(VMLINUX_LOAD_ADDRESS)
 UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
 
+#
+# Compressed vmlinux images
+#
+
+extra-y += vmlinux.bin.bz2
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
+extra-y += vmlinux.bin.lzo
+
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,bzip2)
+
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzma)
+
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzo)
+
+#
+# Compressed u-boot images
+#
+
+targets += uImage
+targets += uImage.bin
+targets += uImage.bz2
 targets += uImage.gz
+targets += uImage.lzma
+targets += uImage.lzo
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,uimage,none)
+
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
+       $(call if_changed,uimage,bzip2)
+
 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,uimage,gzip)
 
-targets += uImage
-$(obj)/uImage: $(obj)/uImage.gz FORCE
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+       $(call if_changed,uimage,lzma)
+
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
+       $(call if_changed,uimage,lzo)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
        @ln -sf $(notdir $<) $@
        @echo '  Image $@ is ready'
index 2a4c52e27f416e146e5c268edad9fd867e79c5fe..266c8137e859d418faed5e6fa3a8549b0aeff9df 100644 (file)
@@ -268,7 +268,6 @@ int main(int argc, char *argv[])
        Elf32_Ehdr ex;
        Elf32_Phdr *ph;
        Elf32_Shdr *sh;
-       char *shstrtab;
        int i, pad;
        struct sect text, data, bss;
        struct filehdr efh;
@@ -336,9 +335,6 @@ int main(int argc, char *argv[])
                                     "sh");
        if (must_convert_endian)
                convert_elf_shdrs(sh, ex.e_shnum);
-       /* Read in the section string table. */
-       shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
-                           sh[ex.e_shstrndx].sh_size, "shstrtab");
 
        /* Figure out if we can cram the program header into an ECOFF
           header...  Basically, we can't handle anything but loadable
index b752c4ed0b797938c6ff58e7e1583982cdbdd5ac..1882e6475dd093d546c70cf9f7345f8121589921 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
-
+#include <asm/octeon/cvmx-rst-defs.h>
 
 static u64 f;
 static u64 rdiv;
@@ -39,11 +39,20 @@ void __init octeon_setup_delays(void)
 
        if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
                union cvmx_mio_rst_boot rst_boot;
+
                rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
                rdiv = rst_boot.s.c_mul;        /* CPU clock */
                sdiv = rst_boot.s.pnr_mul;      /* I/O clock */
                f = (0x8000000000000000ull / sdiv) * 2;
+       } else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+               union cvmx_rst_boot rst_boot;
+
+               rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+               rdiv = rst_boot.s.c_mul;        /* CPU clock */
+               sdiv = rst_boot.s.pnr_mul;      /* I/O clock */
+               f = (0x8000000000000000ull / sdiv) * 2;
        }
+
 }
 
 /*
index 3778655c4a375215fddea8fd0969960d096d0d9c..7d8987818ccf51ed6aa82463fbe3de5dd599557e 100644 (file)
@@ -276,7 +276,7 @@ void __init plat_swiotlb_setup(void)
                        continue;
 
                /* These addresses map low for PCI. */
-               if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX))
+               if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
                        continue;
 
                addr_size += e->size;
@@ -308,7 +308,7 @@ void __init plat_swiotlb_setup(void)
 #endif
 #ifdef CONFIG_USB_OCTEON_OHCI
        /* OCTEON II ohci is only 32-bit. */
-       if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
+       if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
                swiotlbsize = 64 * (1<<20);
 #endif
        swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
index 5dfef84b95767502a8cf9f5b4578b6f97a3bab76..9eb0feef441721362ee5e699e2788c1ffaf3b391 100644 (file)
@@ -767,7 +767,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
                break;
        }
        /* Most boards except NIC10e use a 12MHz crystal */
-       if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+       if (OCTEON_IS_OCTEON2())
                return USB_CLOCK_TYPE_CRYSTAL_12;
        return USB_CLOCK_TYPE_REF_48;
 }
index 2bc4aa95944e462d84673bb974e2dde119fb6bdf..10f762557b925d419de87351836f25db4004f04f 100644 (file)
@@ -3,12 +3,14 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2012 Cavium, Inc.
+ * Copyright (C) 2004-2014 Cavium, Inc.
  */
 
+#include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/bitops.h>
+#include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
@@ -22,16 +24,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
 
+struct octeon_irq_ciu_domain_data {
+       int num_sum;  /* number of sum registers (2 or 3). */
+};
+
 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
 
-union octeon_ciu_chip_data {
-       void *p;
-       unsigned long l;
-       struct {
-               unsigned long line:6;
-               unsigned long bit:6;
-               unsigned long gpio_line:6;
-       } s;
+struct octeon_ciu_chip_data {
+       union {
+               struct {                /* only used for ciu3 */
+                       u64 ciu3_addr;
+                       unsigned int intsn;
+               };
+               struct {                /* only used for ciu/ciu2 */
+                       u8 line;
+                       u8 bit;
+                       u8 gpio_line;
+               };
+       };
+       int current_cpu;        /* Next CPU expected to take this irq */
 };
 
 struct octeon_core_chip_data {
@@ -45,27 +56,40 @@ struct octeon_core_chip_data {
 
 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
 
-static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
-                                      struct irq_chip *chip,
-                                      irq_flow_handler_t handler)
+static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
+                                     struct irq_chip *chip,
+                                     irq_flow_handler_t handler)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
+
+       cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+       if (!cd)
+               return -ENOMEM;
 
        irq_set_chip_and_handler(irq, chip, handler);
 
-       cd.l = 0;
-       cd.s.line = line;
-       cd.s.bit = bit;
-       cd.s.gpio_line = gpio_line;
+       cd->line = line;
+       cd->bit = bit;
+       cd->gpio_line = gpio_line;
 
-       irq_set_chip_data(irq, cd.p);
+       irq_set_chip_data(irq, cd);
        octeon_irq_ciu_to_irq[line][bit] = irq;
+       return 0;
 }
 
-static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
-                                        int irq, int line, int bit)
+static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
 {
-       irq_domain_associate(domain, irq, line << 6 | bit);
+       struct irq_data *data = irq_get_irq_data(irq);
+       struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+       irq_set_chip_data(irq, NULL);
+       kfree(cd);
+}
+
+static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+                                       int irq, int line, int bit)
+{
+       return irq_domain_associate(domain, irq, line << 6 | bit);
 }
 
 static int octeon_coreid_for_cpu(int cpu)
@@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data)
 #ifdef CONFIG_SMP
        int cpu;
        int weight = cpumask_weight(data->affinity);
+       struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
        if (weight > 1) {
-               cpu = smp_processor_id();
+               cpu = cd->current_cpu;
                for (;;) {
                        cpu = cpumask_next(cpu, data->affinity);
                        if (cpu >= nr_cpu_ids) {
@@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data)
        } else {
                cpu = smp_processor_id();
        }
+       cd->current_cpu = cpu;
        return cpu;
 #else
        return smp_processor_id();
@@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
        int coreid = octeon_coreid_for_cpu(cpu);
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
        } else {
                pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
 {
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
        } else {
                pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
 {
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
        } else {
                pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data)
        unsigned long flags;
        unsigned long *pen;
        int cpu;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                raw_spin_lock_irqsave(lock, flags);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data)
        unsigned long flags;
        unsigned long *pen;
        int cpu;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                raw_spin_lock_irqsave(lock, flags);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -397,26 +423,87 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
 {
        u64 mask;
        int cpu = next_cpu_for_irq(data);
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
        /*
         * Called under the desc lock, so these should never get out
         * of sync.
         */
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = octeon_coreid_for_cpu(cpu) * 2;
-               set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+               set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-               set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+               set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
 }
 
+/*
+ * Enable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+}
+
+/*
+ * Disable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+}
+
+static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
+}
+
+static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
+{
+       int cpu;
+       struct octeon_ciu_chip_data *cd;
+       u64 mask;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       for_each_online_cpu(cpu) {
+               int coreid = octeon_coreid_for_cpu(cpu);
+
+               cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
+       }
+}
+
 /*
  * Enable the irq on the current CPU for chips that
  * have the EN*_W1{S,C} registers.
@@ -424,18 +511,18 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
-               set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+               set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
-               set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+               set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
 }
@@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
-               clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+               clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
-               clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+               clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
 }
@@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_ack(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
                cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
        } else {
@@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
-                       clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+                       clear_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-                       clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+                       clear_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                }
        }
@@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
-                       set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+                       set_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-                       set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+                       set_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                }
        }
@@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 static void octeon_irq_gpio_setup(struct irq_data *data)
 {
        union cvmx_gpio_bit_cfgx cfg;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        u32 t = irqd_get_trigger_type(data);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        cfg.u64 = 0;
        cfg.s.int_en = 1;
@@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data)
        cfg.s.fil_cnt = 7;
        cfg.s.fil_sel = 3;
 
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
 }
 
 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
@@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
 
 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cd = irq_data_get_irq_chip_data(data);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu_disable_all_v2(data);
 }
 
 static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cd = irq_data_get_irq_chip_data(data);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu_disable_all(data);
 }
 
 static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        u64 mask;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.gpio_line);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->gpio_line);
 
        cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
 }
 
-static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
+static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
 {
        if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
                handle_edge_irq(irq, desc);
@@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        unsigned long *pen;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        /*
         * For non-v2 CIU, we will allow only single CPU affinity.
@@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
                raw_spin_lock_irqsave(lock, flags);
 
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = 0;
-                       __set_bit(cd.s.bit, pen);
+                       __set_bit(cd->bit, pen);
                } else {
-                       __clear_bit(cd.s.bit, pen);
+                       __clear_bit(cd->bit, pen);
                }
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                 */
                wmb();
 
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
        if (!enable_one)
                return 0;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << cd.s.bit;
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                        int index = octeon_coreid_for_cpu(cpu) * 2;
                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = false;
-                               set_bit(cd.s.bit, pen);
+                               set_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                        } else {
-                               clear_bit(cd.s.bit, pen);
+                               clear_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                        }
                }
@@ -733,22 +824,62 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = false;
-                               set_bit(cd.s.bit, pen);
+                               set_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                        } else {
-                               clear_bit(cd.s.bit, pen);
+                               clear_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                        }
                }
        }
        return 0;
 }
+
+static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
+                                           const struct cpumask *dest,
+                                           bool force)
+{
+       int cpu;
+       bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+       u64 mask;
+       struct octeon_ciu_chip_data *cd;
+
+       if (!enable_one)
+               return 0;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
+
+       for_each_online_cpu(cpu) {
+               int index = octeon_coreid_for_cpu(cpu);
+
+               if (cpumask_test_cpu(cpu, dest) && enable_one) {
+                       enable_one = false;
+                       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+               } else {
+                       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+               }
+       }
+       return 0;
+}
 #endif
 
 /*
  * Newer octeon chips have support for lockless CIU operation.
  */
/* Level-triggered CIU chip for W1S/W1C-capable parts; no .irq_ack. */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
+
+static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
@@ -761,7 +892,47 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
 #endif
 };
 
/*
 * irq_chips for sources routed through the CIU SUM2/EN2 (IP4)
 * registers; the W1S/W1C register forms avoid any locking.
 */
/* Level-triggered sum2 (IP4) chip; no .irq_ack. */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
+
/* Edge-triggered sum2 (IP4) chip; adds .irq_ack to clear SUM2 bits. */
static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
+
/* Level-triggered CIU chip for older parts (spinlock-based helpers). */
static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
+
+static struct irq_chip octeon_irq_chip_ciu_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
@@ -970,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
                               unsigned int *out_type)
 {
        unsigned int ciu, bit;
+       struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
        ciu = intspec[0];
        bit = intspec[1];
 
-       if (ciu > 1 || bit > 63)
+       if (ciu >= dd->num_sum || bit > 63)
                return -EINVAL;
 
        *out_hwirq = (ciu << 6) | bit;
@@ -984,6 +1156,7 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
 }
 
 static struct irq_chip *octeon_irq_ciu_chip;
+static struct irq_chip *octeon_irq_ciu_chip_edge;
 static struct irq_chip *octeon_irq_gpio_chip;
 
 static bool octeon_irq_virq_in_range(unsigned int virq)
@@ -999,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq)
 static int octeon_irq_ciu_map(struct irq_domain *d,
                              unsigned int virq, irq_hw_number_t hw)
 {
+       int rv;
        unsigned int line = hw >> 6;
        unsigned int bit = hw & 63;
+       struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
@@ -1009,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
        if (line == 0 && bit >= 16 && bit <32)
                return 0;
 
-       if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
+       if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
-       if (octeon_irq_ciu_is_edge(line, bit))
-               octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          octeon_irq_ciu_chip,
-                                          handle_edge_irq);
-       else
-               octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          octeon_irq_ciu_chip,
-                                          handle_level_irq);
-
-       return 0;
+       if (line == 2) {
+               if (octeon_irq_ciu_is_edge(line, bit))
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               &octeon_irq_chip_ciu_sum2_edge,
+                               handle_edge_irq);
+               else
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               &octeon_irq_chip_ciu_sum2,
+                               handle_level_irq);
+       } else {
+               if (octeon_irq_ciu_is_edge(line, bit))
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               octeon_irq_ciu_chip_edge,
+                               handle_edge_irq);
+               else
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               octeon_irq_ciu_chip,
+                               handle_level_irq);
+       }
+       return rv;
 }
 
-static int octeon_irq_gpio_map_common(struct irq_domain *d,
-                                     unsigned int virq, irq_hw_number_t hw,
-                                     int line_limit, struct irq_chip *chip)
+static int octeon_irq_gpio_map(struct irq_domain *d,
+                              unsigned int virq, irq_hw_number_t hw)
 {
        struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
        unsigned int line, bit;
+       int r;
 
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
 
        line = (hw + gpiod->base_hwirq) >> 6;
        bit = (hw + gpiod->base_hwirq) & 63;
-       if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
+       if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+               octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
-       octeon_irq_set_ciu_mapping(virq, line, bit, hw,
-                                  chip, octeon_irq_handle_gpio);
-       return 0;
-}
-
-static int octeon_irq_gpio_map(struct irq_domain *d,
-                              unsigned int virq, irq_hw_number_t hw)
-{
-       return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
+       r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
+               octeon_irq_gpio_chip, octeon_irq_handle_trigger);
+       return r;
 }
 
 static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
        .map = octeon_irq_ciu_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_ciu_xlat,
 };
 
 static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
        .map = octeon_irq_gpio_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_gpio_xlat,
 };
 
@@ -1095,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void)
        }
 }
 
+static void octeon_irq_ip4_ciu(void)
+{
+       int coreid = cvmx_get_core_num();
+       u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
+       u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
+
+       ciu_sum &= ciu_en;
+       if (likely(ciu_sum)) {
+               int bit = fls64(ciu_sum) - 1;
+               int irq = octeon_irq_ciu_to_irq[2][bit];
+
+               if (likely(irq))
+                       do_IRQ(irq);
+               else
+                       spurious_interrupt();
+       } else {
+               spurious_interrupt();
+       }
+}
+
 static bool octeon_irq_use_ip4;
 
 static void octeon_irq_local_enable_ip4(void *arg)
@@ -1176,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void)
 
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-       clear_c0_status(STATUSF_IP4);
+       if (octeon_irq_use_ip4)
+               set_c0_status(STATUSF_IP4);
+       else
+               clear_c0_status(STATUSF_IP4);
 }
 
 static void octeon_irq_setup_secondary_ciu2(void)
@@ -1192,95 +1397,194 @@ static void octeon_irq_setup_secondary_ciu2(void)
                clear_c0_status(STATUSF_IP4);
 }
 
-static void __init octeon_irq_init_ciu(void)
+static int __init octeon_irq_init_ciu(
+       struct device_node *ciu_node, struct device_node *parent)
 {
-       unsigned int i;
+       unsigned int i, r;
        struct irq_chip *chip;
+       struct irq_chip *chip_edge;
        struct irq_chip *chip_mbox;
        struct irq_chip *chip_wd;
-       struct device_node *gpio_node;
-       struct device_node *ciu_node;
        struct irq_domain *ciu_domain = NULL;
+       struct octeon_irq_ciu_domain_data *dd;
+
+       dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+       if (!dd)
+               return -ENOMEM;
 
        octeon_irq_init_ciu_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
 
        octeon_irq_ip2 = octeon_irq_ip2_ciu;
        octeon_irq_ip3 = octeon_irq_ip3_ciu;
+       if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
+               && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+               octeon_irq_ip4 =  octeon_irq_ip4_ciu;
+               dd->num_sum = 3;
+               octeon_irq_use_ip4 = true;
+       } else {
+               octeon_irq_ip4 = octeon_irq_ip4_mask;
+               dd->num_sum = 2;
+               octeon_irq_use_ip4 = false;
+       }
        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
-           OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+           OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
                chip = &octeon_irq_chip_ciu_v2;
+               chip_edge = &octeon_irq_chip_ciu_v2_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
                chip_wd = &octeon_irq_chip_ciu_wd_v2;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
        } else {
                chip = &octeon_irq_chip_ciu;
+               chip_edge = &octeon_irq_chip_ciu_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox;
                chip_wd = &octeon_irq_chip_ciu_wd;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
        }
        octeon_irq_ciu_chip = chip;
-       octeon_irq_ip4 = octeon_irq_ip4_mask;
+       octeon_irq_ciu_chip_edge = chip_edge;
 
        /* Mips internal */
        octeon_irq_init_core();
 
-       gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-       if (gpio_node) {
-               struct octeon_irq_gpio_domain_data *gpiod;
-
-               gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-               if (gpiod) {
-                       /* gpio domain host_data is the base hwirq number. */
-                       gpiod->base_hwirq = 16;
-                       irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
-                       of_node_put(gpio_node);
-               } else
-                       pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-       } else
-               pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-       ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
-       if (ciu_node) {
-               ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
-               irq_set_default_host(ciu_domain);
-               of_node_put(ciu_node);
-       } else
-               panic("Cannot find device node for cavium,octeon-3860-ciu.");
+       ciu_domain = irq_domain_add_tree(
+               ciu_node, &octeon_irq_domain_ciu_ops, dd);
+       irq_set_default_host(ciu_domain);
 
        /* CIU_0 */
-       for (i = 0; i < 16; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+       for (i = 0; i < 16; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+               if (r)
+                       goto err;
+       }
+
+       r = octeon_irq_set_ciu_mapping(
+               OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
+       if (r)
+               goto err;
+       r = octeon_irq_set_ciu_mapping(
+               OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+       if (r)
+               goto err;
+
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+               if (r)
+                       goto err;
+       }
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
+       if (r)
+               goto err;
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+       if (r)
+               goto err;
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+               if (r)
+                       goto err;
+       }
+
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+       if (r)
+               goto err;
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+       if (r)
+               goto err;
 
        /* CIU_1 */
-       for (i = 0; i < 16; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);
+       for (i = 0; i < 16; i++) {
+               r = octeon_irq_set_ciu_mapping(
+                       i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
+                       handle_level_irq);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+       if (r)
+               goto err;
 
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-       clear_c0_status(STATUSF_IP4);
+       if (octeon_irq_use_ip4)
+               set_c0_status(STATUSF_IP4);
+       else
+               clear_c0_status(STATUSF_IP4);
+
+       return 0;
+err:
+       return r;
 }
 
+static int __init octeon_irq_init_gpio(
+       struct device_node *gpio_node, struct device_node *parent)
+{
+       struct octeon_irq_gpio_domain_data *gpiod;
+       u32 interrupt_cells;
+       unsigned int base_hwirq;
+       int r;
+
+       r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
+       if (r)
+               return r;
+
+       if (interrupt_cells == 1) {
+               u32 v;
+
+               r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
+               base_hwirq = v;
+       } else if (interrupt_cells == 2) {
+               u32 v0, v1;
+
+               r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
+               r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
+               base_hwirq = (v0 << 6) | v1;
+       } else {
+               pr_warn("Bad \"#interrupt-cells\" property: %u\n",
+                       interrupt_cells);
+               return -EINVAL;
+       }
+
+       gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
+       if (gpiod) {
+               /* gpio domain host_data is the base hwirq number. */
+               gpiod->base_hwirq = base_hwirq;
+               irq_domain_add_linear(
+                       gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+       } else {
+               pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
 /*
  * Watchdog interrupts are special.  They are associated with a single
  * core, so we hardwire the affinity to that core.
@@ -1290,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = data->irq - OCTEON_IRQ_WDOG0;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1306,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data)
        u64 en_addr;
        int cpu = next_cpu_for_irq(data);
        int coreid = octeon_coreid_for_cpu(cpu);
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 }
 
@@ -1320,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1335,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1350,12 +1658,12 @@ static void octeon_irq_ciu2_ack(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1364,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+                       octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1383,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu));
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
+                       octeon_coreid_for_cpu(cpu));
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1396,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu));
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
+                       octeon_coreid_for_cpu(cpu));
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1430,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
        if (!enable_one)
                return 0;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << cd.s.bit;
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
 
        for_each_online_cpu(cpu) {
                u64 en_addr;
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = false;
-                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
+                               octeon_coreid_for_cpu(cpu)) +
+                               (0x1000ull * cd->line);
                } else {
-                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+                               octeon_coreid_for_cpu(cpu)) +
+                               (0x1000ull * cd->line);
                }
                cvmx_write_csr(en_addr, mask);
        }
@@ -1461,15 +1776,28 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
 
 static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
-       cd.p = irq_data_get_irq_chip_data(data);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
 
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu2_disable_all(data);
 }
 
 static struct irq_chip octeon_irq_chip_ciu2 = {
+       .name = "CIU2-E",
+       .irq_enable = octeon_irq_ciu2_enable,
+       .irq_disable = octeon_irq_ciu2_disable_all,
+       .irq_mask = octeon_irq_ciu2_disable_local,
+       .irq_unmask = octeon_irq_ciu2_enable,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu2_set_affinity,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_edge = {
        .name = "CIU2-E",
        .irq_enable = octeon_irq_ciu2_enable,
        .irq_disable = octeon_irq_ciu2_disable_all,
@@ -1582,7 +1910,7 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
 
        if (octeon_irq_ciu2_is_edge(line, bit))
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          &octeon_irq_chip_ciu2,
+                                          &octeon_irq_chip_ciu2_edge,
                                           handle_edge_irq);
        else
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
@@ -1591,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
 
        return 0;
 }
-static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
-                                   unsigned int virq, irq_hw_number_t hw)
-{
-       return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
-}
 
 static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
        .map = octeon_irq_ciu2_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_ciu2_xlat,
 };
 
-static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
-       .map = octeon_irq_ciu2_gpio_map,
-       .xlate = octeon_irq_gpio_xlat,
-};
-
 static void octeon_irq_ciu2(void)
 {
        int line;
@@ -1674,16 +1993,16 @@ out:
        return;
 }
 
-static void __init octeon_irq_init_ciu2(void)
+static int __init octeon_irq_init_ciu2(
+       struct device_node *ciu_node, struct device_node *parent)
 {
-       unsigned int i;
-       struct device_node *gpio_node;
-       struct device_node *ciu_node;
+       unsigned int i, r;
        struct irq_domain *ciu_domain = NULL;
 
        octeon_irq_init_ciu2_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
 
+       octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
        octeon_irq_ip2 = octeon_irq_ciu2;
        octeon_irq_ip3 = octeon_irq_ciu2_mbox;
        octeon_irq_ip4 = octeon_irq_ip4_mask;
@@ -1691,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void)
        /* Mips internal */
        octeon_irq_init_core();
 
-       gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-       if (gpio_node) {
-               struct octeon_irq_gpio_domain_data *gpiod;
-
-               gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-               if (gpiod) {
-                       /* gpio domain host_data is the base hwirq number. */
-                       gpiod->base_hwirq = 7 << 6;
-                       irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
-                       of_node_put(gpio_node);
-               } else
-                       pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-       } else
-               pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-       ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
-       if (ciu_node) {
-               ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
-               irq_set_default_host(ciu_domain);
-               of_node_put(ciu_node);
-       } else
-               panic("Cannot find device node for cavium,octeon-6880-ciu2.");
+       ciu_domain = irq_domain_add_tree(
+               ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
+       irq_set_default_host(ciu_domain);
 
        /* CUI2 */
-       for (i = 0; i < 64; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+       for (i = 0; i < 64; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 32; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
-                                          &octeon_irq_chip_ciu2_wd, handle_level_irq);
+       for (i = 0; i < 32; i++) {
+               r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
+                       &octeon_irq_chip_ciu2_wd, handle_level_irq);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
+       if (r)
+               goto err;
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+               if (r)
+                       goto err;
+       }
 
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
@@ -1741,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void)
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
        clear_c0_status(STATUSF_IP4);
+       return 0;
+err:
+       return r;
+}
+
+struct octeon_irq_cib_host_data {
+       raw_spinlock_t lock;
+       u64 raw_reg;
+       u64 en_reg;
+       int max_bits;
+};
+
+struct octeon_irq_cib_chip_data {
+       struct octeon_irq_cib_host_data *host_data;
+       int bit;
+};
+
+static void octeon_irq_cib_enable(struct irq_data *data)
+{
+       unsigned long flags;
+       u64 en;
+       struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+       struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+       raw_spin_lock_irqsave(&host_data->lock, flags);
+       en = cvmx_read_csr(host_data->en_reg);
+       en |= 1ull << cd->bit;
+       cvmx_write_csr(host_data->en_reg, en);
+       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static void octeon_irq_cib_disable(struct irq_data *data)
+{
+       unsigned long flags;
+       u64 en;
+       struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+       struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+       raw_spin_lock_irqsave(&host_data->lock, flags);
+       en = cvmx_read_csr(host_data->en_reg);
+       en &= ~(1ull << cd->bit);
+       cvmx_write_csr(host_data->en_reg, en);
+       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
+{
+       irqd_set_trigger_type(data, t);
+       return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip octeon_irq_chip_cib = {
+       .name = "CIB",
+       .irq_enable = octeon_irq_cib_enable,
+       .irq_disable = octeon_irq_cib_disable,
+       .irq_mask = octeon_irq_cib_disable,
+       .irq_unmask = octeon_irq_cib_enable,
+       .irq_set_type = octeon_irq_cib_set_type,
+};
+
+static int octeon_irq_cib_xlat(struct irq_domain *d,
+                                  struct device_node *node,
+                                  const u32 *intspec,
+                                  unsigned int intsize,
+                                  unsigned long *out_hwirq,
+                                  unsigned int *out_type)
+{
+       unsigned int type = 0;
+
+       if (intsize == 2)
+               type = intspec[1];
+
+       switch (type) {
+       case 0: /* unofficial value, but we might as well let it work. */
+       case 4: /* official value for level triggering. */
+               *out_type = IRQ_TYPE_LEVEL_HIGH;
+               break;
+       case 1: /* official value for edge triggering. */
+               *out_type = IRQ_TYPE_EDGE_RISING;
+               break;
+       default: /* Nothing else is acceptable. */
+               return -EINVAL;
+       }
+
+       *out_hwirq = intspec[0];
+
+       return 0;
+}
+
+static int octeon_irq_cib_map(struct irq_domain *d,
+                             unsigned int virq, irq_hw_number_t hw)
+{
+       struct octeon_irq_cib_host_data *host_data = d->host_data;
+       struct octeon_irq_cib_chip_data *cd;
+
+       if (hw >= host_data->max_bits) {
+               pr_err("ERROR: %s mapping %u is too big!\n",
+                      d->of_node->name, (unsigned)hw);
+               return -EINVAL;
+       }
+
+       cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+       cd->host_data = host_data;
+       cd->bit = hw;
+
+       irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
+                                handle_simple_irq);
+       irq_set_chip_data(virq, cd);
+       return 0;
 }
 
+static struct irq_domain_ops octeon_irq_domain_cib_ops = {
+       .map = octeon_irq_cib_map,
+       .unmap = octeon_irq_free_cd,
+       .xlate = octeon_irq_cib_xlat,
+};
+
+/* Chain to real handler. */
+static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
+{
+       u64 en;
+       u64 raw;
+       u64 bits;
+       int i;
+       int irq;
+       struct irq_domain *cib_domain = data;
+       struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
+
+       en = cvmx_read_csr(host_data->en_reg);
+       raw = cvmx_read_csr(host_data->raw_reg);
+
+       bits = en & raw;
+
+       for (i = 0; i < host_data->max_bits; i++) {
+               if ((bits & 1ull << i) == 0)
+                       continue;
+               irq = irq_find_mapping(cib_domain, i);
+               if (!irq) {
+                       unsigned long flags;
+
+                       pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
+                               i, host_data->raw_reg);
+                       raw_spin_lock_irqsave(&host_data->lock, flags);
+                       en = cvmx_read_csr(host_data->en_reg);
+                       en &= ~(1ull << i);
+                       cvmx_write_csr(host_data->en_reg, en);
+                       cvmx_write_csr(host_data->raw_reg, 1ull << i);
+                       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+               } else {
+                       struct irq_desc *desc = irq_to_desc(irq);
+                       struct irq_data *irq_data = irq_desc_get_irq_data(desc);
+                       /* If edge, acknowledge the bit we will be sending. */
+                       if (irqd_get_trigger_type(irq_data) &
+                               IRQ_TYPE_EDGE_BOTH)
+                               cvmx_write_csr(host_data->raw_reg, 1ull << i);
+                       generic_handle_irq_desc(irq, desc);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+                                     struct device_node *parent)
+{
+       const __be32 *addr;
+       u32 val;
+       struct octeon_irq_cib_host_data *host_data;
+       int parent_irq;
+       int r;
+       struct irq_domain *cib_domain;
+
+       parent_irq = irq_of_parse_and_map(ciu_node, 0);
+       if (!parent_irq) {
+               pr_err("ERROR: Couldn't acquire parent_irq for %s.\n",
+                       ciu_node->name);
+               return -EINVAL;
+       }
+
+       host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+       raw_spin_lock_init(&host_data->lock);
+
+       addr = of_get_address(ciu_node, 0, NULL, NULL);
+       if (!addr) {
+               pr_err("ERROR: Couldn't acquire reg(0) %s.\n", ciu_node->name);
+               return -EINVAL;
+       }
+       host_data->raw_reg = (u64)phys_to_virt(
+               of_translate_address(ciu_node, addr));
+
+       addr = of_get_address(ciu_node, 1, NULL, NULL);
+       if (!addr) {
+               pr_err("ERROR: Couldn't acquire reg(1) %s.\n", ciu_node->name);
+               return -EINVAL;
+       }
+       host_data->en_reg = (u64)phys_to_virt(
+               of_translate_address(ciu_node, addr));
+
+       r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
+       if (r) {
+               pr_err("ERROR: Couldn't read cavium,max-bits from %s.\n",
+                       ciu_node->name);
+               return r;
+       }
+       host_data->max_bits = val;
+
+       cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
+                                          &octeon_irq_domain_cib_ops,
+                                          host_data);
+       if (!cib_domain) {
+               pr_err("ERROR: Couldn't irq_domain_add_linear().\n");
+               return -ENOMEM;
+       }
+
+       cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
+       cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
+
+       r = request_irq(parent_irq, octeon_irq_cib_handler,
+                       IRQF_NO_THREAD, "cib", cib_domain);
+       if (r) {
+               pr_err("request_irq cib failed %d\n", r);
+               return r;
+       }
+       pr_info("CIB interrupt controller probed: %llx %d\n",
+               host_data->raw_reg, host_data->max_bits);
+       return 0;
+}
+
+static struct of_device_id ciu_types[] __initdata = {
+       {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
+       {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
+       {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
+       {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
+       {}
+};
+
 void __init arch_init_irq(void)
 {
 #ifdef CONFIG_SMP
@@ -1750,10 +2305,7 @@ void __init arch_init_irq(void)
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
 #endif
-       if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-               octeon_irq_init_ciu2();
-       else
-               octeon_irq_init_ciu();
+       of_irq_init(ciu_types);
 }
 
 asmlinkage void plat_irq_dispatch(void)
@@ -1767,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void)
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;
 
-               if (unlikely(cop0_cause & STATUSF_IP2))
+               if (cop0_cause & STATUSF_IP2)
                        octeon_irq_ip2();
-               else if (unlikely(cop0_cause & STATUSF_IP3))
+               else if (cop0_cause & STATUSF_IP3)
                        octeon_irq_ip3();
-               else if (unlikely(cop0_cause & STATUSF_IP4))
+               else if (cop0_cause & STATUSF_IP4)
                        octeon_irq_ip4();
-               else if (likely(cop0_cause))
+               else if (cop0_cause)
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                else
                        break;
index 94f888d3384e247b542b15faae2da18147088f24..a42110e7edbcabefdea9a011de9e032f88388a87 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-rst-defs.h>
 
 extern struct plat_smp_ops octeon_smp_ops;
 
@@ -579,12 +580,10 @@ void octeon_user_io_init(void)
        /* R/W If set, CVMSEG is available for loads/stores in user
         * mode. */
        cvmmemctl.s.cvmsegenau = 0;
-       /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
-        * is max legal value. */
-       cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
 
        write_c0_cvmmemctl(cvmmemctl.u64);
 
+       /* Setup of CVMSEG is done in kernel-entry-init.h */
        if (smp_processor_id() == 0)
                pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
@@ -615,6 +614,7 @@ void __init prom_init(void)
        const char *arg;
        char *p;
        int i;
+       u64 t;
        int argc;
 #ifdef CONFIG_CAVIUM_RESERVE32
        int64_t addr = -1;
@@ -654,15 +654,56 @@ void __init prom_init(void)
        sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
        sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
 
-       if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+       if (OCTEON_IS_OCTEON2()) {
                /* I/O clock runs at a different rate than the CPU. */
                union cvmx_mio_rst_boot rst_boot;
                rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
                octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+       } else if (OCTEON_IS_OCTEON3()) {
+               /* I/O clock runs at a different rate than the CPU. */
+               union cvmx_rst_boot rst_boot;
+               rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+               octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
        } else {
                octeon_io_clock_rate = sysinfo->cpu_clock_hz;
        }
 
+       t = read_c0_cvmctl();
+       if ((t & (1ull << 27)) == 0) {
+               /*
+                * Setup the multiplier save/restore code if
+                * CvmCtl[NOMUL] clear.
+                */
+               void *save;
+               void *save_end;
+               void *restore;
+               void *restore_end;
+               int save_len;
+               int restore_len;
+               int save_max = (char *)octeon_mult_save_end -
+                       (char *)octeon_mult_save;
+               int restore_max = (char *)octeon_mult_restore_end -
+                       (char *)octeon_mult_restore;
+               if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
+                       save = octeon_mult_save3;
+                       save_end = octeon_mult_save3_end;
+                       restore = octeon_mult_restore3;
+                       restore_end = octeon_mult_restore3_end;
+               } else {
+                       save = octeon_mult_save2;
+                       save_end = octeon_mult_save2_end;
+                       restore = octeon_mult_restore2;
+                       restore_end = octeon_mult_restore2_end;
+               }
+               save_len = (char *)save_end - (char *)save;
+               restore_len = (char *)restore_end - (char *)restore;
+               if (!WARN_ON(save_len > save_max ||
+                               restore_len > restore_max)) {
+                       memcpy(octeon_mult_save, save, save_len);
+                       memcpy(octeon_mult_restore, restore, restore_len);
+               }
+       }
+
        /*
         * Only enable the LED controller if we're running on a CN38XX, CN58XX,
         * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
@@ -1004,7 +1045,7 @@ EXPORT_SYMBOL(prom_putchar);
 
 void prom_free_prom_memory(void)
 {
-       if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
+       if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
                /* Check for presence of Core-14449 fix.  */
                u32 insn;
                u32 *foo;
@@ -1026,8 +1067,9 @@ void prom_free_prom_memory(void)
                        panic("No PREF instruction at Core-14449 probe point.");
 
                if (((insn >> 16) & 0x1f) != 28)
-                       panic("Core-14449 WAR not in place (%04x).\n"
-                             "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn);
+                       panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
+                             "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
+                             insn);
        }
 }
 
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
new file mode 100644 (file)
index 0000000..4bce1f8
--- /dev/null
@@ -0,0 +1,193 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index f9f5307434c276a35624f5c137333867e2357794..19f710117d974bf2a0da97764e7bc3ec428b3fc8 100644 (file)
@@ -9,6 +9,7 @@
  * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
 #include <asm/sgialib.h>
 #include <asm/bootinfo.h>
 
-VOID
+VOID __noreturn
 ArcHalt(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(halt);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcPowerDown(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(pdown);
-never: goto never;
+
+       unreachable();
 }
 
 /* XXX is this a soft reset basically? XXX */
-VOID
+VOID __noreturn
 ArcRestart(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(restart);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcReboot(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(reboot);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcEnterInteractiveMode(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(imode);
-never: goto never;
+
+       unreachable();
 }
 
 LONG
index 200efeac41813c2a17e237156c6410253f488d8b..526539cbc99f6792b21d35dbfc1d044f0f817a87 100644 (file)
@@ -1,4 +1,5 @@
 # MIPS headers
+generic-$(CONFIG_GENERIC_CSUM) += checksum.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h
index 6caf8766b80f161ea7a620ecc33cca7768e1d4b0..0cae4595e985bbc3d8043b3bb85aef66c582615b 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/asmmacro-64.h>
 #endif
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        .macro  local_irq_enable reg=t0
        ei
        irq_enable_hazard
        .endm
 
        .macro  fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        sll     \tmp, \status, 5
        bgez    \tmp, 10f
        fpu_save_16odd \thread
        .endm
 
        .macro  fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        sll     \tmp, \status, 5
        bgez    \tmp, 10f                               # 16 register mode?
 
        fpu_restore_16even \thread \tmp
        .endm
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        .macro  _EXT    rd, rs, p, s
        ext     \rd, \rs, \p, \s
        .endm
-#else /* !CONFIG_CPU_MIPSR2 */
+#else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
        .macro  _EXT    rd, rs, p, s
        srl     \rd, \rs, \p
        andi    \rd, \rd, (1 << \s) - 1
        .endm
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
 
 /*
  * Temporary until all gas have MT ASE support
        .set    push
        .set    noat
        SET_HARDFLOAT
-       add     $1, \base, \off
+       addu    $1, \base, \off
        .word   LDD_MSA_INSN | (\wd << 6)
        .set    pop
        .endm
        .set    push
        .set    noat
        SET_HARDFLOAT
-       add     $1, \base, \off
+       addu    $1, \base, \off
        .word   STD_MSA_INSN | (\wd << 6)
        .set    pop
        .endm
index 857da84cfc92eb20bd7f29cb5d9b3c1e16b86203..26d436336f2e18c9e8a3e5fd888e2f97bdb0a2dd 100644 (file)
@@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v)                           \
                "       sc      %0, %1                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
                "       .set    mips0                                   \n"   \
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                int temp;                                                     \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       ll      %0, %1          # atomic_" #op "\n"   \
                        "       " #asm_op " %0, %2                      \n"   \
                        "       sc      %0, %1                          \n"   \
                        "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
                        : "Ir" (i));                                          \
                } while (unlikely(!temp));                                    \
        } else {                                                              \
@@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)                   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
-                 "+" GCC_OFF12_ASM() (v->counter)                            \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                int temp;                                                     \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       ll      %1, %2  # atomic_" #op "_return \n"   \
                        "       " #asm_op " %0, %1, %3                  \n"   \
                        "       sc      %0, %2                          \n"   \
                        "       .set    mips0                           \n"   \
                        : "=&r" (result), "=&r" (temp),                       \
-                         "+" GCC_OFF12_ASM() (v->counter)                    \
+                         "+" GCC_OFF_SMALL_ASM() (v->counter)                \
                        : "Ir" (i));                                          \
                } while (unlikely(!result));                                  \
                                                                              \
@@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
-               : "Ir" (i), GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
                : "memory");
        } else if (kernel_uses_llsc) {
                int temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "Ir" (i));
        } else {
                unsigned long flags;
@@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)                    \
                "       scd     %0, %1                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
                "       .set    mips0                                   \n"   \
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                long temp;                                                    \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       lld     %0, %1          # atomic64_" #op "\n" \
                        "       " #asm_op " %0, %2                      \n"   \
                        "       scd     %0, %1                          \n"   \
                        "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
                        : "Ir" (i));                                          \
                } while (unlikely(!temp));                                    \
        } else {                                                              \
@@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)           \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
-                 "+" GCC_OFF12_ASM() (v->counter)                            \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                long temp;                                                    \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       lld     %1, %2  # atomic64_" #op "_return\n"  \
                        "       " #asm_op " %0, %1, %3                  \n"   \
                        "       scd     %0, %2                          \n"   \
                        "       .set    mips0                           \n"   \
                        : "=&r" (result), "=&r" (temp),                       \
-                         "=" GCC_OFF12_ASM() (v->counter)                    \
-                       : "Ir" (i), GCC_OFF12_ASM() (v->counter)              \
+                         "=" GCC_OFF_SMALL_ASM() (v->counter)                \
+                       : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)          \
                        : "memory");                                          \
                } while (unlikely(!result));                                  \
                                                                              \
@@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "=" GCC_OFF12_ASM() (v->counter)
-               : "Ir" (i), GCC_OFF12_ASM() (v->counter)
+                 "=" GCC_OFF_SMALL_ASM() (v->counter)
+               : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
                : "memory");
        } else if (kernel_uses_llsc) {
                long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "Ir" (i));
        } else {
                unsigned long flags;
index 6663bcca9d0c626886529ae5eb7a92e75b75cd46..9f935f6aa996ddfd573b9ccb4f6261118934a415 100644 (file)
@@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC  "%0, %1                                 \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
-               : "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
-#ifdef CONFIG_CPU_MIPSR2
+               : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+               : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       " __INS "%0, %3, %2, 1                  \n"
                        "       " __SC "%0, %1                          \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (bit), "r" (~0));
                } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       or      %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
        } else
@@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC "%0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       " __INS "%0, $0, %2, 1                  \n"
                        "       " __SC "%0, %1                          \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (bit));
                } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       and     %0, %2                          \n"
                        "       " __SC "%0, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (~(1UL << bit)));
                } while (unlikely(!temp));
        } else
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC  "%0, %1                         \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (1UL << bit));
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # change_bit    \n"
                        "       xor     %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
        } else
@@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
@@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
@@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
                        "       " __EXT "%2, %0, %3, 1                  \n"
                        "       " __INS "%0, $0, %3, 1                  \n"
                        "       " __SC  "%0, %1                         \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "ir" (bit)
                        : "memory");
                } while (unlikely(!temp));
@@ -380,13 +380,13 @@ static inline int test_and_clear_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       or      %2, %0, %3                      \n"
                        "       xor     %2, %3                          \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
@@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL  "%0, %1 # test_and_change_bit   \n"
                        "       xor     %2, %0, %3                      \n"
                        "       " __SC  "\t%2, %1                       \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word)
            __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips32                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
@@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word)
            __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips64                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       dclz    %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
@@ -562,7 +562,7 @@ static inline int fls(int x)
        if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips32                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (x)
index 3418c51e11512ed2a3957448fbb68d83ebac1858..5c585c5c1c3e3fe3ee6bacdfd8ce0f59be82e27e 100644 (file)
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
@@ -99,27 +103,23 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
  */
 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
                                       int len, __wsum sum);
+#define csum_partial_copy_nocheck csum_partial_copy_nocheck
 
 /*
  *     Fold a partial checksum without adding pseudo headers
  */
-static inline __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-       __asm__(
-       "       .set    push            # csum_fold\n"
-       "       .set    noat            \n"
-       "       sll     $1, %0, 16      \n"
-       "       addu    %0, $1          \n"
-       "       sltu    $1, %0, $1      \n"
-       "       srl     %0, %0, 16      \n"
-       "       addu    %0, $1          \n"
-       "       xori    %0, 0xffff      \n"
-       "       .set    pop"
-       : "=r" (sum)
-       : "0" (sum));
+       u32 sum = (__force u32)csum;
 
-       return (__force __sum16)sum;
+       sum += (sum << 16);
+       csum = (sum < csum);
+       sum >>= 16;
+       sum += csum;
+
+       return (__force __sum16)~sum;
 }
+#define csum_fold csum_fold
 
 /*
  *     This is a version of ip_compute_csum() optimized for IP headers,
@@ -158,6 +158,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 
        return csum_fold(csum);
 }
+#define ip_fast_csum ip_fast_csum
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr,
        __be32 daddr, unsigned short len, unsigned short proto,
@@ -200,18 +201,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
 
        return sum;
 }
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-                                                  unsigned short len,
-                                                  unsigned short proto,
-                                                  __wsum sum)
-{
-       return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
-}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
@@ -287,4 +277,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
        return csum_fold(sum);
 }
 
+#include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
+
 #endif /* _ASM_CHECKSUM_H */
index 28b1edf195016b80a89854b9ae11c28a1a9c04ce..d0a2a68ca600670ead4d5535751267f6889e93f4 100644 (file)
@@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                "       sc      %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-               : GCC_OFF12_ASM() (*m), "Jr" (val)
+               : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long dummy;
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       ll      %0, %3          # xchg_u32      \n"
                        "       .set    mips0                           \n"
                        "       move    %2, %z4                         \n"
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       sc      %2, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+                       : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
                          "=&r" (dummy)
-                       : GCC_OFF12_ASM() (*m), "Jr" (val)
+                       : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                        : "memory");
                } while (unlikely(!dummy));
        } else {
@@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                "       scd     %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-               : GCC_OFF12_ASM() (*m), "Jr" (val)
+               : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long dummy;
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       lld     %0, %3          # xchg_u64      \n"
                        "       move    %2, %z4                         \n"
                        "       scd     %2, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+                       : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
                          "=&r" (dummy)
-                       : GCC_OFF12_ASM() (*m), "Jr" (val)
+                       : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                        : "memory");
                } while (unlikely(!dummy));
        } else {
@@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                "       beqzl   $1, 1b                          \n"     \
                "2:                                             \n"     \
                "       .set    pop                             \n"     \
-               : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)               \
-               : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)          \
+               : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)              \
                : "memory");                                            \
        } else if (kernel_uses_llsc) {                                  \
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "       " st "  $1, %1                          \n"     \
                "       beqz    $1, 1b                          \n"     \
                "       .set    pop                             \n"     \
                "2:                                             \n"     \
-               : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)               \
-               : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)          \
+               : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)              \
                : "memory");                                            \
        } else {                                                        \
                unsigned long __flags;                                  \
index c73815e0123a756bc0579806c7e8333b26712d08..e081a265f4227475d17fef0d53cf07f7e9ffb6a3 100644 (file)
 #define GCC_REG_ACCUM "accum"
 #endif
 
+#ifdef CONFIG_CPU_MIPSR6
+/* All MIPS R6 toolchains support the ZC constraint */
+#define GCC_OFF_SMALL_ASM() "ZC"
+#else
 #ifndef CONFIG_CPU_MICROMIPS
-#define GCC_OFF12_ASM() "R"
+#define GCC_OFF_SMALL_ASM() "R"
 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
-#define GCC_OFF12_ASM() "ZC"
+#define GCC_OFF_SMALL_ASM() "ZC"
 #else
 #error "microMIPS compilation unsupported with GCC older than 4.9"
-#endif
+#endif /* CONFIG_CPU_MICROMIPS */
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #endif /* _ASM_COMPILER_H */
index 2897cfafcaf097f01e17a561488eafe032924933..0d8208de9a3fadaff9308544af2c955f7b967a34 100644 (file)
@@ -38,6 +38,9 @@
 #ifndef cpu_has_maar
 #define cpu_has_maar           (cpu_data[0].options & MIPS_CPU_MAAR)
 #endif
+#ifndef cpu_has_rw_llb
+#define cpu_has_rw_llb         (cpu_data[0].options & MIPS_CPU_RW_LLB)
+#endif
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
 #endif
 #endif
 
+#ifndef cpu_has_mips_1
+# define cpu_has_mips_1                (!cpu_has_mips_r6)
+#endif
 #ifndef cpu_has_mips_2
 # define cpu_has_mips_2                (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
 #endif
 #ifndef cpu_has_mips32r2
 # define cpu_has_mips32r2      (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
 #endif
+#ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6      (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+#endif
 #ifndef cpu_has_mips64r1
 # define cpu_has_mips64r1      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
 #endif
 #ifndef cpu_has_mips64r2
 # define cpu_has_mips64r2      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
 #endif
+#ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+#endif
 
 /*
  * Shortcuts ...
 #define cpu_has_mips_4_5_r     (cpu_has_mips_4 | cpu_has_mips_5_r)
 #define cpu_has_mips_5_r       (cpu_has_mips_5 | cpu_has_mips_r)
 
-#define cpu_has_mips_4_5_r2    (cpu_has_mips_4_5 | cpu_has_mips_r2)
+#define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \
+                                cpu_has_mips_r6)
 
-#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6        (cpu_has_mips32r6 | cpu_has_mips64r6)
 #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
-                        cpu_has_mips64r1 | cpu_has_mips64r2)
+                        cpu_has_mips32r6 | cpu_has_mips64r1 | \
+                        cpu_has_mips64r2 | cpu_has_mips64r6)
+
+/* MIPSR2 and MIPSR6 have a lot of similarities */
+#define cpu_has_mips_r2_r6     (cpu_has_mips_r2 | cpu_has_mips_r6)
 
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
 #endif
 
 /*
index a6c9ccb33c5c9a35ceaac1485fb10f02a97da4b2..c3f4f2d2e1088459b2aa10c6292e5de76665d5bc 100644 (file)
@@ -84,6 +84,11 @@ struct cpuinfo_mips {
         * (shifted by _CACHE_SHIFT)
         */
        unsigned int            writecombine;
+       /*
+        * Simple counter to prevent enabling HTW in nested
+        * htw_start/htw_stop calls
+        */
+       unsigned int            htw_seq;
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
 extern struct cpuinfo_mips cpu_data[];
index b4e2bd87df5030457b2f397b7e52565b7d2dc4b5..8245875f8b33be3156cfe42b63b0f1d788afebf8 100644 (file)
@@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type)
        case CPU_M5150:
 #endif
 
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
+       case CPU_QEMU_GENERIC:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
        case CPU_5KC:
        case CPU_5KE:
index 33866fce4d633a177636c1d386e180bdbd8ef02a..15687234d70a6bcd1dd211543fa8ac494ca44902 100644 (file)
@@ -93,6 +93,7 @@
  * These are the PRID's for when 23:16 == PRID_COMP_MIPS
  */
 
+#define PRID_IMP_QEMU_GENERIC  0x0000
 #define PRID_IMP_4KC           0x8000
 #define PRID_IMP_5KC           0x8100
 #define PRID_IMP_20KC          0x8200
@@ -312,6 +313,8 @@ enum cpu_type_enum {
        CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
        CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
+       CPU_QEMU_GENERIC,
+
        CPU_LAST
 };
 
@@ -329,11 +332,14 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M32R2     0x00000020
 #define MIPS_CPU_ISA_M64R1     0x00000040
 #define MIPS_CPU_ISA_M64R2     0x00000080
+#define MIPS_CPU_ISA_M32R6     0x00000100
+#define MIPS_CPU_ISA_M64R6     0x00000200
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
-       MIPS_CPU_ISA_M32R2)
+       MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
-       MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+       MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+       MIPS_CPU_ISA_M64R6)
 
 /*
  * CPU Option encodings
@@ -370,6 +376,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_RIXIEX                0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
 #define MIPS_CPU_MAAR          0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE           0x800000000ull /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB                0x1000000000ull /* LLADDR/LLB writes are allowed */
 
 /*
  * CPU ASE encodings
index ae6fedcb0060f22c69091480f55228dd2ef4383d..94105d3f58f4882849643cfcb8668857198f2117 100644 (file)
@@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
                "       sc      %0, %1                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
-               : GCC_OFF12_ASM() (*virt_addr));
+               : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
+               : GCC_OFF_SMALL_ASM() (*virt_addr));
 
                virt_addr++;
        }
index eb4d95de619c5dca7543ed551267e43799f4ca11..535f196ffe02da7ad769ab0b7053b4dd5253dd82 100644 (file)
@@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 struct arch_elf_state {
        int fp_abi;
        int interp_fp_abi;
-       int overall_abi;
+       int overall_fp_mode;
 };
 
+#define MIPS_ABI_FP_UNKNOWN    (-1)    /* Unknown FP ABI (kernel internal) */
+
 #define INIT_ARCH_ELF_STATE {                  \
-       .fp_abi = -1,                           \
-       .interp_fp_abi = -1,                    \
-       .overall_abi = -1,                      \
+       .fp_abi = MIPS_ABI_FP_UNKNOWN,          \
+       .interp_fp_abi = MIPS_ABI_FP_UNKNOWN,   \
+       .overall_fp_mode = -1,                  \
 }
 
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
index affebb78f5d6573dbf97f62630ad1e6a35026602..dd083e999b08a14ffdbef46d5f5f4a0731e9f18e 100644 (file)
@@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
                goto fr_common;
 
        case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+      || defined(CONFIG_64BIT))
                /* we only have a 32-bit FPU */
                return SIGFPE;
 #endif
index ef9987a61d88c62e79e38e3c406a3bbcc42e1a23..1de190bdfb9c9fef90b8976503242e09b97e7f60 100644 (file)
                "       "__UA_ADDR "\t2b, 4b                    \n"     \
                "       .previous                               \n"     \
                : "=r" (ret), "=&r" (oldval),                           \
-                 "=" GCC_OFF12_ASM() (*uaddr)                          \
-               : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),      \
+                 "=" GCC_OFF_SMALL_ASM() (*uaddr)                              \
+               : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),  \
                  "i" (-EFAULT)                                         \
                : "memory");                                            \
        } else if (cpu_has_llsc) {                                      \
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "1:     "user_ll("%1", "%4")" # __futex_atomic_op\n"    \
                "       .set    mips0                           \n"     \
                "       " insn  "                               \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "2:     "user_sc("$1", "%2")"                   \n"     \
                "       beqz    $1, 1b                          \n"     \
                __WEAK_LLSC_MB                                          \
@@ -74,8 +74,8 @@
                "       "__UA_ADDR "\t2b, 4b                    \n"     \
                "       .previous                               \n"     \
                : "=r" (ret), "=&r" (oldval),                           \
-                 "=" GCC_OFF12_ASM() (*uaddr)                          \
-               : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),      \
+                 "=" GCC_OFF_SMALL_ASM() (*uaddr)                              \
+               : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),  \
                  "i" (-EFAULT)                                         \
                : "memory");                                            \
        } else                                                          \
@@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-               : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+               : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+               : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
                  "i" (-EFAULT)
                : "memory");
        } else if (cpu_has_llsc) {
@@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "# futex_atomic_cmpxchg_inatomic                        \n"
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:     "user_ll("%1", "%3")"                           \n"
                "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z5                                 \n"
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "2:     "user_sc("$1", "%2")"                           \n"
                "       beqz    $1, 1b                                  \n"
                __WEAK_LLSC_MB
@@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-               : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+               : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+               : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
                  "i" (-EFAULT)
                : "memory");
        } else
index 4be1a57cdbb055915c862cff1fb55966b8214726..71a986e9b694d672823a87c17260f8fd39882e24 100644 (file)
@@ -25,8 +25,6 @@ struct gio_driver {
 
        int  (*probe)(struct gio_device *, const struct gio_device_id *);
        void (*remove)(struct gio_device *);
-       int  (*suspend)(struct gio_device *, pm_message_t);
-       int  (*resume)(struct gio_device *);
        void (*shutdown)(struct gio_device *);
 
        struct device_driver driver;
index e3ee92d4dbe750c7aa05a5488f7443cdd64fb387..4087b47ad1cbea16050e968a4daf9f0531b4aa6d 100644 (file)
@@ -11,6 +11,7 @@
 #define _ASM_HAZARDS_H
 
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 
 #define ___ssnop                                                       \
        sll     $0, $0, 1
@@ -21,7 +22,7 @@
 /*
  * TLB hazards
  */
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
 
 /*
  * MIPSR2 defines ehb for hazard avoidance
@@ -58,7 +59,7 @@ do {                                                                  \
        unsigned long tmp;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-       "       .set    mips64r2                                \n"     \
+       "       .set "MIPS_ISA_LEVEL"                           \n"     \
        "       dla     %0, 1f                                  \n"     \
        "       jr.hb   %0                                      \n"     \
        "       .set    mips0                                   \n"     \
@@ -132,7 +133,7 @@ do {                                                                        \
 
 #define instruction_hazard()                                           \
 do {                                                                   \
-       if (cpu_has_mips_r2)                                            \
+       if (cpu_has_mips_r2_r6)                                         \
                __instruction_hazard();                                 \
 } while (0)
 
@@ -240,7 +241,7 @@ do {                                                                        \
 
 #define __disable_fpu_hazard
 
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 #define __enable_fpu_hazard                                            \
        ___ehb
index 0fa5fdcd1f01f273da67b1530aa67ff5ee1646c6..d60cc68fa31e4f908685dd9c9f332e8df3d36242 100644 (file)
 
 #include <linux/compiler.h>
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 #include <asm/hazards.h>
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6)
 
 static inline void arch_local_irq_disable(void)
 {
@@ -118,7 +119,7 @@ void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 static inline void arch_local_irq_enable(void)
 {
@@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void)
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-#if   defined(CONFIG_CPU_MIPSR2)
+#if   defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        "       ei                                                      \n"
 #else
        "       mfc0    $1,$12                                          \n"
index 46dfc3c1fd49777a41b3158c77b1fc5c49955087..8feaed62a2abab216da39e8dd99786f2340d3f60 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/bitops.h>
 #include <linux/atomic.h>
 #include <asm/cmpxchg.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 typedef struct
@@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:"    __LL    "%1, %2         # local_add_return      \n"
                "       addu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
@@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:"    __LL    "%1, %2         # local_sub_return      \n"
                "       subu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
index 1668ee57acb90b82e679b50b4d687940d0a9f39f..cf92fe7339952b43f0a8585bc9a51d351fca0f50 100644 (file)
@@ -8,11 +8,10 @@
 #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
 #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
 
-
-#define CP0_CYCLE_COUNTER $9, 6
 #define CP0_CVMCTL_REG $9, 7
 #define CP0_CVMMEMCTL_REG $11,7
 #define CP0_PRID_REG $15, 0
+#define CP0_DCACHE_ERR_REG $27, 1
 #define CP0_PRID_OCTEON_PASS1 0x000d0000
 #define CP0_PRID_OCTEON_CN30XX 0x000d0200
 
        # Needed for octeon specific memcpy
        or  v0, v0, 0x5001
        xor v0, v0, 0x1001
-       # Read the processor ID register
-       mfc0 v1, CP0_PRID_REG
-       # Disable instruction prefetching (Octeon Pass1 errata)
-       or  v0, v0, 0x2000
-       # Skip reenable of prefetching for Octeon Pass1
-       beq v1, CP0_PRID_OCTEON_PASS1, skip
-       nop
-       # Reenable instruction prefetching, not on Pass1
-       xor v0, v0, 0x2000
-       # Strip off pass number off of processor id
-       srl v1, 8
-       sll v1, 8
-       # CN30XX needs some extra stuff turned off for better performance
-       bne v1, CP0_PRID_OCTEON_CN30XX, skip
-       nop
-       # CN30XX Use random Icache replacement
-       or  v0, v0, 0x400
-       # CN30XX Disable instruction prefetching
-       or  v0, v0, 0x2000
-skip:
        # First clear off CvmCtl[IPPCI] bit and move the performance
        # counters interrupt to IRQ 6
-       li      v1, ~(7 << 7)
+       dli     v1, ~(7 << 7)
        and     v0, v0, v1
        ori     v0, v0, (6 << 7)
+
+       mfc0    v1, CP0_PRID_REG
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9000          # 63-P1
+       beqz    t1, 4f
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9008          # 63-P2
+       beqz    t1, 4f
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9100          # 68-P1
+       beqz    t1, 4f
+       and     t1, v1, 0xff00
+       xor     t1, t1, 0x9200          # 66-PX
+       bnez    t1, 5f                  # Skip WAR for others.
+       and     t1, v1, 0x00ff
+       slti    t1, t1, 2               # 66-P1.2 and later good.
+       beqz    t1, 5f
+
+4:     # core-16057 work around
+       or      v0, v0, 0x2000          # Set IPREF bit.
+
+5:     # No core-16057 work around
        # Write the cavium control register
        dmtc0   v0, CP0_CVMCTL_REG
        sync
        # Flush dcache after config change
        cache   9, 0($0)
+       # Zero all of CVMSEG to make sure parity is correct
+       dli     v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+       dsll    v0, 7
+       beqz    v0, 2f
+1:     dsubu   v0, 8
+       sd      $0, -32768(v0)
+       bnez    v0, 1b
+2:
+       mfc0    v0, CP0_PRID_REG
+       bbit0   v0, 15, 1f
+       # OCTEON II or better have bit 15 set.  Clear the error bits.
+       and     t1, v0, 0xff00
+       dli     v0, 0x9500
+       bge     t1, v0, 1f  # OCTEON III has no DCACHE_ERR_REG COP0
+       dli     v0, 0x27
+       dmtc0   v0, CP0_DCACHE_ERR_REG
+1:
        # Get my core id
        rdhwr   v0, $0
        # Jump the master to kernel_entry
index eb72b35cf04b5bf4f45dc6668b763b05fdce0bdf..35c80be92207beef97ebc536b31ae93d4f0a133a 100644 (file)
@@ -22,4 +22,7 @@
 #define R10000_LLSC_WAR                        0
 #define MIPS34K_MISSED_ITLB_WAR                0
 
+#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR      \
+       OCTEON_IS_MODEL(OCTEON_CN6XXX)
+
 #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
index 986982db7c38c95cf8ca0e4beac9bcee6570f8fe..79cff26d8b36f16cb333d9af2e56383db72b4222 100644 (file)
@@ -27,8 +27,6 @@ struct jz_nand_platform_data {
 
        struct nand_ecclayout   *ecc_layout;
 
-       unsigned int busy_gpio;
-
        unsigned char banks[JZ_NAND_NUM_BANKS];
 
        void (*ident_callback)(struct platform_device *, struct nand_chip *,
index 2e54b4bff5cf59e744b9cb3a83e44bca747a9136..90dbe43c8d272d2cc5a95d1e4a2fbf20a4f1d4a6 100644 (file)
@@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (~mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
        "       .set    arch=r4000                      \n"     \
        "1:     ll      %0, %1  #custom_read_reg32      \n"     \
        "       .set    pop                             \n"     \
-       : "=r" (tmp), "=" GCC_OFF12_ASM() (*address)            \
-       : GCC_OFF12_ASM() (*address))
+       : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)                \
+       : GCC_OFF_SMALL_ASM() (*address))
 
 #define custom_write_reg32(address, tmp)                       \
        __asm__ __volatile__(                                   \
@@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
        "       "__beqz"%0, 1b                          \n"     \
        "       nop                                     \n"     \
        "       .set    pop                             \n"     \
-       : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address)           \
-       : "0" (tmp), GCC_OFF12_ASM() (*address))
+       : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)               \
+       : "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
 
 #endif /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
new file mode 100644 (file)
index 0000000..60570f2
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
+#define __ASM_MIPS_R2_TO_R6_EMUL_H
+
+struct mips_r2_emulator_stats {
+       u64 movs;
+       u64 hilo;
+       u64 muls;
+       u64 divs;
+       u64 dsps;
+       u64 bops;
+       u64 traps;
+       u64 fpus;
+       u64 loads;
+       u64 stores;
+       u64 llsc;
+       u64 dsemul;
+};
+
+struct mips_r2br_emulator_stats {
+       u64 jrs;
+       u64 bltzl;
+       u64 bgezl;
+       u64 bltzll;
+       u64 bgezll;
+       u64 bltzall;
+       u64 bgezall;
+       u64 bltzal;
+       u64 bgezal;
+       u64 beql;
+       u64 bnel;
+       u64 blezl;
+       u64 bgtzl;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MIPS_R2_STATS(M)                                               \
+do {                                                                   \
+       u32 nir;                                                        \
+       int err;                                                        \
+                                                                       \
+       preempt_disable();                                              \
+       __this_cpu_inc(mipsr2emustats.M);                               \
+       err = __get_user(nir, (u32 __user *)regs->cp0_epc);             \
+       if (!err) {                                                     \
+               if (nir == BREAK_MATH)                                  \
+                       __this_cpu_inc(mipsr2bdemustats.M);             \
+       }                                                               \
+       preempt_enable();                                               \
+} while (0)
+
+#define MIPS_R2BR_STATS(M)                                     \
+do {                                                           \
+       preempt_disable();                                      \
+       __this_cpu_inc(mipsr2bremustats.M);                     \
+       preempt_enable();                                       \
+} while (0)
+
+#else
+
+#define MIPS_R2_STATS(M)          do { } while (0)
+#define MIPS_R2BR_STATS(M)        do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
+
+struct r2_decoder_table {
+       u32     mask;
+       u32     code;
+       int     (*func)(struct pt_regs *regs, u32 inst);
+};
+
+
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+                         const char *str);
+
+#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
+static int mipsr2_emulation;
+static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; }
+#else
+/* MIPS R2 Emulator ON/OFF */
+extern int mipsr2_emulation;
+extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
+#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
+
+#define NO_R6EMU       (cpu_has_mips_r6 && !mipsr2_emulation)
+
+#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
index 5b720d8c2745b2e8b891f38c5256db82a1232bc5..fef004434096596ebb54ecf8834dc04e9f3fd4c4 100644 (file)
 #define MIPS_CONF5_NF          (_ULCAST_(1) << 0)
 #define MIPS_CONF5_UFR         (_ULCAST_(1) << 2)
 #define MIPS_CONF5_MRP         (_ULCAST_(1) << 3)
+#define MIPS_CONF5_LLB         (_ULCAST_(1) << 4)
 #define MIPS_CONF5_MVH         (_ULCAST_(1) << 5)
 #define MIPS_CONF5_FRE         (_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE         (_ULCAST_(1) << 9)
@@ -1127,6 +1128,8 @@ do {                                                                      \
 #define write_c0_config6(val)  __write_32bit_c0_register($16, 6, val)
 #define write_c0_config7(val)  __write_32bit_c0_register($16, 7, val)
 
+#define read_c0_lladdr()       __read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val)   __write_ulong_c0_register($17, 0, val)
 #define read_c0_maar()         __read_ulong_c0_register($17, 1)
 #define write_c0_maar(val)     __write_ulong_c0_register($17, 1, val)
 #define read_c0_maari()                __read_32bit_c0_register($17, 2)
@@ -1909,6 +1912,7 @@ __BUILD_SET_C0(config5)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
+__BUILD_SET_C0(pagegrain)
 __BUILD_SET_C0(brcm_config_0)
 __BUILD_SET_C0(brcm_bus_pll)
 __BUILD_SET_C0(brcm_reset)
index c436138945a84dca9f1f311e54a801090c396853..1afa1f986df8c42a06e5f34355de8f8f1325c4bf 100644 (file)
@@ -1,9 +1,12 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#include <linux/atomic.h>
+
 typedef struct {
        unsigned long asid[NR_CPUS];
        void *vdso;
+       atomic_t fp_mode_switching;
 } mm_context_t;
 
 #endif /* __ASM_MMU_H */
index 2f82568a3ee4cf2caa9e55f3e4b1d2e25eb26090..45914b59824c11a14a9ec76e6fe016dccc3eaaaf 100644 (file)
@@ -25,7 +25,6 @@ do {                                                                  \
        if (cpu_has_htw) {                                              \
                write_c0_pwbase(pgd);                                   \
                back_to_back_c0_hazard();                               \
-               htw_reset();                                            \
        }                                                               \
 } while (0)
 
@@ -132,6 +131,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;
 
+       atomic_set(&mm->context.fp_mode_switching, 0);
+
        return 0;
 }
 
@@ -142,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        unsigned long flags;
        local_irq_save(flags);
 
+       htw_stop();
        /* Check if our ASID is of an older version and thus invalid */
        if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
                get_new_mmu_context(next, cpu);
@@ -154,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));
+       htw_start();
 
        local_irq_restore(flags);
 }
@@ -180,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 
        local_irq_save(flags);
 
+       htw_stop();
        /* Unconditionally get a new ASID.  */
        get_new_mmu_context(next, cpu);
 
@@ -189,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
        /* mark mmu ownership change */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));
+       htw_start();
 
        local_irq_restore(flags);
 }
@@ -203,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
        unsigned long flags;
 
        local_irq_save(flags);
+       htw_stop();
 
        if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {
                get_new_mmu_context(mm, cpu);
@@ -211,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
                /* will get a new context next time */
                cpu_context(cpu, mm) = 0;
        }
+       htw_start();
        local_irq_restore(flags);
 }
 
index 800fe578dc99a5312aa78445a8420dc9c466b811..0aaf9a01ea505bad4754b6d56f7eda8f19b9de6f 100644 (file)
@@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr)
 #define MODULE_PROC_FAMILY "MIPS32_R1 "
 #elif defined CONFIG_CPU_MIPS32_R2
 #define MODULE_PROC_FAMILY "MIPS32_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
 #elif defined CONFIG_CPU_MIPS64_R1
 #define MODULE_PROC_FAMILY "MIPS64_R1 "
 #elif defined CONFIG_CPU_MIPS64_R2
 #define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
 #elif defined CONFIG_CPU_R3000
 #define MODULE_PROC_FAMILY "R3000 "
 #elif defined CONFIG_CPU_TX39XX
index 75739c83f07e74bb26ab5dbc0fd32c34401c838b..8d05d90698238e4deb6bc0b649a7929e3d0e2b76 100644 (file)
@@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
                " lbu   %[ticket], %[now_serving]\n"
                "4:\n"
                ".set pop\n" :
-               [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+               [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
                [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
                [my_ticket] "=r"(my_ticket)
            );
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
new file mode 100644 (file)
index 0000000..0c9c3e7
--- /dev/null
@@ -0,0 +1,306 @@
+/***********************license start***************
+ * Author: Cavium Inc.
+ *
+ * Contact: support@cavium.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2014 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Inc. for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_RST_DEFS_H__
+#define __CVMX_RST_DEFS_H__
+
+#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull))
+#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull))
+#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull))
+#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8)
+#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull))
+#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull))
+#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull))
+#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull))
+#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull))
+#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull))
+#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8)
+#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull))
+
+union cvmx_rst_boot {
+       uint64_t u64;
+       struct cvmx_rst_boot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t chipkill:1;
+               uint64_t jtcsrdis:1;
+               uint64_t ejtagdis:1;
+               uint64_t romen:1;
+               uint64_t ckill_ppdis:1;
+               uint64_t jt_tstmode:1;
+               uint64_t vrm_err:1;
+               uint64_t reserved_37_56:20;
+               uint64_t c_mul:7;
+               uint64_t pnr_mul:6;
+               uint64_t reserved_21_23:3;
+               uint64_t lboot_oci:3;
+               uint64_t lboot_ext:6;
+               uint64_t lboot:10;
+               uint64_t rboot:1;
+               uint64_t rboot_pin:1;
+#else
+               uint64_t rboot_pin:1;
+               uint64_t rboot:1;
+               uint64_t lboot:10;
+               uint64_t lboot_ext:6;
+               uint64_t lboot_oci:3;
+               uint64_t reserved_21_23:3;
+               uint64_t pnr_mul:6;
+               uint64_t c_mul:7;
+               uint64_t reserved_37_56:20;
+               uint64_t vrm_err:1;
+               uint64_t jt_tstmode:1;
+               uint64_t ckill_ppdis:1;
+               uint64_t romen:1;
+               uint64_t ejtagdis:1;
+               uint64_t jtcsrdis:1;
+               uint64_t chipkill:1;
+#endif
+       } s;
+       struct cvmx_rst_boot_s cn70xx;
+       struct cvmx_rst_boot_s cn70xxp1;
+       struct cvmx_rst_boot_s cn78xx;
+};
+
+union cvmx_rst_cfg {
+       uint64_t u64;
+       struct cvmx_rst_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t bist_delay:58;
+               uint64_t reserved_3_5:3;
+               uint64_t cntl_clr_bist:1;
+               uint64_t warm_clr_bist:1;
+               uint64_t soft_clr_bist:1;
+#else
+               uint64_t soft_clr_bist:1;
+               uint64_t warm_clr_bist:1;
+               uint64_t cntl_clr_bist:1;
+               uint64_t reserved_3_5:3;
+               uint64_t bist_delay:58;
+#endif
+       } s;
+       struct cvmx_rst_cfg_s cn70xx;
+       struct cvmx_rst_cfg_s cn70xxp1;
+       struct cvmx_rst_cfg_s cn78xx;
+};
+
+union cvmx_rst_ckill {
+       uint64_t u64;
+       struct cvmx_rst_ckill_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_47_63:17;
+               uint64_t timer:47;
+#else
+               uint64_t timer:47;
+               uint64_t reserved_47_63:17;
+#endif
+       } s;
+       struct cvmx_rst_ckill_s cn70xx;
+       struct cvmx_rst_ckill_s cn70xxp1;
+       struct cvmx_rst_ckill_s cn78xx;
+};
+
+union cvmx_rst_ctlx {
+       uint64_t u64;
+       struct cvmx_rst_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_10_63:54;
+               uint64_t prst_link:1;
+               uint64_t rst_done:1;
+               uint64_t rst_link:1;
+               uint64_t host_mode:1;
+               uint64_t reserved_4_5:2;
+               uint64_t rst_drv:1;
+               uint64_t rst_rcv:1;
+               uint64_t rst_chip:1;
+               uint64_t rst_val:1;
+#else
+               uint64_t rst_val:1;
+               uint64_t rst_chip:1;
+               uint64_t rst_rcv:1;
+               uint64_t rst_drv:1;
+               uint64_t reserved_4_5:2;
+               uint64_t host_mode:1;
+               uint64_t rst_link:1;
+               uint64_t rst_done:1;
+               uint64_t prst_link:1;
+               uint64_t reserved_10_63:54;
+#endif
+       } s;
+       struct cvmx_rst_ctlx_s cn70xx;
+       struct cvmx_rst_ctlx_s cn70xxp1;
+       struct cvmx_rst_ctlx_s cn78xx;
+};
+
+union cvmx_rst_delay {
+       uint64_t u64;
+       struct cvmx_rst_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_32_63:32;
+               uint64_t warm_rst_dly:16;
+               uint64_t soft_rst_dly:16;
+#else
+               uint64_t soft_rst_dly:16;
+               uint64_t warm_rst_dly:16;
+               uint64_t reserved_32_63:32;
+#endif
+       } s;
+       struct cvmx_rst_delay_s cn70xx;
+       struct cvmx_rst_delay_s cn70xxp1;
+       struct cvmx_rst_delay_s cn78xx;
+};
+
+union cvmx_rst_eco {
+       uint64_t u64;
+       struct cvmx_rst_eco_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_32_63:32;
+               uint64_t eco_rw:32;
+#else
+               uint64_t eco_rw:32;
+               uint64_t reserved_32_63:32;
+#endif
+       } s;
+       struct cvmx_rst_eco_s cn78xx;
+};
+
+union cvmx_rst_int {
+       uint64_t u64;
+       struct cvmx_rst_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_12_63:52;
+               uint64_t perst:4;
+               uint64_t reserved_4_7:4;
+               uint64_t rst_link:4;
+#else
+               uint64_t rst_link:4;
+               uint64_t reserved_4_7:4;
+               uint64_t perst:4;
+               uint64_t reserved_12_63:52;
+#endif
+       } s;
+       struct cvmx_rst_int_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_11_63:53;
+               uint64_t perst:3;
+               uint64_t reserved_3_7:5;
+               uint64_t rst_link:3;
+#else
+               uint64_t rst_link:3;
+               uint64_t reserved_3_7:5;
+               uint64_t perst:3;
+               uint64_t reserved_11_63:53;
+#endif
+       } cn70xx;
+       struct cvmx_rst_int_cn70xx cn70xxp1;
+       struct cvmx_rst_int_s cn78xx;
+};
+
+union cvmx_rst_ocx {
+       uint64_t u64;
+       struct cvmx_rst_ocx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_3_63:61;
+               uint64_t rst_link:3;
+#else
+               uint64_t rst_link:3;
+               uint64_t reserved_3_63:61;
+#endif
+       } s;
+       struct cvmx_rst_ocx_s cn78xx;
+};
+
+union cvmx_rst_power_dbg {
+       uint64_t u64;
+       struct cvmx_rst_power_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_3_63:61;
+               uint64_t str:3;
+#else
+               uint64_t str:3;
+               uint64_t reserved_3_63:61;
+#endif
+       } s;
+       struct cvmx_rst_power_dbg_s cn78xx;
+};
+
+union cvmx_rst_pp_power {
+       uint64_t u64;
+       struct cvmx_rst_pp_power_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_48_63:16;
+               uint64_t gate:48;
+#else
+               uint64_t gate:48;
+               uint64_t reserved_48_63:16;
+#endif
+       } s;
+       struct cvmx_rst_pp_power_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_4_63:60;
+               uint64_t gate:4;
+#else
+               uint64_t gate:4;
+               uint64_t reserved_4_63:60;
+#endif
+       } cn70xx;
+       struct cvmx_rst_pp_power_cn70xx cn70xxp1;
+       struct cvmx_rst_pp_power_s cn78xx;
+};
+
+union cvmx_rst_soft_prstx {
+       uint64_t u64;
+       struct cvmx_rst_soft_prstx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_1_63:63;
+               uint64_t soft_prst:1;
+#else
+               uint64_t soft_prst:1;
+               uint64_t reserved_1_63:63;
+#endif
+       } s;
+       struct cvmx_rst_soft_prstx_s cn70xx;
+       struct cvmx_rst_soft_prstx_s cn70xxp1;
+       struct cvmx_rst_soft_prstx_s cn78xx;
+};
+
+union cvmx_rst_soft_rst {
+       uint64_t u64;
+       struct cvmx_rst_soft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_1_63:63;
+               uint64_t soft_rst:1;
+#else
+               uint64_t soft_rst:1;
+               uint64_t reserved_1_63:63;
+#endif
+       } s;
+       struct cvmx_rst_soft_rst_s cn70xx;
+       struct cvmx_rst_soft_rst_s cn70xxp1;
+       struct cvmx_rst_soft_rst_s cn78xx;
+};
+
+#endif
index e8a1c2fd52cdd8f3b65ffe68bf3eefa3628fbaa2..92b377e36dac260f9100d9178fe2252bed85e061 100644 (file)
@@ -45,6 +45,7 @@
  */
 
 #define OCTEON_FAMILY_MASK     0x00ffff00
+#define OCTEON_PRID_MASK       0x00ffffff
 
 /* Flag bits in top byte */
 /* Ignores revision in model checks */
 #define OM_MATCH_6XXX_FAMILY_MODELS    0x40000000
 /* Match all cnf7XXX Octeon models. */
 #define OM_MATCH_F7XXX_FAMILY_MODELS   0x80000000
+/* Match all cn7XXX Octeon models. */
+#define OM_MATCH_7XXX_FAMILY_MODELS     0x10000000
+#define OM_MATCH_FAMILY_MODELS         (OM_MATCH_5XXX_FAMILY_MODELS |  \
+                                        OM_MATCH_6XXX_FAMILY_MODELS |  \
+                                        OM_MATCH_F7XXX_FAMILY_MODELS | \
+                                        OM_MATCH_7XXX_FAMILY_MODELS)
+/*
+ * CN7XXX models with new revision encoding
+ */
+
+#define OCTEON_CN73XX_PASS1_0  0x000d9700
+#define OCTEON_CN73XX          (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN73XX_PASS1_X  (OCTEON_CN73XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN70XX_PASS1_0  0x000d9600
+#define OCTEON_CN70XX_PASS1_1  0x000d9601
+#define OCTEON_CN70XX_PASS1_2  0x000d9602
+
+#define OCTEON_CN70XX_PASS2_0  0x000d9608
+
+#define OCTEON_CN70XX          (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN70XX_PASS1_X  (OCTEON_CN70XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN70XX_PASS2_X  (OCTEON_CN70XX_PASS2_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN71XX          OCTEON_CN70XX
+
+#define OCTEON_CN78XX_PASS1_0  0x000d9500
+#define OCTEON_CN78XX_PASS1_1  0x000d9501
+#define OCTEON_CN78XX_PASS2_0  0x000d9508
+
+#define OCTEON_CN78XX          (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN78XX_PASS1_X  (OCTEON_CN78XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN78XX_PASS2_X  (OCTEON_CN78XX_PASS2_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN76XX          (0x000d9540 | OM_CHECK_SUBMODEL)
 
 /*
  * CNF7XXX models with new revision encoding
  */
 #define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_1  0x000d9401
 
 #define OCTEON_CNF71XX         (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN68XX_PASS1_1  0x000d9101
 #define OCTEON_CN68XX_PASS1_2  0x000d9102
 #define OCTEON_CN68XX_PASS2_0  0x000d9108
+#define OCTEON_CN68XX_PASS2_1   0x000d9109
+#define OCTEON_CN68XX_PASS2_2   0x000d910a
 
 #define OCTEON_CN68XX          (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN68XX_PASS1_X  (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN63XX_PASS1_X  (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN63XX_PASS2_X  (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 
+/* CN62XX is same as CN63XX with 1 MB cache */
+#define OCTEON_CN62XX           OCTEON_CN63XX
+
 #define OCTEON_CN61XX_PASS1_0  0x000d9300
+#define OCTEON_CN61XX_PASS1_1   0x000d9301
 
 #define OCTEON_CN61XX          (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN61XX_PASS1_X  (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 
+/* CN60XX is same as CN61XX with 512 KB cache */
+#define OCTEON_CN60XX           OCTEON_CN61XX
+
 /*
  * CN5XXX models with new revision encoding
  */
 #define OCTEON_CN58XX_PASS2_2  0x000d030a
 #define OCTEON_CN58XX_PASS2_3  0x000d030b
 
-#define OCTEON_CN58XX          (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX          (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN58XX_PASS1_X  (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN58XX_PASS2_X  (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN58XX_PASS1    OCTEON_CN58XX_PASS1_X
 #define OCTEON_CN3XXX          (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
 #define OCTEON_CN5XXX          (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
 #define OCTEON_CN6XXX          (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
-
-/* These are used to cover entire families of OCTEON processors */
-#define OCTEON_FAM_1           (OCTEON_CN3XXX)
-#define OCTEON_FAM_PLUS                (OCTEON_CN5XXX)
-#define OCTEON_FAM_1_PLUS      (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
-#define OCTEON_FAM_2           (OCTEON_CN6XXX)
+#define OCTEON_CNF7XXX         (OCTEON_CNF71XX_PASS1_0 | \
+                                OM_MATCH_F7XXX_FAMILY_MODELS)
+#define OCTEON_CN7XXX          (OCTEON_CN78XX_PASS1_0 | \
+                                OM_MATCH_7XXX_FAMILY_MODELS)
 
 /* The revision byte (low byte) has two different encodings.
  * CN3XXX:
  *     <4>:   alternate package
  *     <3:0>: revision
  *
- * CN5XXX:
+ * CN5XXX and older models:
  *
  *     bits
  *     <7>:   reserved (0)
 /* CN5XXX and later use different layout of bits in the revision ID field */
 #define OCTEON_58XX_FAMILY_MASK             OCTEON_38XX_FAMILY_MASK
 #define OCTEON_58XX_FAMILY_REV_MASK  0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK      0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK      0x00ffff40
 #define OCTEON_58XX_MODEL_REV_MASK   (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
-#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
 #define OCTEON_5XXX_MODEL_MASK      0x00ff0fc0
 
-/* forward declarations */
 static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
 static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
 
 #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
 
+/*
+ * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
+ * returns true if chip_model is identical or belong to the OCTEON
+ * model group specified in arg_model.
+ */
 /* NOTE: This for internal use only! */
 #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)             \
 ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0)  && ( \
@@ -286,11 +339,18 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
                ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
                        && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
                ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL)  \
-                       && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \
+                       && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
                ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
-                       && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
                ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
-                       && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) ||  \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
+               ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
+               ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \
                ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
                        && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
                )))
@@ -300,14 +360,6 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
 {
        uint32_t cpuid = cvmx_get_proc_id();
 
-       /*
-        * Check for special case of mismarked 3005 samples. We only
-        * need to check if the sub model isn't being ignored
-        */
-       if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
-               if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
-                       cpuid |= 0x10;
-       }
        return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
 }
 
@@ -326,10 +378,21 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
 #define OCTEON_IS_COMMON_BINARY() 1
 #undef OCTEON_MODEL
 
+#define OCTEON_IS_OCTEON1()    OCTEON_IS_MODEL(OCTEON_CN3XXX)
+#define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX)
+#define OCTEON_IS_OCTEON2()                                            \
+       (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+
+#define OCTEON_IS_OCTEON3()    OCTEON_IS_MODEL(OCTEON_CN7XXX)
+
+#define OCTEON_IS_OCTEON1PLUS()        (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS())
+
 const char *__init octeon_model_get_string(uint32_t chip_id);
 
 /*
  * Return the octeon family, i.e., ProcessorID of the PrID register.
+ *
+ * @return the octeon family on success, ((uint32_t)-1) on error.
  */
 static inline uint32_t cvmx_get_octeon_family(void)
 {
index 6dfefd2d5cdfd7a39c0679da021a7338c298e490..0415965708565c7ddf8d5fe3451f67e907ce9a9c 100644 (file)
@@ -9,6 +9,7 @@
 #define __ASM_OCTEON_OCTEON_H
 
 #include <asm/octeon/cvmx.h>
+#include <asm/bitfield.h>
 
 extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
                                                uint64_t alignment,
@@ -53,6 +54,7 @@ extern void octeon_io_clk_delay(unsigned long);
 #define OCTOEN_SERIAL_LEN      20
 
 struct octeon_boot_descriptor {
+#ifdef __BIG_ENDIAN_BITFIELD
        /* Start of block referenced by assembly code - do not change! */
        uint32_t desc_version;
        uint32_t desc_size;
@@ -104,77 +106,149 @@ struct octeon_boot_descriptor {
        uint8_t mac_addr_base[6];
        uint8_t mac_addr_count;
        uint64_t cvmx_desc_vaddr;
+#else
+       uint32_t desc_size;
+       uint32_t desc_version;
+       uint64_t stack_top;
+       uint64_t heap_base;
+       uint64_t heap_end;
+       /* Only used by bootloader */
+       uint64_t entry_point;
+       uint64_t desc_vaddr;
+       /* End of this block referenced by assembly code - do not change! */
+       uint32_t stack_size;
+       uint32_t exception_base_addr;
+       uint32_t argc;
+       uint32_t heap_size;
+       /*
+        * Argc count for application.
+        * Warning low bit scrambled in little-endian.
+        */
+       uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define  BOOT_FLAG_INIT_CORE           (1 << 0)
+#define  OCTEON_BL_FLAG_DEBUG          (1 << 1)
+#define  OCTEON_BL_FLAG_NO_MAGIC       (1 << 2)
+       /* If set, use uart1 for console */
+#define  OCTEON_BL_FLAG_CONSOLE_UART1  (1 << 3)
+       /* If set, use PCI console */
+#define  OCTEON_BL_FLAG_CONSOLE_PCI    (1 << 4)
+       /* Call exit on break on serial port */
+#define  OCTEON_BL_FLAG_BREAK          (1 << 5)
+
+       uint32_t core_mask;
+       uint32_t flags;
+       /* physical address of free memory descriptor block. */
+       uint32_t phy_mem_desc_addr;
+       /* DRAM size in megabytes. */
+       uint32_t dram_size;
+       /* CPU clock speed, in hz. */
+       uint32_t eclock_hz;
+       /* used to pass flags from app to debugger. */
+       uint32_t debugger_flags_base_addr;
+       /* SPI4 clock in hz. */
+       uint32_t spi_clock_hz;
+       /* DRAM clock speed, in hz. */
+       uint32_t dclock_hz;
+       uint8_t chip_rev_minor;
+       uint8_t chip_rev_major;
+       uint16_t chip_type;
+       uint8_t board_rev_minor;
+       uint8_t board_rev_major;
+       uint16_t board_type;
+
+       uint64_t unused1[4]; /* Not even filled in by bootloader. */
+
+       uint64_t cvmx_desc_vaddr;
+#endif
 };
 
 union octeon_cvmemctl {
        uint64_t u64;
        struct {
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t tlbbist:1;
+               __BITFIELD_FIELD(uint64_t tlbbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t l1cbist:1;
+               __BITFIELD_FIELD(uint64_t l1cbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t l1dbist:1;
+               __BITFIELD_FIELD(uint64_t l1dbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t dcmbist:1;
+               __BITFIELD_FIELD(uint64_t dcmbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t ptgbist:1;
+               __BITFIELD_FIELD(uint64_t ptgbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t wbfbist:1;
+               __BITFIELD_FIELD(uint64_t wbfbist:1,
                /* Reserved */
-               uint64_t reserved:22;
+               __BITFIELD_FIELD(uint64_t reserved:17,
+               /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU.
+                * This field selects between the TLB replacement policies:
+                * bitmask LRU or NLU. Bitmask LRU maintains a mask of
+                * recently used TLB entries and avoids them as new entries
+                * are allocated. NLU simply guarantees that the next
+                * allocation is not the last used TLB entry. */
+               __BITFIELD_FIELD(uint64_t tlbnlu:1,
+               /* OCTEON II - Selects the bit in the counter used for
+                * releasing a PAUSE. This counter trips every 2^(8+PAUSETIME)
+                * cycles. If not already released, the cnMIPS II core will
+                * always release a given PAUSE instruction within
+                * 2^(8+PAUSETIME). If the counter trip happens to line up,
+                * the cnMIPS II core may release the PAUSE instantly. */
+               __BITFIELD_FIELD(uint64_t pausetime:3,
+               /* OCTEON II - This field is an extension of
+                * CvmMemCtl[DIDTTO] */
+               __BITFIELD_FIELD(uint64_t didtto2:1,
                /* R/W If set, marked write-buffer entries time out
                 * the same as other entries; if clear, marked
                 * write-buffer entries use the maximum timeout. */
-               uint64_t dismarkwblongto:1;
+               __BITFIELD_FIELD(uint64_t dismarkwblongto:1,
                /* R/W If set, a merged store does not clear the
                 * write-buffer entry timeout state. */
-               uint64_t dismrgclrwbto:1;
+               __BITFIELD_FIELD(uint64_t dismrgclrwbto:1,
                /* R/W Two bits that are the MSBs of the resultant
                 * CVMSEG LM word location for an IOBDMA. The other 8
                 * bits come from the SCRADDR field of the IOBDMA. */
-               uint64_t iobdmascrmsb:2;
+               __BITFIELD_FIELD(uint64_t iobdmascrmsb:2,
                /* R/W If set, SYNCWS and SYNCS only order marked
                 * stores; if clear, SYNCWS and SYNCS only order
                 * unmarked stores. SYNCWSMARKED has no effect when
                 * DISSYNCWS is set. */
-               uint64_t syncwsmarked:1;
+               __BITFIELD_FIELD(uint64_t syncwsmarked:1,
                /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
                 * SYNC. */
-               uint64_t dissyncws:1;
+               __BITFIELD_FIELD(uint64_t dissyncws:1,
                /* R/W If set, no stall happens on write buffer
                 * full. */
-               uint64_t diswbfst:1;
+               __BITFIELD_FIELD(uint64_t diswbfst:1,
                /* R/W If set (and SX set), supervisor-level
                 * loads/stores can use XKPHYS addresses with
                 * VA<48>==0 */
-               uint64_t xkmemenas:1;
+               __BITFIELD_FIELD(uint64_t xkmemenas:1,
                /* R/W If set (and UX set), user-level loads/stores
                 * can use XKPHYS addresses with VA<48>==0 */
-               uint64_t xkmemenau:1;
+               __BITFIELD_FIELD(uint64_t xkmemenau:1,
                /* R/W If set (and SX set), supervisor-level
                 * loads/stores can use XKPHYS addresses with
                 * VA<48>==1 */
-               uint64_t xkioenas:1;
+               __BITFIELD_FIELD(uint64_t xkioenas:1,
                /* R/W If set (and UX set), user-level loads/stores
                 * can use XKPHYS addresses with VA<48>==1 */
-               uint64_t xkioenau:1;
+               __BITFIELD_FIELD(uint64_t xkioenau:1,
                /* R/W If set, all stores act as SYNCW (NOMERGE must
                 * be set when this is set) RW, reset to 0. */
-               uint64_t allsyncw:1;
+               __BITFIELD_FIELD(uint64_t allsyncw:1,
                /* R/W If set, no stores merge, and all stores reach
                 * the coherent bus in order. */
-               uint64_t nomerge:1;
+               __BITFIELD_FIELD(uint64_t nomerge:1,
                /* R/W Selects the bit in the counter used for DID
                 * time-outs 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
                 * 2^14. Actual time-out is between 1x and 2x this
                 * interval. For example, with DIDTTO=3, expiration
                 * interval is between 16K and 32K. */
-               uint64_t didtto:2;
+               __BITFIELD_FIELD(uint64_t didtto:2,
                /* R/W If set, the (mem) CSR clock never turns off. */
-               uint64_t csrckalwys:1;
+               __BITFIELD_FIELD(uint64_t csrckalwys:1,
                /* R/W If set, mclk never turns off. */
-               uint64_t mclkalwys:1;
+               __BITFIELD_FIELD(uint64_t mclkalwys:1,
                /* R/W Selects the bit in the counter used for write
                 * buffer flush time-outs (WBFLT+11) is the bit
                 * position in an internal counter used to determine
@@ -182,25 +256,26 @@ union octeon_cvmemctl {
                 * 2x this interval. For example, with WBFLT = 0, a
                 * write buffer expires between 2K and 4K cycles after
                 * the write buffer entry is allocated. */
-               uint64_t wbfltime:3;
+               __BITFIELD_FIELD(uint64_t wbfltime:3,
                /* R/W If set, do not put Istream in the L2 cache. */
-               uint64_t istrnol2:1;
+               __BITFIELD_FIELD(uint64_t istrnol2:1,
                /* R/W The write buffer threshold. */
-               uint64_t wbthresh:4;
+               __BITFIELD_FIELD(uint64_t wbthresh:4,
                /* Reserved */
-               uint64_t reserved2:2;
+               __BITFIELD_FIELD(uint64_t reserved2:2,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * kernel/debug mode. */
-               uint64_t cvmsegenak:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenak:1,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * supervisor mode. */
-               uint64_t cvmsegenas:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenas:1,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * user mode. */
-               uint64_t cvmsegenau:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenau:1,
                /* R/W Size of local memory in cache blocks, 54 (6912
                 * bytes) is max legal value. */
-               uint64_t lmemsz:6;
+               __BITFIELD_FIELD(uint64_t lmemsz:6,
+               ;)))))))))))))))))))))))))))))))))
        } s;
 };
 
@@ -224,6 +299,19 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val)
        cvmx_read64_uint32(address ^ 4);
 }
 
+/* Octeon multiplier save/restore routines from octeon_switch.S */
+void octeon_mult_save(void);
+void octeon_mult_restore(void);
+void octeon_mult_save_end(void);
+void octeon_mult_restore_end(void);
+void octeon_mult_save3(void);
+void octeon_mult_save3_end(void);
+void octeon_mult_save2(void);
+void octeon_mult_save2_end(void);
+void octeon_mult_restore3(void);
+void octeon_mult_restore3_end(void);
+void octeon_mult_restore2(void);
+void octeon_mult_restore2_end(void);
 
 /**
  * Read a 32bit value from the Octeon NPI register space
index 69529624a0050713b120ecfd3b430a601dfb567b..193b4c6b7541a774f3a0ccfbaa8bb7f7add55b83 100644 (file)
@@ -121,6 +121,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 }
 #endif
 
+#ifdef CONFIG_PCI_DOMAINS
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
 
 static inline int pci_proc_domain(struct pci_bus *bus)
@@ -128,6 +129,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
        struct pci_controller *hose = bus->sysdata;
        return hose->need_domain_info;
 }
+#endif /* CONFIG_PCI_DOMAINS */
 
 #endif /* __KERNEL__ */
 
index fc807aa5ec8d7593ef8bda4c13e184ae31d79a70..91747c282bb3fb3f1f6560cb5a722089aac9741e 100644 (file)
@@ -35,7 +35,7 @@
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
 /*
- * The following bits are directly used by the TLB hardware
+ * The following bits are implemented by the TLB hardware
  */
 #define _PAGE_GLOBAL_SHIFT     0
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
 #define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
 #define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
 
-#define _PAGE_SILENT_READ      _PAGE_VALID
-#define _PAGE_SILENT_WRITE     _PAGE_DIRTY
-
 #define _PFN_SHIFT             (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /*
- * The following are implemented by software
+ * The following bits are implemented in software
  */
-#define _PAGE_PRESENT_SHIFT    0
-#define _PAGE_PRESENT          (1 <<  _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT       1
-#define _PAGE_READ             (1 <<  _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT      2
-#define _PAGE_WRITE            (1 <<  _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT   3
-#define _PAGE_ACCESSED         (1 <<  _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT   4
-#define _PAGE_MODIFIED         (1 <<  _PAGE_MODIFIED_SHIFT)
+#define _PAGE_PRESENT_SHIFT    (0)
+#define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT       (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ             (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT      (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE            (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT   (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED         (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
 
 /*
- * And these are the hardware TLB bits
+ * The following bits are implemented by the TLB hardware
  */
-#define _PAGE_GLOBAL_SHIFT     8
-#define _PAGE_GLOBAL           (1 <<  _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT      9
-#define _PAGE_VALID            (1 <<  _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ      (1 <<  _PAGE_VALID_SHIFT)       /* synonym  */
-#define _PAGE_DIRTY_SHIFT      10
+#define _PAGE_GLOBAL_SHIFT     (_PAGE_MODIFIED_SHIFT + 4)
+#define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE     (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_UNCACHED_SHIFT  11
+#define _CACHE_UNCACHED_SHIFT  (_PAGE_DIRTY_SHIFT + 1)
 #define _CACHE_UNCACHED                (1 << _CACHE_UNCACHED_SHIFT)
-#define _CACHE_MASK            (1 << _CACHE_UNCACHED_SHIFT)
+#define _CACHE_MASK            _CACHE_UNCACHED
 
-#else /* 'Normal' r4K case */
+#define _PFN_SHIFT             PAGE_SHIFT
+
+#else
 /*
  * When using the RI/XI bit support, we have 13 bits of flags below
  * the physical address. The RI/XI bits are placed such that a SRL 5
 
 /*
  * The following bits are implemented in software
- *
- * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
  */
-#define _PAGE_PRESENT_SHIFT    (0)
+#define _PAGE_PRESENT_SHIFT    0
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
 #define _PAGE_READ_SHIFT       (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
 /* huge tlb page */
 #define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_HUGE             (1 << _PAGE_HUGE_SHIFT)
-#else
-#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
-#endif
-
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-/* huge tlb page */
 #define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING                (1 << _PAGE_SPLITTING_SHIFT)
 #else
+#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
 #define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING                ({BUG(); 1; })  /* Dummy value */
 #endif
 
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
-
 #define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
-/* synonym                */
-#define _PAGE_SILENT_READ      (_PAGE_VALID)
-
-/* The MIPS dirty bit     */
 #define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE     (_PAGE_DIRTY)
-
 #define _CACHE_SHIFT           (_PAGE_DIRTY_SHIFT + 1)
 #define _CACHE_MASK            (7 << _CACHE_SHIFT)
 
 
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
 
-#ifndef _PFN_SHIFT
-#define _PFN_SHIFT                 PAGE_SHIFT
-#endif
+#define _PAGE_SILENT_READ      _PAGE_VALID
+#define _PAGE_SILENT_WRITE     _PAGE_DIRTY
+
 #define _PFN_MASK              (~((1 << (_PFN_SHIFT)) - 1))
 
 #ifndef _PAGE_NO_READ
 #ifndef _PAGE_NO_EXEC
 #define _PAGE_NO_EXEC ({BUG(); 0; })
 #endif
-#ifndef _PAGE_GLOBAL_SHIFT
-#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
-#endif
 
 
 #ifndef __ASSEMBLY__
@@ -266,8 +246,9 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 #endif
 
 #define __READABLE     (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
-#define __WRITEABLE    (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
+#define __WRITEABLE    (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
 
-#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED |      \
+                        _PFN_MASK | _CACHE_MASK)
 
 #endif /* _ASM_PGTABLE_BITS_H */
index 583ff42154794da86c5cc9b64ae9e81e4507e057..bef782c4a44bd33b20f24dda9422f401e2324acc 100644 (file)
@@ -99,29 +99,35 @@ extern void paging_init(void);
 
 #define htw_stop()                                                     \
 do {                                                                   \
-       if (cpu_has_htw)                                                \
-               write_c0_pwctl(read_c0_pwctl() &                        \
-                              ~(1 << MIPS_PWCTL_PWEN_SHIFT));          \
+       unsigned long flags;                                            \
+                                                                       \
+       if (cpu_has_htw) {                                              \
+               local_irq_save(flags);                                  \
+               if(!raw_current_cpu_data.htw_seq++) {                   \
+                       write_c0_pwctl(read_c0_pwctl() &                \
+                                      ~(1 << MIPS_PWCTL_PWEN_SHIFT));  \
+                       back_to_back_c0_hazard();                       \
+               }                                                       \
+               local_irq_restore(flags);                               \
+       }                                                               \
 } while(0)
 
 #define htw_start()                                                    \
 do {                                                                   \
-       if (cpu_has_htw)                                                \
-               write_c0_pwctl(read_c0_pwctl() |                        \
-                              (1 << MIPS_PWCTL_PWEN_SHIFT));           \
-} while(0)
-
-
-#define htw_reset()                                                    \
-do {                                                                   \
+       unsigned long flags;                                            \
+                                                                       \
        if (cpu_has_htw) {                                              \
-               htw_stop();                                             \
-               back_to_back_c0_hazard();                               \
-               htw_start();                                            \
-               back_to_back_c0_hazard();                               \
+               local_irq_save(flags);                                  \
+               if (!--raw_current_cpu_data.htw_seq) {                  \
+                       write_c0_pwctl(read_c0_pwctl() |                \
+                                      (1 << MIPS_PWCTL_PWEN_SHIFT));   \
+                       back_to_back_c0_hazard();                       \
+               }                                                       \
+               local_irq_restore(flags);                               \
        }                                                               \
 } while(0)
 
+
 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
        pte_t pteval);
 
@@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 {
        pte_t null = __pte(0);
 
+       htw_stop();
        /* Preserve global status for the pair */
        if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
                null.pte_low = null.pte_high = _PAGE_GLOBAL;
 
        set_pte_at(mm, addr, ptep, null);
-       htw_reset();
+       htw_start();
 }
 #else
 
@@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+       htw_stop();
 #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
        /* Preserve global status for the pair */
        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
@@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
        else
 #endif
                set_pte_at(mm, addr, ptep, __pte(0));
-       htw_reset();
+       htw_start();
 }
 #endif
 
@@ -334,7 +342,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
        return pte;
 }
 
-#ifdef _PAGE_HUGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_HUGE; }
 
 static inline pte_t pte_mkhuge(pte_t pte)
@@ -342,7 +350,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
        pte_val(pte) |= _PAGE_HUGE;
        return pte;
 }
-#endif /* _PAGE_HUGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 #endif
 static inline int pte_special(pte_t pte)       { return 0; }
 static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
index f1df4cb4a286dc8f76f1186ca9b2b1c439ce1764..b5dcbee01fd7a52641584cbbf8b80848f7c6f4b9 100644 (file)
@@ -54,9 +54,7 @@ extern unsigned int vced_count, vcei_count;
 #define TASK_SIZE      0x7fff8000UL
 #endif
 
-#ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE
-#endif
 
 #define TASK_IS_32BIT_ADDR 1
 
@@ -73,11 +71,7 @@ extern unsigned int vced_count, vcei_count;
 #define TASK_SIZE32    0x7fff8000UL
 #define TASK_SIZE64    0x10000000000UL
 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
-
-#ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE64
-#endif
-
 
 #define TASK_SIZE_OF(tsk)                                              \
        (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
@@ -211,6 +205,8 @@ struct octeon_cop2_state {
        unsigned long   cop2_gfm_poly;
        /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
        unsigned long   cop2_gfm_result[2];
+       /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
+       unsigned long   cop2_sha3[2];
 };
 #define COP2_INIT                                              \
        .cp2                    = {0,},
@@ -399,4 +395,15 @@ unsigned long get_wchan(struct task_struct *p);
 
 #endif
 
+/*
+ * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
+ * to the prctl syscall.
+ */
+extern int mips_get_process_fp_mode(struct task_struct *task);
+extern int mips_set_process_fp_mode(struct task_struct *task,
+                                   unsigned int value);
+
+#define GET_FP_MODE(task)              mips_get_process_fp_mode(task)
+#define SET_FP_MODE(task,value)                mips_set_process_fp_mode(task, value)
+
 #endif /* _ASM_PROCESSOR_H */
index eaa26270a5e574bae3db56202a22434c4c4b930f..8ebc2aa5f3e1331fd0c84f2ba54675a4128a06a2 100644 (file)
@@ -24,13 +24,6 @@ struct boot_param_header;
 extern void __dt_setup_arch(void *bph);
 extern int __dt_register_buses(const char *bus0, const char *bus1);
 
-#define dt_setup_arch(sym)                                             \
-({                                                                     \
-       extern char __dtb_##sym##_begin[];                              \
-                                                                       \
-       __dt_setup_arch(__dtb_##sym##_begin);                           \
-})
-
 #else /* CONFIG_OF */
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
index fc783f843bdc4272ccecfeddab877044a418703e..ffc320389f40a011ac6c66ef9c453fce23e38c34 100644 (file)
@@ -40,8 +40,8 @@ struct pt_regs {
        unsigned long cp0_cause;
        unsigned long cp0_epc;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-       unsigned long long mpl[3];        /* MTM{0,1,2} */
-       unsigned long long mtp[3];        /* MTP{0,1,2} */
+       unsigned long long mpl[6];        /* MTM{0-5} */
+       unsigned long long mtp[6];        /* MTP{0-5} */
 #endif
 } __aligned(8);
 
index e293a8d89a6da590a3e46be5fccc60f1fc2c1983..1b22d2da88a1ec1b76e42dfde3cbff5cb5b422c2 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <asm/asm.h>
 #include <asm/cacheops.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
@@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void);
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
-       "       .set    arch=r4000                              \n"     \
+       "       .set "MIPS_ISA_ARCH_LEVEL"                      \n"     \
        "       cache   %0, %1                                  \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
@@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
-       "       .set    arch=r4000              \n"             \
+       "       .set "MIPS_ISA_ARCH_LEVEL"      \n"             \
        "1:     cache   %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
@@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
        cache_op(Page_Invalidate_T, addr);
 }
 
+#ifndef CONFIG_CPU_MIPSR6
 #define cache16_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
@@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr)
                : "r" (base),                                           \
                  "i" (op));
 
+#else
+/*
+ * MIPS R6 changed the cache opcode and moved to a 8-bit offset field.
+ * This means we now need to increment the base register before we flush
+ * more cache lines
+ */
+#define cache16_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x010(%0)\n"     \
+       "       cache %1, 0x020(%0); cache %1, 0x030(%0)\n"     \
+       "       cache %1, 0x040(%0); cache %1, 0x050(%0)\n"     \
+       "       cache %1, 0x060(%0); cache %1, 0x070(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x090(%0)\n"     \
+       "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"     \
+       "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"     \
+       "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"     \
+       "       addiu $1, $0, 0x100                     \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x010($1)\n"     \
+       "       cache %1, 0x020($1); cache %1, 0x030($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x050($1)\n"     \
+       "       cache %1, 0x060($1); cache %1, 0x070($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x090($1)\n"     \
+       "       cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"     \
+       "       cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache32_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x020(%0)\n"     \
+       "       cache %1, 0x040(%0); cache %1, 0x060(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"     \
+       "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       addiu $1, $1, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       addiu $1, $1, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache64_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x040(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache128_unroll32(base,op)                             \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 /*
  * Perform the cache operation specified by op using a user mode virtual
  * address while in kernel mode.
index 753275accd1892142151169ed8ca722e41742d14..195db5045ae57fa972096417a41ba76b4048a4e5 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _ASM_SGIALIB_H
 #define _ASM_SGIALIB_H
 
+#include <linux/compiler.h>
 #include <asm/sgiarcs.h>
 
 extern struct linux_romvec *romvec;
@@ -70,8 +71,11 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
 extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
 
 /* Misc. routines. */
-extern VOID ArcReboot(VOID) __attribute__((noreturn));
-extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn));
+extern VOID ArcHalt(VOID) __noreturn;
+extern VOID ArcPowerDown(VOID) __noreturn;
+extern VOID ArcRestart(VOID) __noreturn;
+extern VOID ArcReboot(VOID) __noreturn;
+extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
 extern VOID ArcFlushAllCaches(VOID);
 extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
 
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
deleted file mode 100644 (file)
index dd9a762..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle
- * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
- */
-#ifndef _ASM_SIGINFO_H
-#define _ASM_SIGINFO_H
-
-#include <uapi/asm/siginfo.h>
-
-
-/*
- * Duplicated here because of <asm-generic/siginfo.h> braindamage ...
- */
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
-       if (from->si_code < 0)
-               memcpy(to, from, sizeof(*to));
-       else
-               /* _sigchld is currently the largest know union member */
-               memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld));
-}
-
-#endif /* _ASM_SIGINFO_H */
index c6d06d383ef90df1cf7bb8a4f69aaa641b40e7fd..b4548690ade9916e1d94e844641a505fad43bea4 100644 (file)
@@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        subu   %[ticket], %[ticket], 1                 \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [serving_now_ptr] "+m" (lock->h.serving_now),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (my_ticket)
@@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        subu   %[ticket], %[ticket], 1                 \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [serving_now_ptr] "+m" (lock->h.serving_now),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (my_ticket)
@@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
@@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
@@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                        "       bltz    %1, 1b                          \n"
                        "        addu   %1, 1                           \n"
                        "2:     sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
        smp_llsc_mb();
 }
 
-/* Note the use of sub, not subu which will make the kernel die with an
-   overflow exception if we ever try to unlock an rwlock that is already
-   unlocked or is being held by a writer.  */
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
@@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "1:     ll      %1, %2          # arch_read_unlock      \n"
-               "       sub     %1, 1                                   \n"
+               "       addiu   %1, -1                                  \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_read_unlock      \n"
-                       "       sub     %1, 1                           \n"
+                       "       addiu   %1, -1                          \n"
                        "       sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                        "       bnez    %1, 1b                          \n"
                        "        lui    %1, 0x8000                      \n"
                        "2:     sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                __asm__ __volatile__(
@@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        }
 
@@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                "       li      %2, 1                                   \n"
                "       .set    reorder                                 \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                        "       sc      %1, %0                          \n"
                        "       li      %2, 1                           \n"
                        "2:                                             \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
                          "=&r" (ret)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
 
index 0b89006e490788ffcc26a9225762c66f4bd4ea8b..0f90d88e464d3cc005ab3ae0411d6996c5d41e1a 100644 (file)
@@ -1,10 +1,10 @@
 #ifndef _MIPS_SPRAM_H
 #define _MIPS_SPRAM_H
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_MIPS_SPRAM)
 extern __init void spram_config(void);
 #else
 static inline void spram_config(void) { };
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_MIPS_SPRAM */
 
 #endif /* _MIPS_SPRAM_H */
index b188c797565ce48bac812aacd98922ef31c00180..28d6d9364bd1f2c431df08c72f58262e5297ec5c 100644 (file)
@@ -40,7 +40,7 @@
                LONG_S  v1, PT_HI(sp)
                mflhxu  v1
                LONG_S  v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
                mfhi    v1
 #endif
 #ifdef CONFIG_32BIT
@@ -50,7 +50,7 @@
                LONG_S  $10, PT_R10(sp)
                LONG_S  $11, PT_R11(sp)
                LONG_S  $12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_HI(sp)
                mflo    v1
 #endif
@@ -58,7 +58,7 @@
                LONG_S  $14, PT_R14(sp)
                LONG_S  $15, PT_R15(sp)
                LONG_S  $24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_LO(sp)
 #endif
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
                mtlhx   $24
                LONG_L  $24, PT_LO(sp)
                mtlhx   $24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
                LONG_L  $24, PT_LO(sp)
                mtlo    $24
                LONG_L  $24, PT_HI(sp)
index b928b6f898cd5266efe89465d2dc87089f8f357c..e92d6c4b5ed192305b0b1f1605481f745cfadb10 100644 (file)
@@ -75,9 +75,12 @@ do {                                                                 \
 #endif
 
 #define __clear_software_ll_bit()                                      \
-do {                                                                   \
-       if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)       \
-               ll_bit = 0;                                             \
+do {   if (cpu_has_rw_llb) {                                           \
+               write_c0_lladdr(0);                                     \
+       } else {                                                        \
+               if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
+                       ll_bit = 0;                                     \
+       }                                                               \
 } while (0)
 
 #define switch_to(prev, next, last)                                    \
index 9e1295f874f0c143f3924aac7e606b93125ed9ce..55ed6602204cae5ae1219ee15f5a98b2c9f2813e 100644 (file)
@@ -28,7 +28,7 @@ struct thread_info {
        unsigned long           tp_value;       /* thread pointer */
        __u32                   cpu;            /* current CPU */
        int                     preempt_count;  /* 0 => preemptable, <0 => BUG */
-
+       int                     r2_emul_return; /* 1 => Returning from R2 emulator */
        mm_segment_t            addr_limit;     /*
                                                 * thread address space limit:
                                                 * 0x7fffffff for user-thead
index 89c22433b1c665ccf06c278f4bdf306642ffc828..fc0cf5ac0cf72ce28a38eec08a422719343ba4e4 100644 (file)
 enum major_op {
        spec_op, bcond_op, j_op, jal_op,
        beq_op, bne_op, blez_op, bgtz_op,
-       addi_op, addiu_op, slti_op, sltiu_op,
+       addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
        andi_op, ori_op, xori_op, lui_op,
        cop0_op, cop1_op, cop2_op, cop1x_op,
        beql_op, bnel_op, blezl_op, bgtzl_op,
-       daddi_op, daddiu_op, ldl_op, ldr_op,
+       daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
        spec2_op, jalx_op, mdmx_op, spec3_op,
        lb_op, lh_op, lwl_op, lw_op,
        lbu_op, lhu_op, lwr_op, lwu_op,
        sb_op, sh_op, swl_op, sw_op,
        sdl_op, sdr_op, swr_op, cache_op,
-       ll_op, lwc1_op, lwc2_op, pref_op,
-       lld_op, ldc1_op, ldc2_op, ld_op,
-       sc_op, swc1_op, swc2_op, major_3b_op,
-       scd_op, sdc1_op, sdc2_op, sd_op
+       ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
+       lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+       sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
+       scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
 };
 
 /*
@@ -83,9 +83,12 @@ enum spec3_op {
        swe_op    = 0x1f, bshfl_op  = 0x20,
        swle_op   = 0x21, swre_op   = 0x22,
        prefe_op  = 0x23, dbshfl_op = 0x24,
-       lbue_op   = 0x28, lhue_op   = 0x29,
-       lbe_op    = 0x2c, lhe_op    = 0x2d,
-       lle_op    = 0x2e, lwe_op    = 0x2f,
+       cache6_op = 0x25, sc6_op    = 0x26,
+       scd6_op   = 0x27, lbue_op   = 0x28,
+       lhue_op   = 0x29, lbe_op    = 0x2c,
+       lhe_op    = 0x2d, lle_op    = 0x2e,
+       lwe_op    = 0x2f, pref6_op  = 0x35,
+       ll6_op    = 0x36, lld6_op   = 0x37,
        rdhwr_op  = 0x3b
 };
 
@@ -112,7 +115,8 @@ enum cop_op {
        mfhc_op       = 0x03, mtc_op        = 0x04,
        dmtc_op       = 0x05, ctc_op        = 0x06,
        mthc0_op      = 0x06, mthc_op       = 0x07,
-       bc_op         = 0x08, cop_op        = 0x10,
+       bc_op         = 0x08, bc1eqz_op     = 0x09,
+       bc1nez_op     = 0x0d, cop_op        = 0x10,
        copm_op       = 0x18
 };
 
index d08f83f19db566899298e41ac06bf53f3a757400..2cb7fdead5702a5c8b5f0522e0c9f3e8e4ee96ba 100644 (file)
 
 #define HAVE_ARCH_SIGINFO_T
 
-/*
- * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked
- * by design ...
- */
-#define HAVE_ARCH_COPY_SIGINFO
-struct siginfo;
-
 /*
  * Careful to keep union _sifields from shifting ...
  */
@@ -35,8 +28,9 @@ struct siginfo;
 
 #define __ARCH_SIGSYS
 
-#include <asm-generic/siginfo.h>
+#include <uapi/asm-generic/siginfo.h>
 
+/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
 typedef struct siginfo {
        int si_signo;
        int si_code;
@@ -124,5 +118,6 @@ typedef struct siginfo {
 #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
 #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
 
+#include <asm-generic/siginfo.h>
 
 #endif /* _UAPI_ASM_SIGINFO_H */
index c454525e7695704de74a777a7d1801e60ea20a1d..9dd051edb411f2a5b68f2f5b4ae77bfbe6115d57 100644 (file)
@@ -140,10 +140,18 @@ static void qi_lb60_nand_ident(struct platform_device *pdev,
 
 static struct jz_nand_platform_data qi_lb60_nand_pdata = {
        .ident_callback = qi_lb60_nand_ident,
-       .busy_gpio = 94,
        .banks = { 1 },
 };
 
+static struct gpiod_lookup_table qi_lb60_nand_gpio_table = {
+       .dev_id = "jz4740-nand.0",
+       .table = {
+               GPIO_LOOKUP("Bank C", 30, "busy", 0),
+               { },
+       },
+};
+
+
 /* Keyboard*/
 
 #define KEY_QI_QI      KEY_F13
@@ -472,6 +480,7 @@ static int __init qi_lb60_init_platform_devices(void)
        jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata;
 
        gpiod_add_lookup_table(&qi_lb60_audio_gpio_table);
+       gpiod_add_lookup_table(&qi_lb60_nand_gpio_table);
 
        jz4740_serial_device_register();
 
index 92987d1bbe5fe26e957ee8b9a1b5737f6181204c..d3d2ff2d76dc8f2e3643e9255226975e8e4699a4 100644 (file)
@@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP)     += smp-mt.o
 obj-$(CONFIG_MIPS_CMP)         += smp-cmp.o
 obj-$(CONFIG_MIPS_CPS)         += smp-cps.o cps-vec.o
 obj-$(CONFIG_MIPS_GIC_IPI)     += smp-gic.o
-obj-$(CONFIG_CPU_MIPSR2)       += spram.o
+obj-$(CONFIG_MIPS_SPRAM)       += spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)  += vpe.o
 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
@@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 obj-$(CONFIG_EARLY_PRINTK_8250)        += early_printk_8250.o
 obj-$(CONFIG_SPINLOCK_TEST)    += spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)     += mips_machine.o
+obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR)    += mips-r2-to-r6-emul.o
 
 CFLAGS_cpu-bugs64.o    = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
index 3b2dfdb4865fd9cbe208e41ca38e709db952c0e1..750d67ac41e9b19affe066d5be8d1f56f7363041 100644 (file)
@@ -97,6 +97,7 @@ void output_thread_info_defines(void)
        OFFSET(TI_TP_VALUE, thread_info, tp_value);
        OFFSET(TI_CPU, thread_info, cpu);
        OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+       OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return);
        OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
        OFFSET(TI_REGS, thread_info, regs);
        DEFINE(_THREAD_SIZE, THREAD_SIZE);
@@ -381,6 +382,7 @@ void output_octeon_cop2_state_defines(void)
        OFFSET(OCTEON_CP2_GFM_RESULT,   octeon_cop2_state, cop2_gfm_result);
        OFFSET(OCTEON_CP2_HSH_DATW,     octeon_cop2_state, cop2_hsh_datw);
        OFFSET(OCTEON_CP2_HSH_IVW,      octeon_cop2_state, cop2_hsh_ivw);
+       OFFSET(OCTEON_CP2_SHA3,         octeon_cop2_state, cop2_sha3);
        OFFSET(THREAD_CP2,      task_struct, thread.cp2);
        OFFSET(THREAD_CVMSEG,   task_struct, thread.cvmseg.cvmseg);
        BLANK();
index 4d7d99d601cc13219e9d8f9631da6002b3d9df9d..c2e0f45ddf6cf48d05f7b97b5a095de0a04fca19 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
@@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
  * @returns:   -EFAULT on error and forces SIGBUS, and on success
  *             returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *             evaluating the branch.
+ *
+ * MIPS R6 Compact branches and forbidden slots:
+ *     Compact branches do not throw exceptions because they do
+ *     not have delay slots. The forbidden slot instruction ($PC+4)
+ *     is only executed if the branch was not taken. Otherwise the
+ *     forbidden slot is skipped entirely. This means that the
+ *     only possible reason to be here because of a MIPS R6 compact
+ *     branch instruction is that the forbidden slot has thrown one.
+ *     In that case the branch was not taken, so the EPC can be safely
+ *     set to EPC + 8.
  */
 int __compute_return_epc_for_insn(struct pt_regs *regs,
                                   union mips_instruction insn)
 {
-       unsigned int bit, fcr31, dspcontrol;
+       unsigned int bit, fcr31, dspcontrol, reg;
        long epc = regs->cp0_epc;
        int ret = 0;
 
@@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        regs->regs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
+                       if (NO_R6EMU && insn.r_format.func == jr_op)
+                               goto sigill_r6;
                        regs->cp0_epc = regs->regs[insn.r_format.rs];
                        break;
                }
@@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
         */
        case bcond_op:
                switch (insn.i_format.rt) {
-               case bltz_op:
                case bltzl_op:
+                       if (NO_R6EMU)
+                               goto sigill_r6;
+               case bltz_op:
                        if ((long)regs->regs[insn.i_format.rs] < 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bltzl_op)
@@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        regs->cp0_epc = epc;
                        break;
 
-               case bgez_op:
                case bgezl_op:
+                       if (NO_R6EMU)
+                               goto sigill_r6;
+               case bgez_op:
                        if ((long)regs->regs[insn.i_format.rs] >= 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bgezl_op)
@@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bltzal_op:
                case bltzall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bltzall_op)) {
+                               ret = -SIGILL;
+                               break;
+                       }
                        regs->regs[31] = epc + 8;
+                       /*
+                        * OK we are here either because we hit a NAL
+                        * instruction or because we are emulating an
+                        * old bltzal{,l} one. Lets figure out what the
+                        * case really is.
+                        */
+                       if (!insn.i_format.rs) {
+                               /*
+                                * NAL or BLTZAL with rs == 0
+                                * Doesn't matter if we are R6 or not. The
+                                * result is the same
+                                */
+                               regs->cp0_epc += 4 +
+                                       (insn.i_format.simmediate << 2);
+                               break;
+                       }
+                       /* Now do the real thing for non-R6 BLTZAL{,L} */
                        if ((long)regs->regs[insn.i_format.rs] < 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bltzall_op)
@@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bgezal_op:
                case bgezall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bgezall_op)) {
+                               ret = -SIGILL;
+                               break;
+                       }
                        regs->regs[31] = epc + 8;
+                       /*
+                        * OK we are here either because we hit a BAL
+                        * instruction or because we are emulating an
+                        * old bgezal{,l} one. Lets figure out what the
+                        * case really is.
+                        */
+                       if (!insn.i_format.rs) {
+                               /*
+                                * BAL or BGEZAL with rs == 0
+                                * Doesn't matter if we are R6 or not. The
+                                * result is the same
+                                */
+                               regs->cp0_epc += 4 +
+                                       (insn.i_format.simmediate << 2);
+                               break;
+                       }
+                       /* Now do the real thing for non-R6 BGEZAL{,L} */
                        if ((long)regs->regs[insn.i_format.rs] >= 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bgezall_op)
@@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bposge32_op:
                        if (!cpu_has_dsp)
-                               goto sigill;
+                               goto sigill_dsp;
 
                        dspcontrol = rddsp(0x01);
 
@@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
        /*
         * These are conditional and in i_format.
         */
-       case beq_op:
        case beql_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case beq_op:
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case bne_op:
        case bnel_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case bne_op:
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case blez_op: /* not really i_format */
-       case blezl_op:
+       case blezl_op: /* not really i_format */
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case blez_op:
+               /*
+                * Compact branches for R6 for the
+                * blez and blezl opcodes.
+                * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+                * BLEZ  | rs = rt != 0      == BGEZALC
+                * BLEZ  | rs != 0 | rt != 0 == BGEUC
+                * BLEZL | rs = 0 | rt != 0  == BLEZC
+                * BLEZL | rs = rt != 0      == BGEZC
+                * BLEZL | rs != 0 | rt != 0 == BGEC
+                *
+                * For real BLEZ{,L}, rt is always 0.
+                */
+
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = epc + 4;
+                       regs->cp0_epc += 8;
+                       break;
+               }
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] <= 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case bgtz_op:
        case bgtzl_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case bgtz_op:
+               /*
+                * Compact branches for R6 for the
+                * bgtz and bgtzl opcodes.
+                * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+                * BGTZ  | rs = rt != 0      == BLTZALC
+                * BGTZ  | rs != 0 | rt != 0 == BLTUC
+                * BGTZL | rs = 0 | rt != 0  == BGTZC
+                * BGTZL | rs = rt != 0      == BLTZC
+                * BGTZL | rs != 0 | rt != 0 == BLTC
+                *
+                * *ZALC variant for BGTZ && rt != 0
+                * For real GTZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == bgtz_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                           (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = epc + 4;
+                       regs->cp0_epc += 8;
+                       break;
+               }
+
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] > 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
         * And now the FPA/cp1 branch instructions.
         */
        case cop1_op:
-               preempt_disable();
-               if (is_fpu_owner())
-                       fcr31 = read_32bit_cp1_register(CP1_STATUS);
-               else
-                       fcr31 = current->thread.fpu.fcr31;
-               preempt_enable();
-
-               bit = (insn.i_format.rt >> 2);
-               bit += (bit != 0);
-               bit += 23;
-               switch (insn.i_format.rt & 3) {
-               case 0: /* bc1f */
-               case 2: /* bc1fl */
-                       if (~fcr31 & (1 << bit)) {
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                               if (insn.i_format.rt == 2)
-                                       ret = BRANCH_LIKELY_TAKEN;
-                       } else
+               if (cpu_has_mips_r6 &&
+                   ((insn.i_format.rs == bc1eqz_op) ||
+                    (insn.i_format.rs == bc1nez_op))) {
+                       if (!used_math()) { /* First time FPU user */
+                               ret = init_fpu();
+                               if (ret && NO_R6EMU) {
+                                       ret = -ret;
+                                       break;
+                               }
+                               ret = 0;
+                               set_used_math();
+                       }
+                       lose_fpu(1);    /* Save FPU state for the emulator. */
+                       reg = insn.i_format.rt;
+                       bit = 0;
+                       switch (insn.i_format.rs) {
+                       case bc1eqz_op:
+                               /* Test bit 0 */
+                               if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
+                                   & 0x1)
+                                       bit = 1;
+                               break;
+                       case bc1nez_op:
+                               /* Test bit 0 */
+                               if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
+                                     & 0x1))
+                                       bit = 1;
+                               break;
+                       }
+                       own_fpu(1);
+                       if (bit)
+                               epc = epc + 4 +
+                                       (insn.i_format.simmediate << 2);
+                       else
                                epc += 8;
                        regs->cp0_epc = epc;
+
                        break;
+               } else {
 
-               case 1: /* bc1t */
-               case 3: /* bc1tl */
-                       if (fcr31 & (1 << bit)) {
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                               if (insn.i_format.rt == 3)
-                                       ret = BRANCH_LIKELY_TAKEN;
-                       } else
-                               epc += 8;
-                       regs->cp0_epc = epc;
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               fcr31 = read_32bit_cp1_register(CP1_STATUS);
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
+                       bit = (insn.i_format.rt >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       switch (insn.i_format.rt & 3) {
+                       case 0: /* bc1f */
+                       case 2: /* bc1fl */
+                               if (~fcr31 & (1 << bit)) {
+                                       epc = epc + 4 +
+                                               (insn.i_format.simmediate << 2);
+                                       if (insn.i_format.rt == 2)
+                                               ret = BRANCH_LIKELY_TAKEN;
+                               } else
+                                       epc += 8;
+                               regs->cp0_epc = epc;
+                               break;
+
+                       case 1: /* bc1t */
+                       case 3: /* bc1tl */
+                               if (fcr31 & (1 << bit)) {
+                                       epc = epc + 4 +
+                                               (insn.i_format.simmediate << 2);
+                                       if (insn.i_format.rt == 3)
+                                               ret = BRANCH_LIKELY_TAKEN;
+                               } else
+                                       epc += 8;
+                               regs->cp0_epc = epc;
+                               break;
+                       }
                        break;
                }
-               break;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        case lwc2_op: /* This is bbit0 on Octeon */
                if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        epc += 8;
                regs->cp0_epc = epc;
                break;
+#else
+       case bc6_op:
+               /* Only valid for MIPS R6 */
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               regs->cp0_epc += 8;
+               break;
+       case balc6_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BALC */
+               regs->regs[31] = epc + 4;
+               epc += 4 + (insn.i_format.simmediate << 2);
+               regs->cp0_epc = epc;
+               break;
+       case beqzcjic_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BEQZC || JIC */
+               regs->cp0_epc += 8;
+               break;
+       case bnezcjialc_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BNEZC || JIALC */
+               if (insn.i_format.rs)
+                       regs->regs[31] = epc + 4;
+               regs->cp0_epc += 8;
+               break;
 #endif
+       case cbcond0_op:
+       case cbcond1_op:
+               /* Only valid for MIPS R6 */
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /*
+                * Compact branches:
+                * bovc, beqc, beqzalc, bnvc, bnec, bnezlac
+                */
+               if (insn.i_format.rt && !insn.i_format.rs)
+                       regs->regs[31] = epc + 4;
+               regs->cp0_epc += 8;
+               break;
        }
 
        return ret;
 
-sigill:
+sigill_dsp:
        printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
        force_sig(SIGBUS, current);
        return -EFAULT;
+sigill_r6:
+       pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+               current->comm);
+       force_sig(SIGILL, current);
+       return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
 
index 6acaad0480af366830c77be996e9dba57fd8ad95..82bd2b278a243602dbf4c7ec15f8366af1ecf059 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
 
 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
@@ -40,7 +39,7 @@ int cp0_timer_irq_installed;
 
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
-       const int r2 = cpu_has_mips_r2;
+       const int r2 = cpu_has_mips_r2_r6;
        struct clock_event_device *cd;
        int cpu = smp_processor_id();
 
@@ -85,10 +84,7 @@ void mips_event_handler(struct clock_event_device *dev)
  */
 static int c0_compare_int_pending(void)
 {
-#ifdef CONFIG_MIPS_GIC
-       if (gic_present)
-               return gic_get_timer_pending();
-#endif
+       /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
        return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
 }
 
index 0384b05ab5a02413cbcb11a163375029f285255f..55b759a0019e61671d78919bc5b143ba5ff4a94d 100644 (file)
@@ -99,11 +99,11 @@ not_nmi:
        xori    t2, t1, 0x7
        beqz    t2, 1f
         li     t3, 32
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
        sllv    t1, t3, t1
 1:     /* At this point t1 == I-cache sets per way */
        _EXT    t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
-       addi    t2, t2, 1
+       addiu   t2, t2, 1
        mul     t1, t1, t0
        mul     t1, t1, t2
 
@@ -126,11 +126,11 @@ icache_done:
        xori    t2, t1, 0x7
        beqz    t2, 1f
         li     t3, 32
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
        sllv    t1, t3, t1
 1:     /* At this point t1 == D-cache sets per way */
        _EXT    t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
-       addi    t2, t2, 1
+       addiu   t2, t2, 1
        mul     t1, t1, t0
        mul     t1, t1, t2
 
@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
        mfc0    t0, CP0_MVPCONF0
        srl     t0, t0, MVPCONF0_PVPE_SHIFT
        andi    t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
-       addi    t7, t0, 1
+       addiu   t7, t0, 1
 
        /* If there's only 1, we're done */
        beqz    t0, 2f
@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
        mttc0   t0, CP0_TCHALT
 
        /* Next VPE */
-       addi    t5, t5, 1
+       addiu   t5, t5, 1
        slt     t0, t5, t7
        bnez    t0, 1b
         nop
@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
        mfc0    t1, CP0_MVPCONF0
        srl     t1, t1, MVPCONF0_PVPE_SHIFT
        andi    t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
 
        /* Calculate a mask for the VPE ID from EBase.CPUNum */
        clz     t1, t1
@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
 
        /* Next VPE */
 2:     srl     t6, t6, 1
-       addi    t5, t5, 1
+       addiu   t5, t5, 1
        bnez    t6, 1b
         nop
 
index 2d80b5f1aeae29361843640d1d0c4148cbf489fd..09f4034f239f511867a4d56792de909d0ad0c773 100644 (file)
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
        panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
 }
 
-int daddiu_bug = -1;
+int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
 
 static inline void check_daddiu(void)
 {
@@ -314,11 +314,14 @@ static inline void check_daddiu(void)
 
 void __init check_bugs64_early(void)
 {
-       check_mult_sh();
-       check_daddiu();
+       if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+               check_mult_sh();
+               check_daddiu();
+       }
 }
 
 void __init check_bugs64(void)
 {
-       check_daddi();
+       if (!config_enabled(CONFIG_CPU_MIPSR6))
+               check_daddi();
 }
index 5342674842f5826572b71ce10a76c7a2f635139f..48dfb9de853ddc92ebf4254a95e400b0db5e1789 100644 (file)
@@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
                c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
                break;
 
+       /* R6 incompatible with everything else */
+       case MIPS_CPU_ISA_M64R6:
+               c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
+       case MIPS_CPU_ISA_M32R6:
+               c->isa_level |= MIPS_CPU_ISA_M32R6;
+               /* Break here so we don't add incompatible ISAs */
+               break;
        case MIPS_CPU_ISA_M32R2:
                c->isa_level |= MIPS_CPU_ISA_M32R2;
        case MIPS_CPU_ISA_M32R1:
@@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
                case 1:
                        set_isa(c, MIPS_CPU_ISA_M32R2);
                        break;
+               case 2:
+                       set_isa(c, MIPS_CPU_ISA_M32R6);
+                       break;
                default:
                        goto unknown;
                }
@@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
                case 1:
                        set_isa(c, MIPS_CPU_ISA_M64R2);
                        break;
+               case 2:
+                       set_isa(c, MIPS_CPU_ISA_M64R6);
+                       break;
                default:
                        goto unknown;
                }
@@ -424,8 +437,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
        if (config3 & MIPS_CONF3_MSA)
                c->ases |= MIPS_ASE_MSA;
        /* Only tested on 32-bit cores */
-       if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT))
+       if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
+               c->htw_seq = 0;
                c->options |= MIPS_CPU_HTW;
+       }
 
        return config3 & MIPS_CONF_M;
 }
@@ -499,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
                c->options |= MIPS_CPU_EVA;
        if (config5 & MIPS_CONF5_MRP)
                c->options |= MIPS_CPU_MAAR;
+       if (config5 & MIPS_CONF5_LLB)
+               c->options |= MIPS_CPU_RW_LLB;
 
        return config5 & MIPS_CONF_M;
 }
@@ -533,7 +550,7 @@ static void decode_configs(struct cpuinfo_mips *c)
 
        if (cpu_has_rixi) {
                /* Enable the RIXI exceptions */
-               write_c0_pagegrain(read_c0_pagegrain() | PG_IEC);
+               set_c0_pagegrain(PG_IEC);
                back_to_back_c0_hazard();
                /* Verify the IEC bit is set */
                if (read_c0_pagegrain() & PG_IEC)
@@ -541,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c)
        }
 
 #ifndef CONFIG_MIPS_CPS
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                c->core = get_ebase_cpunum();
                if (cpu_has_mipsmt)
                        c->core >>= fls(core_nvpes()) - 1;
@@ -896,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 {
        c->writecombine = _CACHE_UNCACHED_ACCELERATED;
        switch (c->processor_id & PRID_IMP_MASK) {
+       case PRID_IMP_QEMU_GENERIC:
+               c->writecombine = _CACHE_UNCACHED;
+               c->cputype = CPU_QEMU_GENERIC;
+               __cpu_name[cpu] = "MIPS GENERIC QEMU";
+               break;
        case PRID_IMP_4KC:
                c->cputype = CPU_4KC;
                c->writecombine = _CACHE_UNCACHED;
@@ -1345,8 +1367,7 @@ void cpu_probe(void)
        if (c->options & MIPS_CPU_FPU) {
                c->fpu_id = cpu_get_fpu_id();
 
-               if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                                   MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+               if (c->isa_level & cpu_has_mips_r) {
                        if (c->fpu_id & MIPS_FPIR_3D)
                                c->ases |= MIPS_ASE_MIPS3D;
                        if (c->fpu_id & MIPS_FPIR_FREP)
@@ -1354,7 +1375,7 @@ void cpu_probe(void)
                }
        }
 
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
                /* R2 has Performance Counter Interrupt indicator */
                c->options |= MIPS_CPU_PCI;
index a5b5b56485c1618c34af3daea2b67e795a0f8c2b..d2c09f6475c5cb5454b34ed1c498d43fdc31b29e 100644 (file)
 #include <linux/elf.h>
 #include <linux/sched.h>
 
+/* FPU modes */
 enum {
-       FP_ERROR = -1,
-       FP_DOUBLE_64A = -2,
+       FP_FRE,
+       FP_FR0,
+       FP_FR1,
 };
 
+/**
+ * struct mode_req - ABI FPU mode requirements
+ * @single:    The program being loaded needs an FPU but it will only issue
+ *             single precision instructions meaning that it can execute in
+ *             either FR0 or FR1.
+ * @soft:      The soft(-float) requirement means that the program being
+ *             loaded has no FPU dependency at all (i.e. it has no
+ *             FPU instructions).
+ * @fr1:       The program being loaded depends on FPU being in FR=1 mode.
+ * @frdefault: The program being loaded depends on the default FPU mode.
+ *             That is FR0 for O32 and FR1 for N32/N64.
+ * @fre:       The program being loaded depends on FPU with FRE=1. This mode is
+ *             a bridge which uses FR=1 whilst still being able to maintain
+ *             full compatibility with pre-existing code using the O32 FP32
+ *             ABI.
+ *
+ * More information about the FP ABIs can be found here:
+ *
+ * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
+ *
+ */
+
+struct mode_req {
+       bool single;
+       bool soft;
+       bool fr1;
+       bool frdefault;
+       bool fre;
+};
+
+static const struct mode_req fpu_reqs[] = {
+       [MIPS_ABI_FP_ANY]    = { true,  true,  true,  true,  true  },
+       [MIPS_ABI_FP_DOUBLE] = { false, false, false, true,  true  },
+       [MIPS_ABI_FP_SINGLE] = { true,  false, false, false, false },
+       [MIPS_ABI_FP_SOFT]   = { false, true,  false, false, false },
+       [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
+       [MIPS_ABI_FP_XX]     = { false, false, true,  true,  true  },
+       [MIPS_ABI_FP_64]     = { false, false, true,  false, false },
+       [MIPS_ABI_FP_64A]    = { false, false, true,  false, true  }
+};
+
+/*
+ * Mode requirements when .MIPS.abiflags is not present in the ELF.
+ * Not present means that everything is acceptable except FR1.
+ */
+static struct mode_req none_req = { true, true, false, true, true };
+
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
                     bool is_interp, struct arch_elf_state *state)
 {
-       struct elf32_hdr *ehdr = _ehdr;
-       struct elf32_phdr *phdr = _phdr;
+       struct elf32_hdr *ehdr32 = _ehdr;
+       struct elf32_phdr *phdr32 = _phdr;
+       struct elf64_phdr *phdr64 = _phdr;
        struct mips_elf_abiflags_v0 abiflags;
        int ret;
 
-       if (config_enabled(CONFIG_64BIT) &&
-           (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
-               return 0;
-       if (phdr->p_type != PT_MIPS_ABIFLAGS)
-               return 0;
-       if (phdr->p_filesz < sizeof(abiflags))
-               return -EINVAL;
+       /* Lets see if this is an O32 ELF */
+       if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
+               /* FR = 1 for N32 */
+               if (ehdr32->e_flags & EF_MIPS_ABI2)
+                       state->overall_fp_mode = FP_FR1;
+               else
+                       /* Set a good default FPU mode for O32 */
+                       state->overall_fp_mode = cpu_has_mips_r6 ?
+                               FP_FRE : FP_FR0;
+
+               if (ehdr32->e_flags & EF_MIPS_FP64) {
+                       /*
+                        * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
+                        * later if needed
+                        */
+                       if (is_interp)
+                               state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
+                       else
+                               state->fp_abi = MIPS_ABI_FP_OLD_64;
+               }
+               if (phdr32->p_type != PT_MIPS_ABIFLAGS)
+                       return 0;
+
+               if (phdr32->p_filesz < sizeof(abiflags))
+                       return -EINVAL;
+
+               ret = kernel_read(elf, phdr32->p_offset,
+                                 (char *)&abiflags,
+                                 sizeof(abiflags));
+       } else {
+               /* FR=1 is really the only option for 64-bit */
+               state->overall_fp_mode = FP_FR1;
+
+               if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+                       return 0;
+               if (phdr64->p_filesz < sizeof(abiflags))
+                       return -EINVAL;
+
+               ret = kernel_read(elf, phdr64->p_offset,
+                                 (char *)&abiflags,
+                                 sizeof(abiflags));
+       }
 
-       ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
-                         sizeof(abiflags));
        if (ret < 0)
                return ret;
        if (ret != sizeof(abiflags))
@@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
        return 0;
 }
 
-static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(int in_abi)
 {
        /* If the ABI requirement is provided, simply return that */
-       if (in_abi != -1)
+       if (in_abi != MIPS_ABI_FP_UNKNOWN)
                return in_abi;
 
-       /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */
-       if (ehdr->e_flags & EF_MIPS_FP64)
-               return MIPS_ABI_FP_64;
-
-       /* Default to MIPS_ABI_FP_DOUBLE */
-       return MIPS_ABI_FP_DOUBLE;
+       /* Unknown ABI */
+       return MIPS_ABI_FP_UNKNOWN;
 }
 
 int arch_check_elf(void *_ehdr, bool has_interpreter,
                   struct arch_elf_state *state)
 {
        struct elf32_hdr *ehdr = _ehdr;
-       unsigned fp_abi, interp_fp_abi, abi0, abi1;
+       struct mode_req prog_req, interp_req;
+       int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
 
-       /* Ignore non-O32 binaries */
-       if (config_enabled(CONFIG_64BIT) &&
-           (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+       if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
                return 0;
 
-       fp_abi = get_fp_abi(ehdr, state->fp_abi);
+       fp_abi = get_fp_abi(state->fp_abi);
 
        if (has_interpreter) {
-               interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+               interp_fp_abi = get_fp_abi(state->interp_fp_abi);
 
                abi0 = min(fp_abi, interp_fp_abi);
                abi1 = max(fp_abi, interp_fp_abi);
@@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
                abi0 = abi1 = fp_abi;
        }
 
-       state->overall_abi = FP_ERROR;
-
-       if (abi0 == abi1) {
-               state->overall_abi = abi0;
-       } else if (abi0 == MIPS_ABI_FP_ANY) {
-               state->overall_abi = abi1;
-       } else if (abi0 == MIPS_ABI_FP_DOUBLE) {
-               switch (abi1) {
-               case MIPS_ABI_FP_XX:
-                       state->overall_abi = MIPS_ABI_FP_DOUBLE;
-                       break;
-
-               case MIPS_ABI_FP_64A:
-                       state->overall_abi = FP_DOUBLE_64A;
-                       break;
-               }
-       } else if (abi0 == MIPS_ABI_FP_SINGLE ||
-                  abi0 == MIPS_ABI_FP_SOFT) {
-               /* Cannot link with other ABIs */
-       } else if (abi0 == MIPS_ABI_FP_OLD_64) {
-               switch (abi1) {
-               case MIPS_ABI_FP_XX:
-               case MIPS_ABI_FP_64:
-               case MIPS_ABI_FP_64A:
-                       state->overall_abi = MIPS_ABI_FP_64;
-                       break;
-               }
-       } else if (abi0 == MIPS_ABI_FP_XX ||
-                  abi0 == MIPS_ABI_FP_64 ||
-                  abi0 == MIPS_ABI_FP_64A) {
-               state->overall_abi = MIPS_ABI_FP_64;
-       }
+       /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
+       max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+                  (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
+               MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
 
-       switch (state->overall_abi) {
-       case MIPS_ABI_FP_64:
-       case MIPS_ABI_FP_64A:
-       case FP_DOUBLE_64A:
-               if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-                       return -ELIBBAD;
-               break;
+       if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+           (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
+               return -ELIBBAD;
+
+       /* It's time to determine the FPU mode requirements */
+       prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
+       interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
 
-       case FP_ERROR:
+       /*
+        * Check whether the program's and interp's ABIs have a matching FPU
+        * mode requirement.
+        */
+       prog_req.single = interp_req.single && prog_req.single;
+       prog_req.soft = interp_req.soft && prog_req.soft;
+       prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
+       prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
+       prog_req.fre = interp_req.fre && prog_req.fre;
+
+       /*
+        * Determine the desired FPU mode
+        *
+        * Decision making:
+        *
+        * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This
+        *   means that we have a combination of program and interpreter
+        *   that inherently require the hybrid FP mode.
+        * - If FR1 and FRDEFAULT is true, that means we hit the any-abi or
+        *   fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU
+        *   instructions so we don't care about the mode. We will simply use
+        *   the one preferred by the hardware. In fpxx case, that ABI can
+        *   handle both FR=1 and FR=0, so, again, we simply choose the one
+        *   preferred by the hardware. Next, if we only use single-precision
+        *   FPU instructions, and the default ABI FPU mode is not good
+        *   (ie single + any ABI combination), we set again the FPU mode to the
+        *   one is preferred by the hardware. Next, if we know that the code
+        *   will only use single-precision instructions, shown by single being
+        *   true but frdefault being false, then we again set the FPU mode to
+        *   the one that is preferred by the hardware.
+        * - We want FP_FR1 if that's the only matching mode and the default one
+        *   is not good.
+        * - Return with -ELIBBAD if we can't find a matching FPU mode.
+        */
+       if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
+               state->overall_fp_mode = FP_FRE;
+       else if ((prog_req.fr1 && prog_req.frdefault) ||
+                (prog_req.single && !prog_req.frdefault))
+               /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+               state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+                                         cpu_has_mips_r2_r6) ?
+                                         FP_FR1 : FP_FR0;
+       else if (prog_req.fr1)
+               state->overall_fp_mode = FP_FR1;
+       else  if (!prog_req.fre && !prog_req.frdefault &&
+                 !prog_req.fr1 && !prog_req.single && !prog_req.soft)
                return -ELIBBAD;
-       }
 
        return 0;
 }
 
-void mips_set_personality_fp(struct arch_elf_state *state)
+static inline void set_thread_fp_mode(int hybrid, int regs32)
 {
-       if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) {
-               /*
-                * Use hybrid FPRs for all code which can correctly execute
-                * with that mode.
-                */
-               switch (state->overall_abi) {
-               case MIPS_ABI_FP_DOUBLE:
-               case MIPS_ABI_FP_SINGLE:
-               case MIPS_ABI_FP_SOFT:
-               case MIPS_ABI_FP_XX:
-               case MIPS_ABI_FP_ANY:
-                       /* FR=1, FRE=1 */
-                       clear_thread_flag(TIF_32BIT_FPREGS);
-                       set_thread_flag(TIF_HYBRID_FPREGS);
-                       return;
-               }
-       }
-
-       switch (state->overall_abi) {
-       case MIPS_ABI_FP_DOUBLE:
-       case MIPS_ABI_FP_SINGLE:
-       case MIPS_ABI_FP_SOFT:
-               /* FR=0 */
-               set_thread_flag(TIF_32BIT_FPREGS);
+       if (hybrid)
+               set_thread_flag(TIF_HYBRID_FPREGS);
+       else
                clear_thread_flag(TIF_HYBRID_FPREGS);
-               break;
-
-       case FP_DOUBLE_64A:
-               /* FR=1, FRE=1 */
+       if (regs32)
+               set_thread_flag(TIF_32BIT_FPREGS);
+       else
                clear_thread_flag(TIF_32BIT_FPREGS);
-               set_thread_flag(TIF_HYBRID_FPREGS);
-               break;
+}
 
-       case MIPS_ABI_FP_64:
-       case MIPS_ABI_FP_64A:
-               /* FR=1, FRE=0 */
-               clear_thread_flag(TIF_32BIT_FPREGS);
-               clear_thread_flag(TIF_HYBRID_FPREGS);
-               break;
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+       /*
+        * This function is only ever called for O32 ELFs so we should
+        * not be worried about N32/N64 binaries.
+        */
 
-       case MIPS_ABI_FP_XX:
-       case MIPS_ABI_FP_ANY:
-               if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-                       set_thread_flag(TIF_32BIT_FPREGS);
-               else
-                       clear_thread_flag(TIF_32BIT_FPREGS);
+       if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+               return;
 
-               clear_thread_flag(TIF_HYBRID_FPREGS);
+       switch (state->overall_fp_mode) {
+       case FP_FRE:
+               set_thread_fp_mode(1, 0);
+               break;
+       case FP_FR0:
+               set_thread_fp_mode(0, 1);
+               break;
+       case FP_FR1:
+               set_thread_fp_mode(0, 0);
                break;
-
        default:
-       case FP_ERROR:
                BUG();
        }
 }
index 4353d323f0175cc2fcbefac58762b90ad7da0c59..af41ba6db9601d16540b945b0112e130bd9eb845 100644 (file)
@@ -46,6 +46,11 @@ resume_userspace:
        local_irq_disable               # make sure we dont miss an
                                        # interrupt setting need_resched
                                        # between sampling and return
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+       lw      k0, TI_R2_EMUL_RET($28)
+       bnez    k0, restore_all_from_r2_emul
+#endif
+
        LONG_L  a2, TI_FLAGS($28)       # current->work
        andi    t0, a2, _TIF_WORK_MASK  # (ignoring syscall_trace)
        bnez    t0, work_pending
@@ -114,6 +119,19 @@ restore_partial:           # restore partial frame
        RESTORE_SP_AND_RET
        .set    at
 
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+restore_all_from_r2_emul:                      # restore full frame
+       .set    noat
+       sw      zero, TI_R2_EMUL_RET($28)       # reset it
+       RESTORE_TEMP
+       RESTORE_AT
+       RESTORE_STATIC
+       RESTORE_SOME
+       LONG_L  sp, PT_R29(sp)
+       eretnc
+       .set    at
+#endif
+
 work_pending:
        andi    t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
        beqz    t0, work_notifysig
@@ -158,7 +176,8 @@ syscall_exit_work:
        jal     syscall_trace_leave
        b       resume_userspace
 
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
+    defined(CONFIG_MIPS_MT)
 
 /*
  * MIPS32R2 Instruction Hazard Barrier - must be called
@@ -171,4 +190,4 @@ LEAF(mips_ihb)
        nop
        END(mips_ihb)
 
-#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
index a5e26dd9059256ed7c2a2033210bcd1bbc99b6e3..2ebaabe3af1513269e100d8bcffa9e8e9cb1f2c8 100644 (file)
@@ -125,7 +125,7 @@ LEAF(__r4k_wait)
        nop
        nop
 #endif
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
        wait
        /* end of rollback region (the region size must be power of two) */
 1:
index 0b9082b6b6832d104a7994c0a6d44f13f61912cb..368c88b7eb6c985a848601415c9e8baf7aa2a5bb 100644 (file)
@@ -186,6 +186,7 @@ void __init check_wait(void)
        case CPU_PROAPTIV:
        case CPU_P5600:
        case CPU_M5150:
+       case CPU_QEMU_GENERIC:
                cpu_wait = r4k_wait;
                if (read_c0_config7() & MIPS_CONF7_WII)
                        cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
new file mode 100644 (file)
index 0000000..64d17e4
--- /dev/null
@@ -0,0 +1,2378 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *      MIPS R2 user space instruction emulator for MIPS R6
+ *
+ */
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/local.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define ADDIU  "daddiu "
+#define INS    "dins "
+#define EXT    "dext "
+#else
+#define ADDIU  "addiu "
+#define INS    "ins "
+#define EXT    "ext "
+#endif /* CONFIG_64BIT */
+
+#define SB     "sb "
+#define LB     "lb "
+#define LL     "ll "
+#define SC     "sc "
+
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+
+extern const unsigned int fpucondbit[8];
+
+#define MIPS_R2_EMUL_TOTAL_PASS        10
+
+int mipsr2_emulation = 0;
+
+static int __init mipsr2emu_enable(char *s)
+{
+       mipsr2_emulation = 1;
+
+       pr_info("MIPS R2-to-R6 Emulator Enabled!");
+
+       return 1;
+}
+__setup("mipsr2emu", mipsr2emu_enable);
+
+/**
+ * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot
+ * for performance instead of the traditional way of using a stack trampoline
+ * which is rather slow.
+ * @regs: Process register set
+ * @ir: Instruction
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+       switch (MIPSInst_OPCODE(ir)) {
+       case addiu_op:
+               if (MIPSInst_RT(ir))
+                       regs->regs[MIPSInst_RT(ir)] =
+                               (s32)regs->regs[MIPSInst_RS(ir)] +
+                               (s32)MIPSInst_SIMM(ir);
+               return 0;
+       case daddiu_op:
+               if (config_enabled(CONFIG_32BIT))
+                       break;
+
+               if (MIPSInst_RT(ir))
+                       regs->regs[MIPSInst_RT(ir)] =
+                               (s64)regs->regs[MIPSInst_RS(ir)] +
+                               (s64)MIPSInst_SIMM(ir);
+               return 0;
+       case lwc1_op:
+       case swc1_op:
+       case cop1_op:
+       case cop1x_op:
+               /* FPU instructions in delay slot */
+               return -SIGFPE;
+       case spec_op:
+               switch (MIPSInst_FUNC(ir)) {
+               case or_op:
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       regs->regs[MIPSInst_RS(ir)] |
+                                       regs->regs[MIPSInst_RT(ir)];
+                       return 0;
+               case sll_op:
+                       if (MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case srl_op:
+                       if (MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case addu_op:
+                       if (MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+                                             (u32)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               case subu_op:
+                       if (MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+                                             (u32)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               case dsll_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case dsrl_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case daddu_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (u64)regs->regs[MIPSInst_RS(ir)] +
+                                       (u64)regs->regs[MIPSInst_RT(ir)];
+                       return 0;
+               case dsubu_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+                                             (u64)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               }
+               break;
+       default:
+               pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
+                        ir, MIPSInst_OPCODE(ir));
+       }
+
+       return SIGILL;
+}
+
+/**
+ * movf_func - Emulate a MOVF instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+       u32 csr;
+       u32 cond;
+
+       csr = current->thread.fpu.fcr31;
+       cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+       if (((csr & cond) == 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+       return 0;
+}
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+       u32 csr;
+       u32 cond;
+
+       csr = current->thread.fpu.fcr31;
+       cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+       if (((csr & cond) != 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * jr_func - Emulate a JR instruction.
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns SIGILL if JR was in delay slot, SIGEMT if we
+ * can't compute the EPC, SIGSEGV if we can't access the
+ * userland instruction or 0 on success.
+ */
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+       int err;
+       unsigned long cepc, epc, nepc;
+       u32 nir;
+
+       if (delay_slot(regs))
+               return SIGILL;
+
+       /* EPC after the RI/JR instruction */
+       nepc = regs->cp0_epc;
+       /* Roll back to the reserved R2 JR instruction */
+       regs->cp0_epc -= 4;
+       epc = regs->cp0_epc;
+       err = __compute_return_epc(regs);
+
+       if (err < 0)
+               return SIGEMT;
+
+
+       /* Computed EPC */
+       cepc = regs->cp0_epc;
+
+       /* Get DS instruction */
+       err = __get_user(nir, (u32 __user *)nepc);
+       if (err)
+               return SIGSEGV;
+
+       MIPS_R2BR_STATS(jrs);
+
+       /* If nir == 0(NOP), then nothing else to do */
+       if (nir) {
+               /*
+                * Negative err means FPU instruction in BD-slot,
+                * Zero err means 'BD-slot emulation done'
+                * For anything else we go back to trampoline emulation.
+                */
+               err = mipsr6_emul(regs, nir);
+               if (err > 0) {
+                       regs->cp0_epc = nepc;
+                       err = mips_dsemul(regs, nir, cepc);
+                       if (err == SIGILL)
+                               err = SIGEMT;
+                       MIPS_R2_STATS(dsemul);
+               }
+       }
+
+       return err;
+}
+
+/**
+ * movz_func - Emulate a MOVZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+       if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * movn_func - Emulate a MOVN instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+       if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * mfhi_func - Emulate a MFHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+       if (MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->hi;
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mthi_func - Emulate a MTHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+       regs->hi = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mflo_func - Emulate a MFLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+       if (MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->lo;
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mtlo_func - Emulate a MTLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mtlo_func(struct pt_regs *regs, u32 ir)
+{
+       regs->lo = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mult_func - Emulate a MULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mult_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+
+       rs = res;
+       regs->lo = (s64)rs;
+       rt = res >> 32;
+       res = (s64)rt;
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * multu_func - Emulate a MULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int multu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = res;
+       regs->lo = (s64)rt;
+       regs->hi = (s64)(res >> 32);
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * div_func - Emulate a DIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int div_func(struct pt_regs *regs, u32 ir)
+{
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = (s64)(rs / rt);
+       regs->hi = (s64)(rs % rt);
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * divu_func - Emulate a DIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int divu_func(struct pt_regs *regs, u32 ir)
+{
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = (s64)(rs / rt);
+       regs->hi = (s64)(rs % rt);
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * dmult_func - Emulate a DMULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmult_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = rt * rs;
+
+       regs->lo = res;
+       __asm__ __volatile__(
+               "dmuh %0, %1, %2\t\n"
+               : "=r"(res)
+               : "r"(rt), "r"(rs));
+
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * dmultu_func - Emulate a DMULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmultu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = rt * rs;
+
+       regs->lo = res;
+       __asm__ __volatile__(
+               "dmuhu %0, %1, %2\t\n"
+               : "=r"(res)
+               : "r"(rt), "r"(rs));
+
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * ddiv_func - Emulate a DDIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+       s64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = rs / rt;
+       regs->hi = rs % rt;
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * ddivu_func - Emulate a DDIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = rs / rt;
+       regs->hi = rs % rt;
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/* R6 removed instructions for the SPECIAL opcode */
+static struct r2_decoder_table spec_op_table[] = {
+       { 0xfc1ff83f, 0x00000008, jr_func },
+       { 0xfc00ffff, 0x00000018, mult_func },
+       { 0xfc00ffff, 0x00000019, multu_func },
+       { 0xfc00ffff, 0x0000001c, dmult_func },
+       { 0xfc00ffff, 0x0000001d, dmultu_func },
+       { 0xffff07ff, 0x00000010, mfhi_func },
+       { 0xfc1fffff, 0x00000011, mthi_func },
+       { 0xffff07ff, 0x00000012, mflo_func },
+       { 0xfc1fffff, 0x00000013, mtlo_func },
+       { 0xfc0307ff, 0x00000001, movf_func },
+       { 0xfc0307ff, 0x00010001, movt_func },
+       { 0xfc0007ff, 0x0000000a, movz_func },
+       { 0xfc0007ff, 0x0000000b, movn_func },
+       { 0xfc00ffff, 0x0000001a, div_func },
+       { 0xfc00ffff, 0x0000001b, divu_func },
+       { 0xfc00ffff, 0x0000001e, ddiv_func },
+       { 0xfc00ffff, 0x0000001f, ddivu_func },
+       {}
+};
+
+/**
+ * madd_func - Emulate a MADD instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res += ((((s64)rt) << 32) | (u32)rs);
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * maddu_func - Emulate a MADDU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res += ((((s64)rt) << 32) | (u32)rs);
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * msub_func - Emulate a MSUB instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * msubu_func - Emulate a MSUBU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * mul_func - Emulate a MUL instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+
+       rs = res;
+       regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * clz_func - Emulate a CLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+       u32 res;
+       u32 rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * clo_func - Emulate a CLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+       u32 res;
+       u32 rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * dclz_func - Emulate a DCLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * dclo_func - Emulate a DCLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dclo_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/* R6 removed instructions for the SPECIAL2 opcode */
+static struct r2_decoder_table spec2_op_table[] = {
+       { 0xfc00ffff, 0x70000000, madd_func },
+       { 0xfc00ffff, 0x70000001, maddu_func },
+       { 0xfc0007ff, 0x70000002, mul_func },
+       { 0xfc00ffff, 0x70000004, msub_func },
+       { 0xfc00ffff, 0x70000005, msubu_func },
+       { 0xfc0007ff, 0x70000020, clz_func },
+       { 0xfc0007ff, 0x70000021, clo_func },
+       { 0xfc0007ff, 0x70000024, dclz_func },
+       { 0xfc0007ff, 0x70000025, dclo_func },
+       { }
+};
+
+static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
+                                     struct r2_decoder_table *table)
+{
+       struct r2_decoder_table *p;
+       int err;
+
+       for (p = table; p->func; p++) {
+               if ((inst & p->mask) == p->code) {
+                       err = (p->func)(regs, inst);
+                       return err;
+               }
+       }
+       return SIGILL;
+}
+
+/**
+ * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
+ * @regs: Process register set
+ * @inst: Instruction to decode and emulate
+ */
+int mipsr2_decoder(struct pt_regs *regs, u32 inst)
+{
+       int err = 0;
+       unsigned long vaddr;
+       u32 nir;
+       unsigned long cpc, epc, nepc, r31, res, rs, rt;
+
+       void __user *fault_addr = NULL;
+       int pass = 0;
+
+repeat:
+       r31 = regs->regs[31];
+       epc = regs->cp0_epc;
+       err = compute_return_epc(regs);
+       if (err < 0) {
+               BUG();
+               return SIGEMT;
+       }
+       pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n",
+                inst, epc, pass);
+
+       switch (MIPSInst_OPCODE(inst)) {
+       case spec_op:
+               err = mipsr2_find_op_func(regs, inst, spec_op_table);
+               if (err < 0) {
+                       /* FPU instruction under JR */
+                       regs->cp0_cause |= CAUSEF_BD;
+                       goto fpu_emul;
+               }
+               break;
+       case spec2_op:
+               err = mipsr2_find_op_func(regs, inst, spec2_op_table);
+               break;
+       case bcond_op:
+               rt = MIPSInst_RT(inst);
+               rs = MIPSInst_RS(inst);
+               switch (rt) {
+               case tgei_op:
+                       if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TGEI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tgeiu_op:
+                       if (regs->regs[rs] >= MIPSInst_UIMM(inst))
+                               do_trap_or_bp(regs, 0, "TGEIU");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tlti_op:
+                       if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TLTI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tltiu_op:
+                       if (regs->regs[rs] < MIPSInst_UIMM(inst))
+                               do_trap_or_bp(regs, 0, "TLTIU");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case teqi_op:
+                       if (regs->regs[rs] == MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TEQI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tnei_op:
+                       if (regs->regs[rs] != MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TNEI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case bltzl_op:
+               case bgezl_op:
+               case bltzall_op:
+               case bgezall_op:
+                       if (delay_slot(regs)) {
+                               err = SIGILL;
+                               break;
+                       }
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = __compute_return_epc(regs);
+                       if (err < 0)
+                               return SIGEMT;
+                       if (err != BRANCH_LIKELY_TAKEN)
+                               break;
+                       cpc = regs->cp0_epc;
+                       nepc = epc + 4;
+                       err = __get_user(nir, (u32 __user *)nepc);
+                       if (err) {
+                               err = SIGSEGV;
+                               break;
+                       }
+                       /*
+                        * This will probably be optimized away when
+                        * CONFIG_DEBUG_FS is not enabled
+                        */
+                       switch (rt) {
+                       case bltzl_op:
+                               MIPS_R2BR_STATS(bltzl);
+                               break;
+                       case bgezl_op:
+                               MIPS_R2BR_STATS(bgezl);
+                               break;
+                       case bltzall_op:
+                               MIPS_R2BR_STATS(bltzall);
+                               break;
+                       case bgezall_op:
+                               MIPS_R2BR_STATS(bgezall);
+                               break;
+                       }
+
+                       switch (MIPSInst_OPCODE(nir)) {
+                       case cop1_op:
+                       case cop1x_op:
+                       case lwc1_op:
+                       case swc1_op:
+                               regs->cp0_cause |= CAUSEF_BD;
+                               goto fpu_emul;
+                       }
+                       if (nir) {
+                               err = mipsr6_emul(regs, nir);
+                               if (err > 0) {
+                                       err = mips_dsemul(regs, nir, cpc);
+                                       if (err == SIGILL)
+                                               err = SIGEMT;
+                                       MIPS_R2_STATS(dsemul);
+                               }
+                       }
+                       break;
+               case bltzal_op:
+               case bgezal_op:
+                       if (delay_slot(regs)) {
+                               err = SIGILL;
+                               break;
+                       }
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = __compute_return_epc(regs);
+                       if (err < 0)
+                               return SIGEMT;
+                       cpc = regs->cp0_epc;
+                       nepc = epc + 4;
+                       err = __get_user(nir, (u32 __user *)nepc);
+                       if (err) {
+                               err = SIGSEGV;
+                               break;
+                       }
+                       /*
+                        * This will probably be optimized away when
+                        * CONFIG_DEBUG_FS is not enabled
+                        */
+                       switch (rt) {
+                       case bltzal_op:
+                               MIPS_R2BR_STATS(bltzal);
+                               break;
+                       case bgezal_op:
+                               MIPS_R2BR_STATS(bgezal);
+                               break;
+                       }
+
+                       switch (MIPSInst_OPCODE(nir)) {
+                       case cop1_op:
+                       case cop1x_op:
+                       case lwc1_op:
+                       case swc1_op:
+                               regs->cp0_cause |= CAUSEF_BD;
+                               goto fpu_emul;
+                       }
+                       if (nir) {
+                               err = mipsr6_emul(regs, nir);
+                               if (err > 0) {
+                                       err = mips_dsemul(regs, nir, cpc);
+                                       if (err == SIGILL)
+                                               err = SIGEMT;
+                                       MIPS_R2_STATS(dsemul);
+                               }
+                       }
+                       break;
+               default:
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = SIGILL;
+                       break;
+               }
+               break;
+
+       case beql_op:
+       case bnel_op:
+       case blezl_op:
+       case bgtzl_op:
+               if (delay_slot(regs)) {
+                       err = SIGILL;
+                       break;
+               }
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+               err = __compute_return_epc(regs);
+               if (err < 0)
+                       return SIGEMT;
+               if (err != BRANCH_LIKELY_TAKEN)
+                       break;
+               cpc = regs->cp0_epc;
+               nepc = epc + 4;
+               err = __get_user(nir, (u32 __user *)nepc);
+               if (err) {
+                       err = SIGSEGV;
+                       break;
+               }
+               /*
+                * This will probably be optimized away when
+                * CONFIG_DEBUG_FS is not enabled
+                */
+               switch (MIPSInst_OPCODE(inst)) {
+               case beql_op:
+                       MIPS_R2BR_STATS(beql);
+                       break;
+               case bnel_op:
+                       MIPS_R2BR_STATS(bnel);
+                       break;
+               case blezl_op:
+                       MIPS_R2BR_STATS(blezl);
+                       break;
+               case bgtzl_op:
+                       MIPS_R2BR_STATS(bgtzl);
+                       break;
+               }
+
+               switch (MIPSInst_OPCODE(nir)) {
+               case cop1_op:
+               case cop1x_op:
+               case lwc1_op:
+               case swc1_op:
+                       regs->cp0_cause |= CAUSEF_BD;
+                       goto fpu_emul;
+               }
+               if (nir) {
+                       err = mipsr6_emul(regs, nir);
+                       if (err > 0) {
+                               err = mips_dsemul(regs, nir, cpc);
+                               if (err == SIGILL)
+                                       err = SIGEMT;
+                               MIPS_R2_STATS(dsemul);
+                       }
+               }
+               break;
+       case lwc1_op:
+       case swc1_op:
+       case cop1_op:
+       case cop1x_op:
+fpu_emul:
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+               if (!used_math()) {     /* First time FPU user.  */
+                       err = init_fpu();
+                       set_used_math();
+               }
+               lose_fpu(1);    /* Save FPU state for the emulator. */
+
+               err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+                                              &fault_addr);
+
+               /*
+                * this is a tricky issue - lose_fpu() uses LL/SC atomics
+                * if FPU is owned and effectively cancels user level LL/SC.
+                * So, it could be logical to don't restore FPU ownership here.
+                * But the sequence of multiple FPU instructions is much much
+                * more often than LL-FPU-SC and I prefer loop here until
+                * next scheduler cycle cancels FPU ownership
+                */
+               own_fpu(1);     /* Restore FPU state. */
+
+               if (err)
+                       current->thread.cp0_baduaddr = (unsigned long)fault_addr;
+
+               MIPS_R2_STATS(fpus);
+
+               break;
+
+       case lwl_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:     sll     %0, %0, 0\n"
+                       "10:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       10b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+
+               break;
+
+       case lwr_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       sll     %0, %0, 0\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       sll     %0, %0, 0\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "10:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       10b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+
+               break;
+
+       case swl_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                               EXT     "%1, %0, 24, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 0, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                               EXT     "%1, %0, 24, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 0, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+
+       case swr_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                               EXT     "%1, %0, 0, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 24, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                               EXT     "%1, %0, 0, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 24, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+
+       case ldl_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 56, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 48, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 40, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 32, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 56, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 48, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 40, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 32, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 24, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 16, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 8, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+               break;
+
+       case ldr_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 0, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 8, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 16, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 24, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 32, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 40, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 48, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 56, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 0, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 32, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 40, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 48, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 56, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li     %3,%4\n"
+                       "       j      9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word  1b,8b\n"
+                       "       .word  2b,8b\n"
+                       "       .word  3b,8b\n"
+                       "       .word  4b,8b\n"
+                       "       .word  5b,8b\n"
+                       "       .word  6b,8b\n"
+                       "       .word  7b,8b\n"
+                       "       .word  0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+               break;
+
+       case sdl_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "       dextu   %1, %0, 56, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 0, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "       dextu   %1, %0, 56, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 0, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+               break;
+
+       case sdr_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "       dext    %1, %0, 0, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 56, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "       dext    %1, %0, 0, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 56, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+       case ll_op:
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x3) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * a Config5/LLB availability. So it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception so once we emulate them here, we return
+                        * back to userland with ERETNC. That preserves the
+                        * LLAddr/LLB so the subsequent SC instruction will
+                        * succeed preserving the atomic semantics of the LL/SC
+                        * block. Without that, there is no safe way to emulate
+                        * an LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "ll     %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "=&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV)
+                       : "memory");
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case sc_op:
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x3) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * a Config5/LLB availability. So it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception so once we emulate them here, we return
+                        * back to userland with ERETNC. That preserves the
+                        * LLAddr/LLB so the subsequent SC instruction will
+                        * succeed preserving the atomic semantics of the LL/SC
+                        * block. Without that, there is no safe way to emulate
+                        * an LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               res = regs->regs[MIPSInst_RT(inst)];
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "sc     %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "+&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case lld_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x7) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * a Config5/LLB availability. So it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception so once we emulate them here, we return
+                        * back to userland with ERETNC. That preserves the
+                        * LLAddr/LLB so the subsequent SC instruction will
+                        * succeed preserving the atomic semantics of the LL/SC
+                        * block. Without that, there is no safe way to emulate
+                        * an LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "lld    %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "=&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV)
+                       : "memory");
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case scd_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x7) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * a Config5/LLB availability. So it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception so once we emulate them here, we return
+                        * back to userland with ERETNC. That preserves the
+                        * LLAddr/LLB so the subsequent SC instruction will
+                        * succeed preserving the atomic semantics of the LL/SC
+                        * block. Without that, there is no safe way to emulate
+                        * an LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               res = regs->regs[MIPSInst_RT(inst)];
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "scd    %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "+&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+       case pref_op:
+               /* skip it */
+               break;
+       default:
+               err = SIGILL;
+       }
+
+       /*
+        * Let's not return to userland just yet. It's costly and
+        * it's likely we have more R2 instructions to emulate
+        */
+       if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
+               regs->cp0_cause &= ~CAUSEF_BD;
+               err = get_user(inst, (u32 __user *)regs->cp0_epc);
+               if (!err)
+                       goto repeat;
+
+               if (err < 0)
+                       err = SIGSEGV;
+       }
+
+       if (err && (err != SIGEMT)) {
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+       }
+
+       /* Likely a MIPS R6 compatible instruction */
+       if (pass && (err == SIGILL))
+               err = 0;
+
+       return err;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mipsr2_stats_show(struct seq_file *s, void *unused)
+{
+
+       seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
+       seq_printf(s, "movs\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.movs),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
+       seq_printf(s, "hilo\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
+       seq_printf(s, "muls\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.muls),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
+       seq_printf(s, "divs\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.divs),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
+       seq_printf(s, "dsps\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
+       seq_printf(s, "bops\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.bops),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
+       seq_printf(s, "traps\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.traps),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
+       seq_printf(s, "fpus\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
+       seq_printf(s, "loads\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.loads),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
+       seq_printf(s, "stores\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.stores),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
+       seq_printf(s, "llsc\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
+       seq_printf(s, "dsemul\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
+       seq_printf(s, "jr\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
+       seq_printf(s, "bltzl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
+       seq_printf(s, "bgezl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
+       seq_printf(s, "bltzll\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
+       seq_printf(s, "bgezll\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
+       seq_printf(s, "bltzal\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
+       seq_printf(s, "bgezal\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
+       seq_printf(s, "beql\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
+       seq_printf(s, "bnel\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
+       seq_printf(s, "blezl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
+       seq_printf(s, "bgtzl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
+
+       return 0;
+}
+
+static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
+{
+       mipsr2_stats_show(s, unused);
+
+       __this_cpu_write((mipsr2emustats).movs, 0);
+       __this_cpu_write((mipsr2bdemustats).movs, 0);
+       __this_cpu_write((mipsr2emustats).hilo, 0);
+       __this_cpu_write((mipsr2bdemustats).hilo, 0);
+       __this_cpu_write((mipsr2emustats).muls, 0);
+       __this_cpu_write((mipsr2bdemustats).muls, 0);
+       __this_cpu_write((mipsr2emustats).divs, 0);
+       __this_cpu_write((mipsr2bdemustats).divs, 0);
+       __this_cpu_write((mipsr2emustats).dsps, 0);
+       __this_cpu_write((mipsr2bdemustats).dsps, 0);
+       __this_cpu_write((mipsr2emustats).bops, 0);
+       __this_cpu_write((mipsr2bdemustats).bops, 0);
+       __this_cpu_write((mipsr2emustats).traps, 0);
+       __this_cpu_write((mipsr2bdemustats).traps, 0);
+       __this_cpu_write((mipsr2emustats).fpus, 0);
+       __this_cpu_write((mipsr2bdemustats).fpus, 0);
+       __this_cpu_write((mipsr2emustats).loads, 0);
+       __this_cpu_write((mipsr2bdemustats).loads, 0);
+       __this_cpu_write((mipsr2emustats).stores, 0);
+       __this_cpu_write((mipsr2bdemustats).stores, 0);
+       __this_cpu_write((mipsr2emustats).llsc, 0);
+       __this_cpu_write((mipsr2bdemustats).llsc, 0);
+       __this_cpu_write((mipsr2emustats).dsemul, 0);
+       __this_cpu_write((mipsr2bdemustats).dsemul, 0);
+       __this_cpu_write((mipsr2bremustats).jrs, 0);
+       __this_cpu_write((mipsr2bremustats).bltzl, 0);
+       __this_cpu_write((mipsr2bremustats).bgezl, 0);
+       __this_cpu_write((mipsr2bremustats).bltzll, 0);
+       __this_cpu_write((mipsr2bremustats).bgezll, 0);
+       __this_cpu_write((mipsr2bremustats).bltzal, 0);
+       __this_cpu_write((mipsr2bremustats).bgezal, 0);
+       __this_cpu_write((mipsr2bremustats).beql, 0);
+       __this_cpu_write((mipsr2bremustats).bnel, 0);
+       __this_cpu_write((mipsr2bremustats).blezl, 0);
+       __this_cpu_write((mipsr2bremustats).bgtzl, 0);
+
+       return 0;
+}
+
+static int mipsr2_stats_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mipsr2_stats_show, inode->i_private);
+}
+
+static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mipsr2_stats_clear_show, inode->i_private);
+}
+
+static const struct file_operations mipsr2_emul_fops = {
+       .open                   = mipsr2_stats_open,
+       .read                   = seq_read,
+       .llseek                 = seq_lseek,
+       .release                = single_release,
+};
+
+static const struct file_operations mipsr2_clear_fops = {
+       .open                   = mipsr2_stats_clear_open,
+       .read                   = seq_read,
+       .llseek                 = seq_lseek,
+       .release                = single_release,
+};
+
+
+static int __init mipsr2_init_debugfs(void)
+{
+       extern struct dentry    *mips_debugfs_dir;
+       struct dentry           *mipsr2_emul;
+
+       if (!mips_debugfs_dir)
+               return -ENODEV;
+
+       mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
+                                         mips_debugfs_dir, NULL,
+                                         &mipsr2_emul_fops);
+       if (!mipsr2_emul)
+               return -ENOMEM;
+
+       mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
+                                         mips_debugfs_dir, NULL,
+                                         &mipsr2_clear_fops);
+       if (!mipsr2_emul)
+               return -ENOMEM;
+
+       return 0;
+}
+
+device_initcall(mipsr2_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
index 17eaf0cf760c60eb08fad9d666877dc5606ee61b..291af0b5c4828adaa22ff0fc065f37c6a094fd85 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/ftrace.h>
+#include <asm/fpu.h>
+#include <asm/msa.h>
 
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_kernel_nocheck_asm(char *__to,
@@ -31,6 +33,14 @@ extern long __strnlen_kernel_asm(const char *s);
 extern long __strnlen_user_nocheck_asm(const char *s);
 extern long __strnlen_user_asm(const char *s);
 
+/*
+ * Core architecture code
+ */
+EXPORT_SYMBOL_GPL(_save_fp);
+#ifdef CONFIG_CPU_HAS_MSA
+EXPORT_SYMBOL_GPL(_save_msa);
+#endif
+
 /*
  * String functions
  */
@@ -67,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm);
 EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
+#ifndef CONFIG_CPU_MIPSR6
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_kernel);
 EXPORT_SYMBOL(__csum_partial_copy_to_user);
 EXPORT_SYMBOL(__csum_partial_copy_from_user);
+#endif
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER
index f6547680c81cd7db68f5c77b19131a6f9fec05d4..423ae83af1fb7043a1daff5d06a079658446de5a 100644 (file)
        /*
         * check if we need to save FPU registers
         */
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-       LONG_L  t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
-
-       and     t0, t0, t1
-       LONG_S  t0, TI_FLAGS(t3)
+       .set push
+       .set noreorder
+       beqz    a3, 1f
+        PTR_L  t3, TASK_THREAD_INFO(a0)
+       .set pop
 
        /*
         * clear saved user stack CU1 bit
        .set pop
 1:
 
-       /* check if we need to save COP2 registers */
-       PTR_L   t2, TASK_THREAD_INFO(a0)
-       LONG_L  t0, ST_OFF(t2)
-       bbit0   t0, 30, 1f
-
-       /* Disable COP2 in the stored process state */
-       li      t1, ST0_CU2
-       xor     t0, t1
-       LONG_S  t0, ST_OFF(t2)
-
-       /* Enable COP2 so we can save it */
-       mfc0    t0, CP0_STATUS
-       or      t0, t1
-       mtc0    t0, CP0_STATUS
-
-       /* Save COP2 */
-       daddu   a0, THREAD_CP2
-       jal octeon_cop2_save
-       dsubu   a0, THREAD_CP2
-
-       /* Disable COP2 now that we are done */
-       mfc0    t0, CP0_STATUS
-       li      t1, ST0_CU2
-       xor     t0, t1
-       mtc0    t0, CP0_STATUS
-
-1:
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        /* Check if we need to store CVMSEG state */
-       mfc0    t0, $11,7       /* CvmMemCtl */
+       dmfc0   t0, $11,7       /* CvmMemCtl */
        bbit0   t0, 6, 3f       /* Is user access enabled? */
 
        /* Store the CVMSEG state */
        .set reorder
 
        /* Disable access to CVMSEG */
-       mfc0    t0, $11,7       /* CvmMemCtl */
+       dmfc0   t0, $11,7       /* CvmMemCtl */
        xori    t0, t0, 0x40    /* Bit 6 is CVMSEG user enable */
-       mtc0    t0, $11,7       /* CvmMemCtl */
+       dmtc0   t0, $11,7       /* CvmMemCtl */
 #endif
 3:
 
  * void octeon_cop2_save(struct octeon_cop2_state *a0)
  */
        .align  7
+       .set push
+       .set noreorder
        LEAF(octeon_cop2_save)
 
        dmfc0   t9, $9,7        /* CvmCtl register. */
        dmfc2   t2, 0x0200
        sd      t0, OCTEON_CP2_CRC_IV(a0)
        sd      t1, OCTEON_CP2_CRC_LENGTH(a0)
-       sd      t2, OCTEON_CP2_CRC_POLY(a0)
        /* Skip next instructions if CvmCtl[NODFA_CP2] set */
        bbit1   t9, 28, 1f
+        sd     t2, OCTEON_CP2_CRC_POLY(a0)
 
        /* Save the LLM state */
        dmfc2   t0, 0x0402
        dmfc2   t1, 0x040A
        sd      t0, OCTEON_CP2_LLM_DAT(a0)
-       sd      t1, OCTEON_CP2_LLM_DAT+8(a0)
 
 1:     bbit1   t9, 26, 3f      /* done if CvmCtl[NOCRYPTO] set */
+        sd     t1, OCTEON_CP2_LLM_DAT+8(a0)
 
        /* Save the COP2 crypto state */
        /* this part is mostly common to both pass 1 and later revisions */
        sd      t2, OCTEON_CP2_AES_KEY+16(a0)
        dmfc2   t2, 0x0101
        sd      t3, OCTEON_CP2_AES_KEY+24(a0)
-       mfc0    t3, $15,0       /* Get the processor ID register */
+       mfc0    v0, $15,0       /* Get the processor ID register */
        sd      t0, OCTEON_CP2_AES_KEYLEN(a0)
-       li      t0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
+       li      v1, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
        sd      t1, OCTEON_CP2_AES_RESULT(a0)
-       sd      t2, OCTEON_CP2_AES_RESULT+8(a0)
        /* Skip to the Pass1 version of the remainder of the COP2 state */
-       beq     t3, t0, 2f
+       beq     v0, v1, 2f
+        sd     t2, OCTEON_CP2_AES_RESULT+8(a0)
 
        /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
        dmfc2   t1, 0x0240
        dmfc2   t2, 0x0241
+       ori     v1, v1, 0x9500 /* lowest OCTEON III PrId*/
        dmfc2   t3, 0x0242
+       subu    v1, v0, v1 /* prid - lowest OCTEON III PrId */
        dmfc2   t0, 0x0243
        sd      t1, OCTEON_CP2_HSH_DATW(a0)
        dmfc2   t1, 0x0244
        sd      t1, OCTEON_CP2_GFM_MULT+8(a0)
        sd      t2, OCTEON_CP2_GFM_POLY(a0)
        sd      t3, OCTEON_CP2_GFM_RESULT(a0)
-       sd      t0, OCTEON_CP2_GFM_RESULT+8(a0)
+       bltz    v1, 4f
+        sd     t0, OCTEON_CP2_GFM_RESULT+8(a0)
+       /* OCTEON III things */
+       dmfc2   t0, 0x024F
+       dmfc2   t1, 0x0050
+       sd      t0, OCTEON_CP2_SHA3(a0)
+       sd      t1, OCTEON_CP2_SHA3+8(a0)
+4:
        jr      ra
+        nop
 
 2:     /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
        dmfc2   t3, 0x0040
 
 3:     /* pass 1 or CvmCtl[NOCRYPTO] set */
        jr      ra
+        nop
        END(octeon_cop2_save)
+       .set pop
 
 /*
  * void octeon_cop2_restore(struct octeon_cop2_state *a0)
        ld      t2, OCTEON_CP2_AES_RESULT+8(a0)
        mfc0    t3, $15,0       /* Get the processor ID register */
        dmtc2   t0, 0x0110
-       li      t0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
+       li      v0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
        dmtc2   t1, 0x0100
-       bne     t0, t3, 3f      /* Skip the next stuff for non-pass1 */
+       bne     v0, t3, 3f      /* Skip the next stuff for non-pass1 */
         dmtc2  t2, 0x0101
 
        /* this code is specific for pass 1 */
 
 3:     /* this is post-pass1 code */
        ld      t2, OCTEON_CP2_HSH_DATW(a0)
+       ori     v0, v0, 0x9500 /* lowest OCTEON III PrId*/
        ld      t0, OCTEON_CP2_HSH_DATW+8(a0)
        ld      t1, OCTEON_CP2_HSH_DATW+16(a0)
        dmtc2   t2, 0x0240
        dmtc2   t2, 0x0259
        ld      t2, OCTEON_CP2_GFM_RESULT+8(a0)
        dmtc2   t0, 0x025E
+       subu    v0, t3, v0 /* prid - lowest OCTEON III PrId */
        dmtc2   t1, 0x025A
-       dmtc2   t2, 0x025B
-
+       bltz    v0, done_restore
+        dmtc2  t2, 0x025B
+       /* OCTEON III things */
+       ld      t0, OCTEON_CP2_SHA3(a0)
+       ld      t1, OCTEON_CP2_SHA3+8(a0)
+       dmtc2   t0, 0x0051
+       dmtc2   t1, 0x0050
 done_restore:
        jr      ra
         nop
@@ -450,18 +440,23 @@ done_restore:
  * void octeon_mult_save()
  * sp is assumed to point to a struct pt_regs
  *
- * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
- *      safely modify k0 and k1.
+ * NOTE: This is called in SAVE_TEMP in stackframe.h. It can
+ *       safely modify v1,k0, k1,$10-$15, and $24.  It will
+ *      be overwritten with a processor specific version of the code.
  */
-       .align  7
+       .p2align 7
        .set push
        .set noreorder
        LEAF(octeon_mult_save)
-       dmfc0   k0, $9,7        /* CvmCtl register. */
-       bbit1   k0, 27, 1f      /* Skip CvmCtl[NOMUL] */
+       jr      ra
         nop
+       .space 30 * 4, 0
+octeon_mult_save_end:
+       EXPORT(octeon_mult_save_end)
+       END(octeon_mult_save)
 
-       /* Save the multiplier state */
+       LEAF(octeon_mult_save2)
+       /* Save the multiplier state OCTEON II and earlier*/
        v3mulu  k0, $0, $0
        v3mulu  k1, $0, $0
        sd      k0, PT_MTP(sp)        /* PT_MTP    has P0 */
@@ -476,44 +471,107 @@ done_restore:
        sd      k0, PT_MPL+8(sp)      /* PT_MPL+8  has MPL1 */
        jr      ra
         sd     k1, PT_MPL+16(sp)     /* PT_MPL+16 has MPL2 */
-
-1:     /* Resume here if CvmCtl[NOMUL] */
+octeon_mult_save2_end:
+       EXPORT(octeon_mult_save2_end)
+       END(octeon_mult_save2)
+
+       LEAF(octeon_mult_save3)
+       /* Save the multiplier state OCTEON III */
+       v3mulu  $10, $0, $0             /* read P0 */
+       v3mulu  $11, $0, $0             /* read P1 */
+       v3mulu  $12, $0, $0             /* read P2 */
+       sd      $10, PT_MTP+(0*8)(sp)   /* store P0 */
+       v3mulu  $10, $0, $0             /* read P3 */
+       sd      $11, PT_MTP+(1*8)(sp)   /*  store P1 */
+       v3mulu  $11, $0, $0             /* read P4 */
+       sd      $12, PT_MTP+(2*8)(sp)   /* store P2 */
+       ori     $13, $0, 1
+       v3mulu  $12, $0, $0             /* read P5 */
+       sd      $10, PT_MTP+(3*8)(sp)   /* store P3 */
+       v3mulu  $13, $13, $0            /* P4-P0 = MPL5-MPL1, $13 = MPL0 */
+       sd      $11, PT_MTP+(4*8)(sp)   /* store P4 */
+       v3mulu  $10, $0, $0             /* read MPL1 */
+       sd      $12, PT_MTP+(5*8)(sp)   /* store P5 */
+       v3mulu  $11, $0, $0             /* read MPL2 */
+       sd      $13, PT_MPL+(0*8)(sp)   /* store MPL0 */
+       v3mulu  $12, $0, $0             /* read MPL3 */
+       sd      $10, PT_MPL+(1*8)(sp)   /* store MPL1 */
+       v3mulu  $10, $0, $0             /* read MPL4 */
+       sd      $11, PT_MPL+(2*8)(sp)   /* store MPL2 */
+       v3mulu  $11, $0, $0             /* read MPL5 */
+       sd      $12, PT_MPL+(3*8)(sp)   /* store MPL3 */
+       sd      $10, PT_MPL+(4*8)(sp)   /* store MPL4 */
        jr      ra
-       END(octeon_mult_save)
+        sd     $11, PT_MPL+(5*8)(sp)   /* store MPL5 */
+octeon_mult_save3_end:
+       EXPORT(octeon_mult_save3_end)
+       END(octeon_mult_save3)
        .set pop
 
 /*
  * void octeon_mult_restore()
  * sp is assumed to point to a struct pt_regs
  *
- * NOTE: This is called in RESTORE_SOME in stackframe.h.
+ * NOTE: This is called in RESTORE_TEMP in stackframe.h.
  */
-       .align  7
+       .p2align 7
        .set push
        .set noreorder
        LEAF(octeon_mult_restore)
-       dmfc0   k1, $9,7                /* CvmCtl register. */
-       ld      v0, PT_MPL(sp)          /* MPL0 */
-       ld      v1, PT_MPL+8(sp)        /* MPL1 */
-       ld      k0, PT_MPL+16(sp)       /* MPL2 */
-       bbit1   k1, 27, 1f              /* Skip CvmCtl[NOMUL] */
-       /* Normally falls through, so no time wasted here */
-       nop
+       jr      ra
+        nop
+       .space 30 * 4, 0
+octeon_mult_restore_end:
+       EXPORT(octeon_mult_restore_end)
+       END(octeon_mult_restore)
 
+       LEAF(octeon_mult_restore2)
+       ld      v0, PT_MPL(sp)          /* MPL0 */
+       ld      v1, PT_MPL+8(sp)        /* MPL1 */
+       ld      k0, PT_MPL+16(sp)       /* MPL2 */
        /* Restore the multiplier state */
-       ld      k1, PT_MTP+16(sp)       /* P2 */
-       MTM0    v0                      /* MPL0 */
+       ld      k1, PT_MTP+16(sp)       /* P2 */
+       mtm0    v0                      /* MPL0 */
        ld      v0, PT_MTP+8(sp)        /* P1 */
-       MTM1    v1                      /* MPL1 */
-       ld      v1, PT_MTP(sp)          /* P0 */
-       MTM2    k0                      /* MPL2 */
-       MTP2    k1                      /* P2 */
-       MTP1    v0                      /* P1 */
+       mtm1    v1                      /* MPL1 */
+       ld      v1, PT_MTP(sp)          /* P0 */
+       mtm2    k0                      /* MPL2 */
+       mtp2    k1                      /* P2 */
+       mtp1    v0                      /* P1 */
        jr      ra
-        MTP0   v1                      /* P0 */
-
-1:     /* Resume here if CvmCtl[NOMUL] */
+        mtp0   v1                      /* P0 */
+octeon_mult_restore2_end:
+       EXPORT(octeon_mult_restore2_end)
+       END(octeon_mult_restore2)
+
+       LEAF(octeon_mult_restore3)
+       ld      $12, PT_MPL+(0*8)(sp)   /* read MPL0 */
+       ld      $13, PT_MPL+(3*8)(sp)   /* read MPL3 */
+       ld      $10, PT_MPL+(1*8)(sp)   /* read MPL1 */
+       ld      $11, PT_MPL+(4*8)(sp)   /* read MPL4 */
+       .word   0x718d0008
+       /* mtm0 $12, $13                   restore MPL0 and MPL3 */
+       ld      $12, PT_MPL+(2*8)(sp)   /* read MPL2 */
+       .word   0x714b000c
+       /* mtm1 $10, $11                   restore MPL1 and MPL4 */
+       ld      $13, PT_MPL+(5*8)(sp)   /* read MPL5 */
+       ld      $10, PT_MTP+(0*8)(sp)   /* read P0 */
+       ld      $11, PT_MTP+(3*8)(sp)   /* read P3 */
+       .word   0x718d000d
+       /* mtm2 $12, $13                   restore MPL2 and MPL5 */
+       ld      $12, PT_MTP+(1*8)(sp)   /* read P1 */
+       .word   0x714b0009
+       /* mtp0 $10, $11                   restore P0 and P3 */
+       ld      $13, PT_MTP+(4*8)(sp)   /* read P4 */
+       ld      $10, PT_MTP+(2*8)(sp)   /* read P2 */
+       ld      $11, PT_MTP+(5*8)(sp)   /* read P5 */
+       .word   0x718d000a
+       /* mtp1 $12, $13                   restore P1 and P4 */
        jr      ra
-        nop
-       END(octeon_mult_restore)
+       .word   0x714b000b
+       /* mtp2 $10, $11                   restore P2 and P5 */
+
+octeon_mult_restore3_end:
+       EXPORT(octeon_mult_restore3_end)
+       END(octeon_mult_restore3)
        .set pop
index 097fc8d14e4225288733bac25f6ea17c6737d1c8..130af7d26a9c5de2fe2ec5fdd5969d9bd4a5f42a 100644 (file)
@@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "]\n");
        }
 
-       seq_printf(m, "isa\t\t\t: mips1");
+       seq_printf(m, "isa\t\t\t:"); 
+       if (cpu_has_mips_r1)
+               seq_printf(m, " mips1");
        if (cpu_has_mips_2)
                seq_printf(m, "%s", " mips2");
        if (cpu_has_mips_3)
@@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "%s", " mips32r1");
        if (cpu_has_mips32r2)
                seq_printf(m, "%s", " mips32r2");
+       if (cpu_has_mips32r6)
+               seq_printf(m, "%s", " mips32r6");
        if (cpu_has_mips64r1)
                seq_printf(m, "%s", " mips64r1");
        if (cpu_has_mips64r2)
                seq_printf(m, "%s", " mips64r2");
+       if (cpu_has_mips64r6)
+               seq_printf(m, "%s", " mips64r6");
        seq_printf(m, "\n");
 
        seq_printf(m, "ASEs implemented\t:");
index 85bff5d513e5b42ae483e414c14a4a844793b9a1..bf85cc180d9105b7c605d049ccfdb352c76cc066 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/completion.h>
 #include <linux/kallsyms.h>
 #include <linux/random.h>
+#include <linux/prctl.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -562,3 +563,98 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        smp_call_function(arch_dump_stack, NULL, 1);
 }
+
+int mips_get_process_fp_mode(struct task_struct *task)
+{
+       int value = 0;
+
+       if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
+               value |= PR_FP_MODE_FR;
+       if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
+               value |= PR_FP_MODE_FRE;
+
+       return value;
+}
+
+int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+{
+       const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+       unsigned long switch_count;
+       struct task_struct *t;
+
+       /* Check the value is valid */
+       if (value & ~known_bits)
+               return -EOPNOTSUPP;
+
+       /* Avoid inadvertently triggering emulation */
+       if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+           !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+               return -EOPNOTSUPP;
+       if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+               return -EOPNOTSUPP;
+
+       /* FR = 0 not supported in MIPS R6 */
+       if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+               return -EOPNOTSUPP;
+
+       /* Save FP & vector context, then disable FPU & MSA */
+       if (task->signal == current->signal)
+               lose_fpu(1);
+
+       /* Prevent any threads from obtaining live FP context */
+       atomic_set(&task->mm->context.fp_mode_switching, 1);
+       smp_mb__after_atomic();
+
+       /*
+        * If there are multiple online CPUs then wait until all threads whose
+        * FP mode is about to change have been context switched. This approach
+        * allows us to only worry about whether an FP mode switch is in
+        * progress when FP is first used in a task's time slice. Pretty much all
+        * of the mode switch overhead can thus be confined to cases where mode
+        * switches are actually occurring. That is, to here. However for the
+        * thread performing the mode switch it may take a while...
+        */
+       if (num_online_cpus() > 1) {
+               spin_lock_irq(&task->sighand->siglock);
+
+               for_each_thread(task, t) {
+                       if (t == current)
+                               continue;
+
+                       switch_count = t->nvcsw + t->nivcsw;
+
+                       do {
+                               spin_unlock_irq(&task->sighand->siglock);
+                               cond_resched();
+                               spin_lock_irq(&task->sighand->siglock);
+                       } while ((t->nvcsw + t->nivcsw) == switch_count);
+               }
+
+               spin_unlock_irq(&task->sighand->siglock);
+       }
+
+       /*
+        * There are now no threads of the process with live FP context, so it
+        * is safe to proceed with the FP mode switch.
+        */
+       for_each_thread(task, t) {
+               /* Update desired FP register width */
+               if (value & PR_FP_MODE_FR) {
+                       clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+               } else {
+                       set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+                       clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
+               }
+
+               /* Update desired FP single layout */
+               if (value & PR_FP_MODE_FRE)
+                       set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+               else
+                       clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+       }
+
+       /* Allow threads to use FP again */
+       atomic_set(&task->mm->context.fp_mode_switching, 0);
+
+       return 0;
+}
index 6c160c67984c014e53a3a068698fdef58a9b9345..676c5030a953bf9cca5ad038a7526d3b94ce372d 100644 (file)
@@ -34,7 +34,7 @@
        .endm
 
        .set    noreorder
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
 
 LEAF(_save_fp_context)
        .set    push
@@ -42,7 +42,8 @@ LEAF(_save_fp_context)
        cfc1    t1, fcr31
        .set    pop
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        .set    push
        SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -105,10 +106,12 @@ LEAF(_save_fp_context32)
        SET_HARDFLOAT
        cfc1    t1, fcr31
 
+#ifndef CONFIG_CPU_MIPS64_R6
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5
        bgez    t0, 1f                  # skip storing odd if FR=0
         nop
+#endif
 
        /* Store the 16 odd double precision registers */
        EX      sdc1 $f1, SC32_FPREGS+8(a0)
@@ -163,7 +166,8 @@ LEAF(_save_fp_context32)
 LEAF(_restore_fp_context)
        EX      lw t1, SC_FPC_CSR(a0)
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)  || \
+               defined(CONFIG_CPU_MIPS32_R6)
        .set    push
        SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -223,10 +227,12 @@ LEAF(_restore_fp_context32)
        SET_HARDFLOAT
        EX      lw t1, SC32_FPC_CSR(a0)
 
+#ifndef CONFIG_CPU_MIPS64_R6
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5
        bgez    t0, 1f                  # skip loading odd if FR=0
         nop
+#endif
 
        EX      ldc1 $f1, SC32_FPREGS+8(a0)
        EX      ldc1 $f3, SC32_FPREGS+24(a0)
index 64591e671878f41d9c4d0551806783b65578eb23..3b1a36f13a7dd915c235f11e073c805264f90960 100644 (file)
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        mfc0    t0, CP0_STATUS
 #endif
        fpu_save_double a0 t0 t1                # clobbers t1
@@ -126,7 +127,8 @@ LEAF(_save_fp)
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        mfc0    t0, CP0_STATUS
 #endif
        fpu_restore_double a0 t0 t1             # clobbers t1
@@ -240,9 +242,9 @@ LEAF(_init_fpu)
        mtc1    t1, $f30
        mtc1    t1, $f31
 
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
        .set    push
-       .set    mips32r2
+       .set    MIPS_ISA_LEVEL_RAW
        .set    fp=64
        sll     t0, t0, 5                       # is Status.FR set?
        bgez    t0, 1f                          # no: skip setting upper 32b
@@ -280,9 +282,9 @@ LEAF(_init_fpu)
        mthc1   t1, $f30
        mthc1   t1, $f31
 1:     .set    pop
-#endif /* CONFIG_CPU_MIPS32_R2 */
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
 #else
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
        dmtc1   t1, $f0
        dmtc1   t1, $f2
        dmtc1   t1, $f4
index 67f2495def1cd18615210c32e2cc111ea66c8fe0..d1168d7c31e8ef37c51568b93cf676e9a6c3ef81 100644 (file)
@@ -208,6 +208,7 @@ void spram_config(void)
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_P5600:
+       case CPU_QEMU_GENERIC:
                config0 = read_c0_config();
                /* FIXME: addresses are Malta specific */
                if (config0 & (1<<24)) {
index 604b558809c4c4474bfd953507ca7e54af5e9eca..53a7ef9a8f320c5b14c5abb2d3e1518c0ad7f756 100644 (file)
@@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__ (
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "       li      %[err], 0                               \n"
                "1:     ll      %[old], (%[addr])                       \n"
                "       move    %[tmp], %[new]                          \n"
index c3b41e24c05a47337509b9579d5b1302ba6f6e80..33984c04b60b710516f1b0bfb88aa52aaa04629f 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/idle.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
@@ -837,7 +838,7 @@ out:
        exception_exit(prev_state);
 }
 
-static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
        const char *str)
 {
        siginfo_t info;
@@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs)
        unsigned int opcode = 0;
        int status = -1;
 
+       /*
+        * Avoid any kernel code. Just emulate the R2 instruction
+        * as quickly as possible.
+        */
+       if (mipsr2_emulation && cpu_has_mips_r6 &&
+           likely(user_mode(regs))) {
+               if (likely(get_user(opcode, epc) >= 0)) {
+                       status = mipsr2_decoder(regs, opcode);
+                       switch (status) {
+                       case 0:
+                       case SIGEMT:
+                               task_thread_info(current)->r2_emul_return = 1;
+                               return;
+                       case SIGILL:
+                               goto no_r2_instr;
+                       default:
+                               process_fpemu_return(status,
+                                                    &current->thread.cp0_baduaddr);
+                               task_thread_info(current)->r2_emul_return = 1;
+                               return;
+                       }
+               }
+       }
+
+no_r2_instr:
+
        prev_state = exception_enter();
+
        if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
                       SIGILL) == NOTIFY_STOP)
                goto out;
@@ -1134,10 +1162,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
        return NOTIFY_OK;
 }
 
+static int wait_on_fp_mode_switch(atomic_t *p)
+{
+       /*
+        * The FP mode for this task is currently being switched. That may
+        * involve modifications to the format of this task's FP context which
+        * make it unsafe to proceed with execution for the moment. Instead,
+        * schedule some other task.
+        */
+       schedule();
+       return 0;
+}
+
 static int enable_restore_fp_context(int msa)
 {
        int err, was_fpu_owner, prior_msa;
 
+       /*
+        * If an FP mode switch is currently underway, wait for it to
+        * complete before proceeding.
+        */
+       wait_on_atomic_t(&current->mm->context.fp_mode_switching,
+                        wait_on_fp_mode_switch, TASK_KILLABLE);
+
        if (!used_math()) {
                /* First time FP context user. */
                preempt_disable();
@@ -1541,6 +1588,7 @@ static inline void parity_protection_init(void)
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_P5600:
+       case CPU_QEMU_GENERIC:
                {
 #define ERRCTL_PE      0x80000000
 #define ERRCTL_L2P     0x00800000
@@ -1630,7 +1678,7 @@ asmlinkage void cache_parity_error(void)
        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
-       if (cpu_has_mips_r2 &&
+       if ((cpu_has_mips_r2_r6) &&
            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
                        reg_val & (1<<29) ? "ED " : "",
@@ -1670,7 +1718,7 @@ asmlinkage void do_ftlb(void)
        unsigned int reg_val;
 
        /* For the moment, report the problem and hang. */
-       if (cpu_has_mips_r2 &&
+       if ((cpu_has_mips_r2_r6) &&
            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
                       read_c0_ecc());
@@ -1959,7 +2007,7 @@ static void configure_hwrena(void)
 {
        unsigned int hwrena = cpu_hwrena_impl_bits;
 
-       if (cpu_has_mips_r2)
+       if (cpu_has_mips_r2_r6)
                hwrena |= 0x0000000f;
 
        if (!noulri && cpu_has_userlocal)
@@ -2003,7 +2051,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
         *  o read IntCtl.IPTI to determine the timer interrupt
         *  o read IntCtl.IPPCI to determine the performance counter interrupt
         */
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2094,7 +2142,7 @@ void __init trap_init(void)
 #else
         ebase = CKSEG0;
 #endif
-               if (cpu_has_mips_r2)
+               if (cpu_has_mips_r2_r6)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
 
index e11906dff8850fc277b14985eafdbbb4d2c3ece9..bbb69695a0a10765f4c6bdf300da304667c021f5 100644 (file)
@@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lb("%0", "0(%2)")"\n\t"    \
+                       "2:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "3(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     LoadHWU(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define            LoadWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lbu("%0", "0(%2)")"\n\t"   \
+                       "2:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "3(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:lb\t%0, 0(%2)\n\t"               \
+                       "2:lbu\t $1, 1(%2)\n\t"             \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "3:lbu\t$1, 2(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "4:lbu\t$1, 3(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "5:lbu\t$1, 4(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "6:lbu\t$1, 5(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "7:lbu\t$1, 6(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "8:lbu\t$1, 7(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n\t"                     \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     StoreHW(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=r" (res)                        \
                        : "r" (value), "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_swl("%1", "(%2)")"\n"    \
@@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=r" (res)                                \
                : "r" (value), "r" (addr), "i" (-EFAULT));
-#endif
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_sb("%1", "3(%2)")"\n\t"    \
+                       "srl\t$1, %1, 0x8\n\t"              \
+                       "2:"user_sb("$1", "2(%2)")"\n\t"    \
+                       "srl\t$1, $1,  0x8\n\t"             \
+                       "3:"user_sb("$1", "1(%2)")"\n\t"    \
+                       "srl\t$1, $1, 0x8\n\t"              \
+                       "4:"user_sb("$1", "0(%2)")"\n\t"    \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:sb\t%1, 7(%2)\n\t"               \
+                       "dsrl\t$1, %1, 0x8\n\t"             \
+                       "2:sb\t$1, 6(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "3:sb\t$1, 5(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "4:sb\t$1, 4(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "5:sb\t$1, 3(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "6:sb\t$1, 2(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "7:sb\t$1, 1(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "8:sb\t$1, 0(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#else /* __BIG_ENDIAN */
 
-#ifdef __LITTLE_ENDIAN
 #define     LoadHW(addr, value, res)  \
                __asm__ __volatile__ (".set\tnoat\n"        \
                        "1:\t"user_lb("%0", "1(%2)")"\n"    \
@@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lb("%0", "3(%2)")"\n\t"    \
+                       "2:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "0(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     LoadHWU(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define            LoadWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lbu("%0", "3(%2)")"\n\t"   \
+                       "2:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "0(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:lb\t%0, 7(%2)\n\t"               \
+                       "2:lbu\t$1, 6(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "3:lbu\t$1, 5(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "4:lbu\t$1, 4(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "5:lbu\t$1, 3(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "6:lbu\t$1, 2(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "7:lbu\t$1, 1(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "8:lbu\t$1, 0(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n\t"                     \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     StoreHW(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=r" (res)                        \
                        : "r" (value), "r" (addr), "i" (-EFAULT));
-
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_swl("%1", "3(%2)")"\n"   \
@@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=r" (res)                                \
                : "r" (value), "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_sb("%1", "0(%2)")"\n\t"    \
+                       "srl\t$1, %1, 0x8\n\t"              \
+                       "2:"user_sb("$1", "1(%2)")"\n\t"    \
+                       "srl\t$1, $1,  0x8\n\t"             \
+                       "3:"user_sb("$1", "2(%2)")"\n\t"    \
+                       "srl\t$1, $1, 0x8\n\t"              \
+                       "4:"user_sb("$1", "3(%2)")"\n\t"    \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:sb\t%1, 0(%2)\n\t"               \
+                       "dsrl\t$1, %1, 0x8\n\t"             \
+                       "2:sb\t$1, 1(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "3:sb\t$1, 2(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "4:sb\t$1, 3(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "5:sb\t$1, 4(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "6:sb\t$1, 5(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "7:sb\t$1, 6(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "8:sb\t$1, 7(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
 #endif
 
 static void emulate_load_store_insn(struct pt_regs *regs,
@@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                        break;
                return;
 
+#ifndef CONFIG_CPU_MIPSR6
        /*
         * COP2 is available to implementor for application specific use.
         * It's up to applications to register a notifier chain and do
         * whatever they have to do, including possible sending of signals.
+        *
+        * This instruction has been reallocated in Release 6
         */
        case lwc2_op:
                cu2_notifier_call_chain(CU2_LWC2_OP, regs);
@@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case sdc2_op:
                cu2_notifier_call_chain(CU2_SDC2_OP, regs);
                break;
-
+#endif
        default:
                /*
                 * Pheeee...  We encountered an yet unknown instruction or
index eeddc58802e11a57595ee7d123e0538577af0c67..1e9e900cd3c382a4a8c3bce2ab7ca2987a1acfcd 100644 (file)
@@ -8,6 +8,7 @@ lib-y   += bitops.o csum_partial.o delay.o memcpy.o memset.o \
 
 obj-y                  += iomap.o
 obj-$(CONFIG_PCI)      += iomap-pci.o
+lib-$(CONFIG_GENERIC_CSUM)     := $(filter-out csum_partial.o, $(lib-y))
 
 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
 obj-$(CONFIG_CPU_R3000)                += r3k_dump_tlb.o
index 5d3238af9b5cc551ecb0ca9670b242b62f181a73..9245e1705e691124ad3f4eb563cfe680184c30cf 100644 (file)
         and    t0, src, ADDRMASK
        PREFS(  0, 2*32(src) )
        PREFD(  1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
        bnez    t1, .Ldst_unaligned\@
         nop
        bnez    t0, .Lsrc_unaligned_dst_aligned\@
+#else
+       or      t0, t0, t1
+       bnez    t0, .Lcopy_unaligned_bytes\@
+#endif
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
        bne     rem, len, 1b
        .set    noreorder
 
+#ifndef CONFIG_CPU_MIPSR6
        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
        bne     len, rem, 1b
        .set    noreorder
 
+#endif /* !CONFIG_CPU_MIPSR6 */
 .Lcopy_bytes_checklen\@:
        beqz    len, .Ldone\@
         nop
 .Ldone\@:
        jr      ra
         nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes\@:
+1:
+       COPY_BYTE(0)
+       COPY_BYTE(1)
+       COPY_BYTE(2)
+       COPY_BYTE(3)
+       COPY_BYTE(4)
+       COPY_BYTE(5)
+       COPY_BYTE(6)
+       COPY_BYTE(7)
+       ADD     src, src, 8
+       b       1b
+        ADD    dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
        .if __memcpy == 1
        END(memcpy)
        .set __memcpy, 0
index c8fe6b1968fb313dca7e9bb307ec89b159746730..b8e63fd0037547cb9939907696da7ae55d699a95 100644 (file)
        .set            at
 #endif
 
+#ifndef CONFIG_CPU_MIPSR6
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
        EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
        PTR_SUBU        a0, t0                  /* long align ptr */
        PTR_ADDU        a2, t0                  /* correct size */
 
+#else /* CONFIG_CPU_MIPSR6 */
+#define STORE_BYTE(N)                          \
+       EX(sb, a1, N(a0), .Lbyte_fixup\@);      \
+       beqz            t0, 0f;                 \
+       PTR_ADDU        t0, 1;
+
+       PTR_ADDU        a2, t0                  /* correct size */
+       PTR_ADDU        t0, 1
+       STORE_BYTE(0)
+       STORE_BYTE(1)
+#if LONGSIZE == 4
+       EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+       STORE_BYTE(2)
+       STORE_BYTE(3)
+       STORE_BYTE(4)
+       STORE_BYTE(5)
+       EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+       ori             a0, STORMASK
+       xori            a0, STORMASK
+       PTR_ADDIU       a0, STORSIZE
+#endif /* CONFIG_CPU_MIPSR6 */
 1:     ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
        beqz            t1, .Lmemset_partial\@  /* no block to fill */
        andi            a2, STORMASK            /* At most one long to go */
 
        beqz            a2, 1f
+#ifndef CONFIG_CPU_MIPSR6
        PTR_ADDU        a0, a2                  /* What's left */
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
 #else
        EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
 #endif
+#else
+       PTR_SUBU        t0, $0, a2
+       PTR_ADDIU       t0, 1
+       STORE_BYTE(0)
+       STORE_BYTE(1)
+#if LONGSIZE == 4
+       EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+       STORE_BYTE(2)
+       STORE_BYTE(3)
+       STORE_BYTE(4)
+       STORE_BYTE(5)
+       EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+#endif
 1:     jr              ra
        move            a2, zero
 
        .hidden __memset
        .endif
 
+.Lbyte_fixup\@:
+       PTR_SUBU        a2, $0, t0
+       jr              ra
+        PTR_ADDIU      a2, 1
+
 .Lfirst_fixup\@:
        jr      ra
        nop
index be777d9a3f85969a6b3104d392402ffe37d12c9d..272af8ac2425290c892849186b170f4b95c259b6 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/export.h>
 #include <linux/stringify.h>
 
-#ifndef CONFIG_CPU_MIPSR2
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
 
 /*
  * For cli() we have to insert nops to make sure that the new value
index 9dfcd7fc1bc3dd712980c93f95ea4b8c1f3049d9..b30bf65c7d7d81ea1ed7e714d7a3554139855207 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/processor.h>
 #include <asm/fpu_emulator.h>
 #include <asm/fpu.h>
+#include <asm/mips-r2-to-r6-emul.h>
 
 #include "ieee754.h"
 
@@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *,
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
 /* convert condition code register number to csr bit */
-static const unsigned int fpucondbit[8] = {
+const unsigned int fpucondbit[8] = {
        FPU_CSR_COND0,
        FPU_CSR_COND1,
        FPU_CSR_COND2,
@@ -448,6 +449,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                        /* Fall through */
                case jr_op:
+                       /* For R6, JR already emulated in jalr_op */
+                       if (NO_R6EMU && insn.r_format.opcode == jr_op)
+                               break;
                        *contpc = regs->regs[insn.r_format.rs];
                        return 1;
                }
@@ -456,12 +460,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                switch (insn.i_format.rt) {
                case bltzal_op:
                case bltzall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bltzall_op))
+                               break;
+
                        regs->regs[31] = regs->cp0_epc +
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
-               case bltz_op:
                case bltzl_op:
+                       if (NO_R6EMU)
+                               break;
+               case bltz_op:
                        if ((long)regs->regs[insn.i_format.rs] < 0)
                                *contpc = regs->cp0_epc +
                                        dec_insn.pc_inc +
@@ -473,12 +483,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                        return 1;
                case bgezal_op:
                case bgezall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bgezall_op))
+                               break;
+
                        regs->regs[31] = regs->cp0_epc +
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
-               case bgez_op:
                case bgezl_op:
+                       if (NO_R6EMU)
+                               break;
+               case bgez_op:
                        if ((long)regs->regs[insn.i_format.rs] >= 0)
                                *contpc = regs->cp0_epc +
                                        dec_insn.pc_inc +
@@ -505,8 +521,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                /* Set microMIPS mode bit: XOR for jalx. */
                *contpc ^= bit;
                return 1;
-       case beq_op:
        case beql_op:
+               if (NO_R6EMU)
+                       break;
+       case beq_op:
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt])
                        *contpc = regs->cp0_epc +
@@ -517,8 +535,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case bne_op:
        case bnel_op:
+               if (NO_R6EMU)
+                       break;
+       case bne_op:
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt])
                        *contpc = regs->cp0_epc +
@@ -529,8 +549,34 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case blez_op:
        case blezl_op:
+               if (NO_R6EMU)
+                       break;
+       case blez_op:
+
+               /*
+                * Compact branches for R6 for the
+                * blez and blezl opcodes.
+                * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+                * BLEZ  | rs = rt != 0      == BGEZALC
+                * BLEZ  | rs != 0 | rt != 0 == BGEUC
+                * BLEZL | rs = 0 | rt != 0  == BLEZC
+                * BLEZL | rs = rt != 0      == BGEZC
+                * BLEZL | rs != 0 | rt != 0 == BGEC
+                *
+                * For real BLEZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc;
+                       *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+
+                       return 1;
+               }
                if ((long)regs->regs[insn.i_format.rs] <= 0)
                        *contpc = regs->cp0_epc +
                                dec_insn.pc_inc +
@@ -540,8 +586,35 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case bgtz_op:
        case bgtzl_op:
+               if (NO_R6EMU)
+                       break;
+       case bgtz_op:
+               /*
+                * Compact branches for R6 for the
+                * bgtz and bgtzl opcodes.
+                * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+                * BGTZ  | rs = rt != 0      == BLTZALC
+                * BGTZ  | rs != 0 | rt != 0 == BLTUC
+                * BGTZL | rs = 0 | rt != 0  == BGTZC
+                * BGTZL | rs = rt != 0      == BLTZC
+                * BGTZL | rs != 0 | rt != 0 == BLTC
+                *
+                * *ZALC variant for BGTZ && rt != 0
+                * For real GTZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc;
+                       *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+
+                       return 1;
+               }
+
                if ((long)regs->regs[insn.i_format.rs] > 0)
                        *contpc = regs->cp0_epc +
                                dec_insn.pc_inc +
@@ -551,6 +624,16 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
+       case cbcond0_op:
+       case cbcond1_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               if (insn.i_format.rt && !insn.i_format.rs)
+                       regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        case lwc2_op: /* This is bbit0 on Octeon */
                if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
@@ -576,9 +659,73 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                else
                        *contpc = regs->cp0_epc + 8;
                return 1;
+#else
+       case bc6_op:
+               /*
+                * Only valid for MIPS R6 but we can still end up
+                * here from a broken userland so just tell emulator
+                * this is not a branch and let it break later on.
+                */
+               if  (!cpu_has_mips_r6)
+                       break;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case balc6_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case beqzcjic_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case bnezcjialc_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               if (!insn.i_format.rs)
+                       regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
 #endif
        case cop0_op:
        case cop1_op:
+               /* Need to check for R6 bc1nez and bc1eqz branches */
+               if (cpu_has_mips_r6 &&
+                   ((insn.i_format.rs == bc1eqz_op) ||
+                    (insn.i_format.rs == bc1nez_op))) {
+                       bit = 0;
+                       switch (insn.i_format.rs) {
+                       case bc1eqz_op:
+                               if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
+                                   bit = 1;
+                               break;
+                       case bc1nez_op:
+                               if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
+                                   bit = 1;
+                               break;
+                       }
+                       if (bit)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+
+                       return 1;
+               }
+               /* R2/R6 compatible cop1 instruction. Fall through */
        case cop2_op:
        case cop1x_op:
                if (insn.i_format.rs == bc_op) {
@@ -1414,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                 * achieve full IEEE-754 accuracy - however this emulator does.
                 */
                case frsqrt_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_sp_rsqrt;
                        goto scopuop;
 
                case frecip_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_sp_recip;
@@ -1616,13 +1763,13 @@ copcsr:
                 * achieve full IEEE-754 accuracy - however this emulator does.
                 */
                case frsqrt_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_dp_rsqrt;
                        goto dcopuop;
                case frecip_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_dp_recip;
index dd261df005c20c6ce7540fb458a2ec70317d6c99..3f8059602765ea9703841715e15c60a72ab1d123 100644 (file)
@@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
-                       ".set mips3\n\t"
+                       ".set "MIPS_ISA_LEVEL"\n\t"
 #ifdef CONFIG_32BIT
                        "la     $at,1f\n\t"
 #endif
@@ -1255,6 +1255,7 @@ static void probe_pcache(void)
        case CPU_P5600:
        case CPU_PROAPTIV:
        case CPU_M5150:
+       case CPU_QEMU_GENERIC:
                if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
                    (c->icache.waysize > PAGE_SIZE))
                        c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1472,7 +1473,8 @@ static void setup_scache(void)
 
        default:
                if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                                   MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+                                   MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+                                   MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
 #ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init ()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
index 70ab5d664332694e92305331f13ed15a35ab1956..7ff8637e530d7974d002594797d42044505a0467 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/ratelimit.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -28,6 +29,8 @@
 #include <asm/highmem.h>               /* For VMALLOC_END */
 #include <linux/kdebug.h>
 
+int show_unhandled_signals = 1;
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -44,6 +47,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
+       static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
 #if 0
        printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
               current->comm, current->pid, field, address, write,
@@ -203,15 +208,21 @@ bad_area_nosemaphore:
        if (user_mode(regs)) {
                tsk->thread.cp0_badvaddr = address;
                tsk->thread.error_code = write;
-#if 0
-               printk("do_page_fault() #2: sending SIGSEGV to %s for "
-                      "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
-                      tsk->comm,
-                      write ? "write access to" : "read access from",
-                      field, address,
-                      field, (unsigned long) regs->cp0_epc,
-                      field, (unsigned long) regs->regs[31]);
-#endif
+               if (show_unhandled_signals &&
+                   unhandled_signal(tsk, SIGSEGV) &&
+                   __ratelimit(&ratelimit_state)) {
+                       pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx",
+                               tsk->comm,
+                               write ? "write access to" : "read access from",
+                               field, address);
+                       pr_info("epc = %0*lx in", field,
+                               (unsigned long) regs->cp0_epc);
+                       print_vma_addr(" ", regs->cp0_epc);
+                       pr_info("ra  = %0*lx in", field,
+                               (unsigned long) regs->regs[31]);
+                       print_vma_addr(" ", regs->regs[31]);
+                       pr_info("\n");
+               }
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
index b611102e23b5c72948ef5c5b8c1e12d603a0ff15..3f85f921801b84f7f91cd67e42bbe528552d6860 100644 (file)
@@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5];
 #define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
+/*
+ * R6 has a limited offset of the pref instruction.
+ * Skip it if the offset is more than 9 bits.
+ */
+#define _uasm_i_pref(a, b, c, d)               \
+do {                                           \
+       if (cpu_has_mips_r6) {                  \
+               if (c <= 0xff && c >= -0x100)   \
+                       uasm_i_pref(a, b, c, d);\
+       } else {                                \
+               uasm_i_pref(a, b, c, d);        \
+       }                                       \
+} while(0)
+
 static int pref_bias_clear_store;
 static int pref_bias_copy_load;
 static int pref_bias_copy_store;
@@ -178,7 +192,15 @@ static void set_prefetch_parameters(void)
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_LoadStreamed;
-                       pref_dst_mode = Pref_PrepareForStore;
+                       if (cpu_has_mips_r6)
+                               /*
+                                * Bit 30 (Pref_PrepareForStore) has been
+                                * removed from MIPS R6. Use bit 5
+                                * (Pref_StoreStreamed).
+                                */
+                               pref_dst_mode = Pref_StoreStreamed;
+                       else
+                               pref_dst_mode = Pref_PrepareForStore;
                        break;
                }
        } else {
@@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_clear_store) {
-               uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
+               _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
                            A0);
        } else if (cache_line_size == (half_clear_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
@@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_copy_load)
-               uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+               _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
 }
 
 static inline void build_copy_store_pref(u32 **buf, int off)
@@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_copy_store) {
-               uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
+               _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
                            A0);
        } else if (cache_line_size == (half_copy_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
index 99eb8fabab606afe28781620301f430e4b776fd4..4ceafd13870cd6945634713f04ac3f7d65ba52d2 100644 (file)
@@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
        case CPU_PROAPTIV:
        case CPU_P5600:
        case CPU_BMIPS5000:
+       case CPU_QEMU_GENERIC:
                if (config2 & (1 << 12))
                        return 0;
        }
@@ -104,7 +105,8 @@ static inline int __init mips_sc_probe(void)
 
        /* Ignore anything but MIPSxx processors */
        if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                             MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+                             MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+                             MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
                return 0;
 
        /* Does this MIPS32/MIPS64 CPU have a config2 register? */
index 30639a6e9b8ca3ad3677afb0cbd553b9c9472c18..b2afa49beab082116e282f8b043cb2a65e4c06ce 100644 (file)
@@ -485,13 +485,11 @@ static void r4k_tlb_configure(void)
                 * Enable the no read, no exec bits, and enable large virtual
                 * address.
                 */
-               u32 pg = PG_RIE | PG_XIE;
 #ifdef CONFIG_64BIT
-               pg |= PG_ELPA;
+               set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
+#else
+               set_c0_pagegrain(PG_RIE | PG_XIE);
 #endif
-               if (cpu_has_rixiex)
-                       pg |= PG_IEC;
-               write_c0_pagegrain(pg);
        }
 
        temp_tlb_entry = current_cpu_data.tlbsize - 1;
index 3978a3d813666f8566159ac20c705c2a5c88df53..d75ff73a20120bf28d919b7b6b5bf30a22b5088f 100644 (file)
@@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }
 
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_exec_hazard) {
                /*
                 * The architecture spec says an ehb is required here,
                 * but a number of cores do not have the hazard and
@@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
                case CPU_PROAPTIV:
                case CPU_P5600:
                case CPU_M5150:
+               case CPU_QEMU_GENERIC:
                        break;
 
                default:
@@ -1952,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void)
 
                switch (current_cpu_type()) {
                default:
-                       if (cpu_has_mips_r2) {
+                       if (cpu_has_mips_r2_exec_hazard) {
                                uasm_i_ehb(&p);
 
                case CPU_CAVIUM_OCTEON:
@@ -2019,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void)
 
                switch (current_cpu_type()) {
                default:
-                       if (cpu_has_mips_r2) {
+                       if (cpu_has_mips_r2_exec_hazard) {
                                uasm_i_ehb(&p);
 
                case CPU_CAVIUM_OCTEON:
index 8399ddf03a0235c5ce9d5ee28ed5007c9986a8ab..d78178daea4bc2c1e069d2069db976b1ce693740 100644 (file)
         | (e) << RE_SH                                         \
         | (f) << FUNC_SH)
 
-/* Define these when we are not the ISA the kernel is being compiled with. */
-#ifndef CONFIG_CPU_MICROMIPS
-#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
-#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
-#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
-#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
-#endif
-
 #include "uasm.c"
 
 static struct insn insn_table_MM[] = {
index 8e02291cfc0c1c2d8edeb5733498e877bff09542..b4a8378935625b2e1d0f228f6961ebd8208544f1 100644 (file)
         | (e) << RE_SH                                         \
         | (f) << FUNC_SH)
 
-/* Define these when we are not the ISA the kernel is being compiled with. */
-#ifdef CONFIG_CPU_MICROMIPS
-#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
-#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
-#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
-#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
-#endif
+/* This macro sets the non-variable bits of an R6 instruction. */
+#define M6(a, b, c, d, e)                                      \
+       ((a) << OP_SH                                           \
+        | (b) << RS_SH                                         \
+        | (c) << RT_SH                                         \
+        | (d) << SIMM9_SH                                      \
+        | (e) << FUNC_SH)
 
 #include "uasm.c"
 
@@ -62,7 +62,11 @@ static struct insn insn_table[] = {
        { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
        { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
        { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_cache,  M6(cache_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+#endif
        { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
        { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -85,13 +89,22 @@ static struct insn insn_table[] = {
        { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
        { insn_jalr,  M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
        { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+#else
+       { insn_jr,  M(spec_op, 0, 0, 0, 0, jalr_op),  RS },
+#endif
        { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
        { insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_lld,  M6(spec3_op, 0, 0, 0, lld6_op),  RS | RT | SIMM9 },
+       { insn_ll,  M6(spec3_op, 0, 0, 0, ll6_op),  RS | RT | SIMM9 },
+#endif
        { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
        { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -104,11 +117,20 @@ static struct insn insn_table[] = {
        { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
        { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
        { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_pref,  M6(spec3_op, 0, 0, 0, pref6_op),  RS | RT | SIMM9 },
+#endif
        { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
        { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_scd,  M6(spec3_op, 0, 0, 0, scd6_op),  RS | RT | SIMM9 },
+       { insn_sc,  M6(spec3_op, 0, 0, 0, sc6_op),  RS | RT | SIMM9 },
+#endif
        { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
        { insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
@@ -198,6 +220,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
                op |= build_set(va_arg(ap, u32));
        if (ip->fields & SCIMM)
                op |= build_scimm(va_arg(ap, u32));
+       if (ip->fields & SIMM9)
+               op |= build_scimm9(va_arg(ap, u32));
        va_end(ap);
 
        **buf = op;
index 4adf30284813a93aa11143ddbfb433831327e329..319051c34343ae9e0eb7c5d4adf82732e5598cd6 100644 (file)
@@ -24,7 +24,8 @@ enum fields {
        JIMM = 0x080,
        FUNC = 0x100,
        SET = 0x200,
-       SCIMM = 0x400
+       SCIMM = 0x400,
+       SIMM9 = 0x800,
 };
 
 #define OP_MASK                0x3f
@@ -41,6 +42,8 @@ enum fields {
 #define FUNC_SH                0
 #define SET_MASK       0x7
 #define SET_SH         0
+#define SIMM9_SH       7
+#define SIMM9_MASK     0x1ff
 
 enum opcode {
        insn_invalid,
@@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg)
        return (arg & SCIMM_MASK) << SCIMM_SH;
 }
 
+static inline u32 build_scimm9(s32 arg)
+{
+       WARN((arg > 0xff || arg < -0x100),
+              KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
 static inline u32 build_func(u32 arg)
 {
        WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -330,7 +341,7 @@ I_u3u1u2(_ldx)
 void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
                            unsigned int c)
 {
-       if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
+       if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
                /*
                 * As per erratum Core-14449, replace prefetches 0-4,
                 * 6-24 with 'pref 28'.
index ec1dd2491f962da8103e616b3c91b3758e0ddcc9..e1d69895fb1de44f5d8503027f86ebb50f40d5a6 100644 (file)
@@ -72,7 +72,7 @@ void read_persistent_clock(struct timespec *ts)
 int get_c0_perfcount_int(void)
 {
        if (gic_present)
-               return gic_get_c0_compare_int();
+               return gic_get_c0_perfcount_int();
        if (cp0_perfcount_irq >= 0)
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        return -1;
index f2355e3e65a10c4259015c48d273236630102c2d..f97e169393bc3060eb778835cbb1b4813f31b140 100644 (file)
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
 }
 
 struct pci_ops bcm1480_pci_ops = {
-       .read = bcm1480_pcibios_read,
-       .write = bcm1480_pcibios_write,
+       .read   = bcm1480_pcibios_read,
+       .write  = bcm1480_pcibios_write,
 };
 
 static struct resource bcm1480_mem_resource = {
index bedb72bd3a27155fb4068cfb7b9ee69d589f0c22..a04af55d89f10a55593b2b16f53f20c7c16ee95d 100644 (file)
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
 
 
 static struct pci_ops octeon_pci_ops = {
-       .read = octeon_read_config,
-       .write = octeon_write_config,
+       .read   = octeon_read_config,
+       .write  = octeon_write_config,
 };
 
 static struct resource octeon_pci_mem_resource = {
index eb4a17ba4a530a9a73702bfb6fd6db1890295bb4..1bb0b2bf8d6ea1e411266fd3d8de6a5afc50fba0 100644 (file)
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops octeon_pcie0_ops = {
-       .read = octeon_pcie0_read_config,
-       .write = octeon_pcie0_write_config,
+       .read   = octeon_pcie0_read_config,
+       .write  = octeon_pcie0_write_config,
 };
 
 static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
 };
 
 static struct pci_ops octeon_pcie1_ops = {
-       .read = octeon_pcie1_read_config,
-       .write = octeon_pcie1_write_config,
+       .read   = octeon_pcie1_read_config,
+       .write  = octeon_pcie1_write_config,
 };
 
 static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
 };
 
 static struct pci_ops octeon_dummy_ops = {
-       .read = octeon_dummy_read_config,
-       .write = octeon_dummy_write_config,
+       .read   = octeon_dummy_read_config,
+       .write  = octeon_dummy_write_config,
 };
 
 static struct resource octeon_dummy_mem_resource = {
index 6073ca456d110cace96281adf4337f5104c5d22c..4190093d30533d7d8046c9ad5342a28426c70a75 100644 (file)
@@ -36,14 +36,14 @@ config PMC_MSP7120_FPGA
 endchoice
 
 config MSP_HAS_USB
-       boolean
+       bool
        depends on PMC_MSP
 
 config MSP_ETH
-       boolean
+       bool
        select MSP_HAS_MAC
        depends on PMC_MSP
 
 config MSP_HAS_MAC
-       boolean
+       bool
        depends on PMC_MSP
index 8f1b86d4da84a8f4a4f894e50f61a76ef009554f..cdf1876000101e25a153f04415e622820418ba13 100644 (file)
@@ -152,28 +152,6 @@ static int gio_device_remove(struct device *dev)
        return 0;
 }
 
-static int gio_device_suspend(struct device *dev, pm_message_t state)
-{
-       struct gio_device *gio_dev = to_gio_device(dev);
-       struct gio_driver *drv = to_gio_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->suspend)
-               error = drv->suspend(gio_dev, state);
-       return error;
-}
-
-static int gio_device_resume(struct device *dev)
-{
-       struct gio_device *gio_dev = to_gio_device(dev);
-       struct gio_driver *drv = to_gio_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->resume)
-               error = drv->resume(gio_dev);
-       return error;
-}
-
 static void gio_device_shutdown(struct device *dev)
 {
        struct gio_device *gio_dev = to_gio_device(dev);
@@ -400,8 +378,6 @@ static struct bus_type gio_bus_type = {
        .match     = gio_bus_match,
        .probe     = gio_device_probe,
        .remove    = gio_device_remove,
-       .suspend   = gio_device_suspend,
-       .resume    = gio_device_resume,
        .shutdown  = gio_device_shutdown,
        .uevent    = gio_device_uevent,
 };
index ac37e54b3d5e7454fc2c78daf88008655d6b8870..e44a15d4f57364be376af2ee4c9c625d13e53371 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
@@ -25,9 +26,9 @@
 #include <asm/sn/gda.h>
 #include <asm/sn/sn0/hub.h>
 
-void machine_restart(char *command) __attribute__((noreturn));
-void machine_halt(void) __attribute__((noreturn));
-void machine_power_off(void) __attribute__((noreturn));
+void machine_restart(char *command) __noreturn;
+void machine_halt(void) __noreturn;
+void machine_power_off(void) __noreturn;
 
 #define noreturn while(1);                             /* Silence gcc.  */
 
index 1f823da4c77bca73980094d4b27a043d24cffaa8..44b3470a0bbb71babcc9af5131d566d22aa317c5 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org>
  */
 
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -35,9 +36,9 @@
 static struct timer_list power_timer, blink_timer, debounce_timer;
 static int has_panicked, shuting_down;
 
-static void ip32_machine_restart(char *command) __attribute__((noreturn));
-static void ip32_machine_halt(void) __attribute__((noreturn));
-static void ip32_machine_power_off(void) __attribute__((noreturn));
+static void ip32_machine_restart(char *command) __noreturn;
+static void ip32_machine_halt(void) __noreturn;
+static void ip32_machine_power_off(void) __noreturn;
 
 static void ip32_machine_restart(char *cmd)
 {
index afab728ab65ecde48b3c50c78e1dd19a795ad99d..96d3f9deb59c38b2172f7b3b7a81c1397ceebad9 100644 (file)
@@ -56,7 +56,9 @@ extern void paging_init(void);
 #define PGDIR_SHIFT    22
 #define PTRS_PER_PGD   1024
 #define PTRS_PER_PUD   1       /* we don't really have any PUD physically */
+#define __PAGETABLE_PUD_FOLDED
 #define PTRS_PER_PMD   1       /* we don't really have any PMD physically */
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PTE   1024
 
 #define PGD_SIZE       PAGE_SIZE
diff --git a/arch/mn10300/unit-asb2305/pci-iomap.c b/arch/mn10300/unit-asb2305/pci-iomap.c
deleted file mode 100644 (file)
index bd65dae..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* ASB2305 PCI I/O mapping handler
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/pci.h>
-#include <linux/module.h>
-
-/*
- * Create a virtual mapping cookie for a PCI BAR (memory or IO)
- */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-
-       if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) {
-               if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO))
-                       return ioremap(start, len);
-               else
-                       return ioremap_nocache(start, len);
-       }
-
-       return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
index ab2e7a198a4cfedfc0bb99249ec597d26333db5b..a6bd07ca3d6c08dc49af6ab4738fe85e1ddcd485 100644 (file)
@@ -192,7 +192,7 @@ struct __large_struct {
 ({                                                             \
        long __gu_err, __gu_val;                                \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
-       (x) = (__typeof__(*(ptr)))__gu_val;                     \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
 })
 
@@ -202,7 +202,7 @@ struct __large_struct {
        const __typeof__(*(ptr)) * __gu_addr = (ptr);                   \
        if (access_ok(VERIFY_READ, __gu_addr, size))                    \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
 })
 
index 91fbb6ee702cb7927e5879576a7926746f4000f8..965a0999fc4c081228a34a61daec7f1ef032a107 100644 (file)
@@ -148,7 +148,7 @@ endef
 # we require gcc 3.3 or above to compile the kernel
 archprepare: checkbin
 checkbin:
-       @if test "$(call cc-version)" -lt "0303"; then \
+       @if test "$(cc-version)" -lt "0303"; then \
                echo -n "Sorry, GCC v3.3 or above is required to build " ; \
                echo "the kernel." ; \
                false ; \
index 8c966b2270aa6692c6f085027a8c70098b447733..15207b9362bfd0947064f7572ad34fdfb6e4d30c 100644 (file)
@@ -96,6 +96,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 #if PT_NLEVELS == 3
 #define BITS_PER_PMD   (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
 #else
+#define __PAGETABLE_PMD_FOLDED
 #define BITS_PER_PMD   0
 #endif
 #define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
index 132d9c681d6ae275ca6129a20fee4b9e55487840..fc502e042438c237c3c32cfd3fbfd13eb1d057c9 100644 (file)
@@ -314,7 +314,7 @@ TOUT        := .tmp_gas_check
 # - Require gcc 4.0 or above on 64-bit
 # - gcc-4.2.0 has issues compiling modules on 64-bit
 checkbin:
-       @if test "$(call cc-version)" = "0304" ; then \
+       @if test "$(cc-version)" = "0304" ; then \
                if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
                        echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
                        echo 'correctly with gcc-3.4 and your version of binutils.'; \
@@ -322,13 +322,13 @@ checkbin:
                        false; \
                fi ; \
        fi
-       @if test "$(call cc-version)" -lt "0400" \
+       @if test "$(cc-version)" -lt "0400" \
            && test "x${CONFIG_PPC64}" = "xy" ; then \
                 echo -n "Sorry, GCC v4.0 or above is required to build " ; \
                 echo "the 64-bit powerpc kernel." ; \
                 false ; \
         fi
-       @if test "$(call cc-fullversion)" = "040200" \
+       @if test "$(cc-fullversion)" = "040200" \
            && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
                echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
                echo 'kernel with modules enabled.' ; \
index 51866f17068457038503ff14520ef9a67e3a0d7d..ca7957b09a3cc13ad6882006468a7e3431e7a75f 100644 (file)
@@ -142,6 +142,7 @@ CONFIG_VIRT_DRIVERS=y
 CONFIG_FSL_HV_MANAGER=y
 CONFIG_STAGING=y
 CONFIG_FSL_CORENET_CF=y
+CONFIG_CLK_QORIQ=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
index d6c0c819895288c39bd826085e7fadb6793b360a..04737aaa8b6b8ead5bfc2574fabe48afd78c872c 100644 (file)
@@ -122,6 +122,7 @@ CONFIG_DMADEVICES=y
 CONFIG_FSL_DMA=y
 CONFIG_VIRT_DRIVERS=y
 CONFIG_FSL_HV_MANAGER=y
+CONFIG_CLK_QORIQ=y
 CONFIG_FSL_CORENET_CF=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
index 7316dd15278a35f60b710d0a348bc0d9328830fb..2d7b33fab953283cf391679b1e316e81f53852f9 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/irq_work.h>
+#include <linux/clk-provider.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -975,6 +976,10 @@ void __init time_init(void)
 
        init_decrementer_clockevent();
        tick_setup_hrtimer_broadcast();
+
+#ifdef CONFIG_COMMON_CLK
+       of_clk_init(NULL);
+#endif
 }
 
 
index 6eb614a271fbe07390271aca65d802e37d3a6481..f691bcabd71013e23a09a07bfec61a592e1cd237 100644 (file)
@@ -1168,6 +1168,11 @@ static void mpc5121_clk_provide_backwards_compat(void)
        }
 }
 
+/*
+ * The "fixed-clock" nodes (which includes the oscillator node if the board's
+ * DT provides one) has already been scanned by the of_clk_init() in
+ * time_init().
+ */
 int __init mpc5121_clk_init(void)
 {
        struct device_node *clk_np;
@@ -1186,12 +1191,6 @@ int __init mpc5121_clk_init(void)
        /* invalidate all not yet registered clock slots */
        mpc512x_clk_preset_data();
 
-       /*
-        * have the device tree scanned for "fixed-clock" nodes (which
-        * includes the oscillator node if the board's DT provides one)
-        */
-       of_clk_init(NULL);
-
        /*
         * add a dummy clock for those situations where a clock spec is
         * required yet no real clock is involved
index 4c8008dd938e8d0eb5c4fdc18b35b2cfc90f6807..99824ff8dd354e74ff421a2c9bb59243e045d541 100644 (file)
@@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry)
        parent = dentry->d_parent;
        mutex_lock(&parent->d_inode->i_mutex);
        if (hypfs_positive(dentry)) {
-               if (S_ISDIR(dentry->d_inode->i_mode))
+               if (d_is_dir(dentry))
                        simple_rmdir(parent->d_inode, dentry);
                else
                        simple_unlink(parent->d_inode, dentry);
@@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp)
        return nonseekable_open(inode, filp);
 }
 
-static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t offset)
+static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       char *data;
-       ssize_t ret;
-       struct file *filp = iocb->ki_filp;
-       /* XXX: temporary */
-       char __user *buf = iov[0].iov_base;
-       size_t count = iov[0].iov_len;
-
-       if (nr_segs != 1)
-               return -EINVAL;
-
-       data = filp->private_data;
-       ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
-       if (ret <= 0)
-               return ret;
+       struct file *file = iocb->ki_filp;
+       char *data = file->private_data;
+       size_t available = strlen(data);
+       loff_t pos = iocb->ki_pos;
+       size_t count;
 
-       iocb->ki_pos += ret;
-       file_accessed(filp);
-
-       return ret;
+       if (pos < 0)
+               return -EINVAL;
+       if (pos >= available || !iov_iter_count(to))
+               return 0;
+       count = copy_to_iter(data + pos, available - pos, to);
+       if (!count)
+               return -EFAULT;
+       iocb->ki_pos = pos + count;
+       file_accessed(file);
+       return count;
 }
-static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t offset)
+
+static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        int rc;
        struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
        struct hypfs_sb_info *fs_info = sb->s_fs_info;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(from);
 
        /*
         * Currently we only allow one update per second for two reasons:
@@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
        }
        hypfs_update_update(sb);
        rc = count;
+       iov_iter_advance(from, count);
 out:
        mutex_unlock(&fs_info->lock);
        return rc;
@@ -440,10 +437,10 @@ struct dentry *hypfs_create_str(struct dentry *dir,
 static const struct file_operations hypfs_file_ops = {
        .open           = hypfs_open,
        .release        = hypfs_release,
-       .read           = do_sync_read,
-       .write          = do_sync_write,
-       .aio_read       = hypfs_aio_read,
-       .aio_write      = hypfs_aio_write,
+       .read           = new_sync_read,
+       .write          = new_sync_write,
+       .read_iter      = hypfs_read_iter,
+       .write_iter     = hypfs_write_iter,
        .llseek         = no_llseek,
 };
 
index f664e96f48c7bdb8e4174028546fa4d2ce0eca05..1a9a98de5bdebc346c469f55818f08ed33b576e7 100644 (file)
@@ -16,6 +16,7 @@
 struct zpci_iomap_entry {
        u32 fh;
        u8 bar;
+       u16 count;
 };
 
 extern struct zpci_iomap_entry *zpci_iomap_start;
index fbb5ee3ae57c6d21b589a2a3f38437a17a218f2b..e08ec38f8c6eb74b7cf998e18e022db4c206b616 100644 (file)
@@ -91,7 +91,9 @@ extern unsigned long zero_page_mask;
  */
 #define PTRS_PER_PTE   256
 #ifndef CONFIG_64BIT
+#define __PAGETABLE_PUD_FOLDED
 #define PTRS_PER_PMD   1
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PUD   1
 #else /* CONFIG_64BIT */
 #define PTRS_PER_PMD   2048
index c4fbb9527c5ca2b553ca98c3f7887ea236fdff1c..b1453a2ae1ca583b2d4a0dc99bc325fd30ddf10d 100644 (file)
@@ -18,15 +18,15 @@ struct cpu_topology_s390 {
        cpumask_t book_mask;
 };
 
-extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 
-#define topology_physical_package_id(cpu)      (cpu_topology[cpu].socket_id)
-#define topology_thread_id(cpu)                        (cpu_topology[cpu].thread_id)
-#define topology_thread_cpumask(cpu)           (&cpu_topology[cpu].thread_mask)
-#define topology_core_id(cpu)                  (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu)             (&cpu_topology[cpu].core_mask)
-#define topology_book_id(cpu)                  (cpu_topology[cpu].book_id)
-#define topology_book_cpumask(cpu)             (&cpu_topology[cpu].book_mask)
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu)                  (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu)     (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu)            (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu)            (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).book_mask)
 
 #define mc_capable() 1
 
@@ -51,14 +51,6 @@ static inline void topology_expect_change(void) { }
 #define POLARIZATION_VM                (2)
 #define POLARIZATION_VH                (3)
 
-#ifdef CONFIG_SCHED_BOOK
-void s390_init_cpu_topology(void);
-#else
-static inline void s390_init_cpu_topology(void)
-{
-};
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
index 632fa06ea162c567923044ddf4e3ced0339d24bd..0969d113b3d68f290a95b3b7e1484bf5c6b8caa3 100644 (file)
@@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
 {
        if (level >= CACHE_MAX_LEVEL)
                return CACHE_TYPE_NOCACHE;
-
        ci += level;
-
        if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
                return CACHE_TYPE_NOCACHE;
-
        return cache_type_map[ci->type];
 }
 
@@ -111,23 +108,19 @@ static inline unsigned long ecag(int ai, int li, int ti)
 }
 
 static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
-                        enum cache_type type, unsigned int level)
+                        enum cache_type type, unsigned int level, int cpu)
 {
        int ti, num_sets;
-       int cpu = smp_processor_id();
 
        if (type == CACHE_TYPE_INST)
                ti = CACHE_TI_INSTRUCTION;
        else
                ti = CACHE_TI_UNIFIED;
-
        this_leaf->level = level + 1;
        this_leaf->type = type;
        this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-       this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
-                                               level, ti);
+       this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
        this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-
        num_sets = this_leaf->size / this_leaf->coherency_line_size;
        num_sets /= this_leaf->ways_of_associativity;
        this_leaf->number_of_sets = num_sets;
@@ -145,7 +138,6 @@ int init_cache_level(unsigned int cpu)
 
        if (!this_cpu_ci)
                return -EINVAL;
-
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        do {
                ctype = get_cache_type(&ct.ci[0], level);
@@ -154,34 +146,31 @@ int init_cache_level(unsigned int cpu)
                /* Separate instruction and data caches */
                leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
        } while (++level < CACHE_MAX_LEVEL);
-
        this_cpu_ci->num_levels = level;
        this_cpu_ci->num_leaves = leaves;
-
        return 0;
 }
 
 int populate_cache_leaves(unsigned int cpu)
 {
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       struct cacheinfo *this_leaf = this_cpu_ci->info_list;
        unsigned int level, idx, pvt;
        union cache_topology ct;
        enum cache_type ctype;
-       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-       struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
             idx < this_cpu_ci->num_leaves; idx++, level++) {
                if (!this_leaf)
                        return -EINVAL;
-
                pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
                ctype = get_cache_type(&ct.ci[0], level);
                if (ctype == CACHE_TYPE_SEPARATE) {
-                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
-                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
+                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
                } else {
-                       ci_leaf_init(this_leaf++, pvt, ctype, level);
+                       ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
                }
        }
        return 0;
index 70a3294509018e33c49d6b0a383c841d79d2f3af..4427ab7ac23af3e0450e4875b36b83ead687c36d 100644 (file)
@@ -393,17 +393,19 @@ static __init void detect_machine_facilities(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
        if (test_facility(129))
                S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-       if (test_facility(128))
-               S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
 #endif
 }
 
-static int __init nocad_setup(char *str)
+static int __init cad_setup(char *str)
 {
-       S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+       int val;
+
+       get_option(&str, &val);
+       if (val && test_facility(128))
+               S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
        return 0;
 }
-early_param("nocad", nocad_setup);
+early_param("cad", cad_setup);
 
 static int __init cad_init(void)
 {
index bfac77ada4f28137b5545d9268d37346eb3af0ff..a5ea8bc17cb3bdd24f085260ca5d03364d509bec 100644 (file)
@@ -909,7 +909,6 @@ void __init setup_arch(char **cmdline_p)
        setup_lowcore();
        smp_fill_possible_mask();
         cpu_init();
-       s390_init_cpu_topology();
 
        /*
         * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
index a668993ff577f95d40aff6f474e082374967a79b..db8f1115a3bf5530b82eb53d00931cbe94d6522b 100644 (file)
@@ -59,14 +59,13 @@ enum {
        CPU_STATE_CONFIGURED,
 };
 
+static DEFINE_PER_CPU(struct cpu *, cpu_device);
+
 struct pcpu {
-       struct cpu *cpu;
        struct _lowcore *lowcore;       /* lowcore page(s) for the cpu */
-       unsigned long async_stack;      /* async stack for the cpu */
-       unsigned long panic_stack;      /* panic stack for the cpu */
        unsigned long ec_mask;          /* bit mask for ec_xxx functions */
-       int state;                      /* physical cpu state */
-       int polarization;               /* physical polarization */
+       signed char state;              /* physical cpu state */
+       signed char polarization;       /* physical polarization */
        u16 address;                    /* physical cpu address */
 };
 
@@ -173,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
        pcpu_sigp_retry(pcpu, order, 0);
 }
 
+#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
+       unsigned long async_stack, panic_stack;
        struct _lowcore *lc;
 
        if (pcpu != &pcpu_devices[0]) {
                pcpu->lowcore = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-               pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-               pcpu->panic_stack = __get_free_page(GFP_KERNEL);
-               if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
+               async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+               panic_stack = __get_free_page(GFP_KERNEL);
+               if (!pcpu->lowcore || !panic_stack || !async_stack)
                        goto out;
+       } else {
+               async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
+               panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
        }
        lc = pcpu->lowcore;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
-       lc->async_stack = pcpu->async_stack + ASYNC_SIZE
-               - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-       lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
-               - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+       lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
+       lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
 #ifndef CONFIG_64BIT
@@ -212,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
        return 0;
 out:
        if (pcpu != &pcpu_devices[0]) {
-               free_page(pcpu->panic_stack);
-               free_pages(pcpu->async_stack, ASYNC_ORDER);
+               free_page(panic_stack);
+               free_pages(async_stack, ASYNC_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
        }
        return -ENOMEM;
@@ -235,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
 #else
        vdso_free_per_cpu(pcpu->lowcore);
 #endif
-       if (pcpu != &pcpu_devices[0]) {
-               free_page(pcpu->panic_stack);
-               free_pages(pcpu->async_stack, ASYNC_ORDER);
-               free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
-       }
+       if (pcpu == &pcpu_devices[0])
+               return;
+       free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
+       free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
+       free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -366,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
        pcpu_delegate(&pcpu_devices[0], func, data,
-                     pcpu_devices->panic_stack + PAGE_SIZE);
+                     pcpu_devices->lowcore->panic_stack -
+                     PANIC_FRAME_OFFSET + PAGE_SIZE);
 }
 
 int smp_find_processor_id(u16 address)
@@ -935,10 +940,6 @@ void __init smp_prepare_boot_cpu(void)
        pcpu->state = CPU_STATE_CONFIGURED;
        pcpu->address = stap();
        pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
-       pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
-               + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-       pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
-               + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
        set_cpu_present(0, true);
@@ -1078,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
 {
        unsigned int cpu = (unsigned int)(long)hcpu;
-       struct cpu *c = pcpu_devices[cpu].cpu;
-       struct device *s = &c->dev;
+       struct device *s = &per_cpu(cpu_device, cpu)->dev;
        int err = 0;
 
        switch (action & ~CPU_TASKS_FROZEN) {
@@ -1102,7 +1102,7 @@ static int smp_add_present_cpu(int cpu)
        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
-       pcpu_devices[cpu].cpu = c;
+       per_cpu(cpu_device, cpu) = c;
        s = &c->dev;
        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
index 24ee33f1af24228e04686700523819fcb77afdf2..14da43b801d93c2041f2d4964028fc180301eeff 100644 (file)
@@ -7,14 +7,14 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
-#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock);
 static struct mask_info socket_info;
 static struct mask_info book_info;
 
-struct cpu_topology_s390 cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
+DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
                if (lcpu < 0)
                        continue;
                for (i = 0; i <= smp_cpu_mtid; i++) {
-                       cpu_topology[lcpu + i].book_id = book->id;
-                       cpu_topology[lcpu + i].core_id = rcore;
-                       cpu_topology[lcpu + i].thread_id = lcpu + i;
+                       per_cpu(cpu_topology, lcpu + i).book_id = book->id;
+                       per_cpu(cpu_topology, lcpu + i).core_id = rcore;
+                       per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
                        cpumask_set_cpu(lcpu + i, &book->mask);
                        cpumask_set_cpu(lcpu + i, &socket->mask);
                        if (one_socket_per_cpu)
-                               cpu_topology[lcpu + i].socket_id = rcore;
+                               per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
                        else
-                               cpu_topology[lcpu + i].socket_id = socket->id;
+                               per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
                        smp_cpu_set_polarization(lcpu + i, tl_core->pp);
                }
                if (one_socket_per_cpu)
@@ -249,14 +249,14 @@ static void update_cpu_masks(void)
 
        spin_lock_irqsave(&topology_lock, flags);
        for_each_possible_cpu(cpu) {
-               cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
-               cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
-               cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+               per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
+               per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
+               per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
                if (!MACHINE_HAS_TOPOLOGY) {
-                       cpu_topology[cpu].thread_id = cpu;
-                       cpu_topology[cpu].core_id = cpu;
-                       cpu_topology[cpu].socket_id = cpu;
-                       cpu_topology[cpu].book_id = cpu;
+                       per_cpu(cpu_topology, cpu).thread_id = cpu;
+                       per_cpu(cpu_topology, cpu).core_id = cpu;
+                       per_cpu(cpu_topology, cpu).socket_id = cpu;
+                       per_cpu(cpu_topology, cpu).book_id = cpu;
                }
        }
        spin_unlock_irqrestore(&topology_lock, flags);
@@ -334,50 +334,6 @@ void topology_expect_change(void)
        set_topology_timer();
 }
 
-static int __init early_parse_topology(char *p)
-{
-       if (strncmp(p, "off", 3))
-               return 0;
-       topology_enabled = 0;
-       return 0;
-}
-early_param("topology", early_parse_topology);
-
-static void __init alloc_masks(struct sysinfo_15_1_x *info,
-                              struct mask_info *mask, int offset)
-{
-       int i, nr_masks;
-
-       nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
-       for (i = 0; i < info->mnest - offset; i++)
-               nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
-       nr_masks = max(nr_masks, 1);
-       for (i = 0; i < nr_masks; i++) {
-               mask->next = alloc_bootmem_align(
-                       roundup_pow_of_two(sizeof(struct mask_info)),
-                       roundup_pow_of_two(sizeof(struct mask_info)));
-               mask = mask->next;
-       }
-}
-
-void __init s390_init_cpu_topology(void)
-{
-       struct sysinfo_15_1_x *info;
-       int i;
-
-       if (!MACHINE_HAS_TOPOLOGY)
-               return;
-       tl_info = alloc_bootmem_pages(PAGE_SIZE);
-       info = tl_info;
-       store_topology(info);
-       pr_info("The CPU configuration topology of the machine is:");
-       for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-               printk(KERN_CONT " %d", info->mag[i]);
-       printk(KERN_CONT " / %d\n", info->mnest);
-       alloc_masks(info, &socket_info, 1);
-       alloc_masks(info, &book_info, 2);
-}
-
 static int cpu_management;
 
 static ssize_t dispatching_show(struct device *dev,
@@ -467,20 +423,29 @@ int topology_cpu_init(struct cpu *cpu)
 
 const struct cpumask *cpu_thread_mask(int cpu)
 {
-       return &cpu_topology[cpu].thread_mask;
+       return &per_cpu(cpu_topology, cpu).thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-       return &cpu_topology[cpu].core_mask;
+       return &per_cpu(cpu_topology, cpu).core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-       return &cpu_topology[cpu].book_mask;
+       return &per_cpu(cpu_topology, cpu).book_mask;
 }
 
+static int __init early_parse_topology(char *p)
+{
+       if (strncmp(p, "off", 3))
+               return 0;
+       topology_enabled = 0;
+       return 0;
+}
+early_param("topology", early_parse_topology);
+
 static struct sched_domain_topology_level s390_topology[] = {
        { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
@@ -489,6 +454,42 @@ static struct sched_domain_topology_level s390_topology[] = {
        { NULL, },
 };
 
+static void __init alloc_masks(struct sysinfo_15_1_x *info,
+                              struct mask_info *mask, int offset)
+{
+       int i, nr_masks;
+
+       nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
+       for (i = 0; i < info->mnest - offset; i++)
+               nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
+       nr_masks = max(nr_masks, 1);
+       for (i = 0; i < nr_masks; i++) {
+               mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+               mask = mask->next;
+       }
+}
+
+static int __init s390_topology_init(void)
+{
+       struct sysinfo_15_1_x *info;
+       int i;
+
+       if (!MACHINE_HAS_TOPOLOGY)
+               return 0;
+       tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+       info = tl_info;
+       store_topology(info);
+       pr_info("The CPU configuration topology of the machine is:");
+       for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+               printk(KERN_CONT " %d", info->mag[i]);
+       printk(KERN_CONT " / %d\n", info->mnest);
+       alloc_masks(info, &socket_info, 1);
+       alloc_masks(info, &book_info, 2);
+       set_sched_topology(s390_topology);
+       return 0;
+}
+early_initcall(s390_topology_init);
+
 static int __init topology_init(void)
 {
        if (MACHINE_HAS_TOPOLOGY)
@@ -498,10 +499,3 @@ static int __init topology_init(void)
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
-
-static int __init early_topology_init(void)
-{
-       set_sched_topology(s390_topology);
-       return 0;
-}
-early_initcall(early_topology_init);
index 7699e735ae28ed726f4725ee97619119e1aa5e1e..61541fb93dc63e0f4673dcfe12e9d69681eb0993 100644 (file)
@@ -25,9 +25,7 @@ __kernel_clock_gettime:
        je      4f
        cghi    %r2,__CLOCK_REALTIME
        je      5f
-       cghi    %r2,__CLOCK_THREAD_CPUTIME_ID
-       je      9f
-       cghi    %r2,-2          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
+       cghi    %r2,-3          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
        je      9f
        cghi    %r2,__CLOCK_MONOTONIC_COARSE
        je      3f
@@ -106,7 +104,7 @@ __kernel_clock_gettime:
        aghi    %r15,16
        br      %r14
 
-       /* CLOCK_THREAD_CPUTIME_ID for this thread */
+       /* CPUCLOCK_VIRT for this thread */
 9:     icm     %r0,15,__VDSO_ECTG_OK(%r5)
        jz      12f
        ear     %r2,%a4
index d008f638b2cd27f7615c11a02bb5d2a8f3138d04..179a2c20b01f143a51a897d9b85e33198bf881a7 100644 (file)
@@ -183,7 +183,10 @@ unsigned long randomize_et_dyn(void)
 {
        unsigned long base;
 
-       base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+       base = STACK_TOP / 3 * 2;
+       if (!is_32bit_task())
+               /* Align to 4GB */
+               base &= ~((1UL << 32) - 1);
        return base + mmap_rnd();
 }
 
index 3290f11ae1d9cadb3321bf7f4ace265a9176ab33..753a5673195112051667031bfee73921e149eef1 100644 (file)
@@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 }
 
 /* Create a virtual mapping cookie for a PCI BAR */
-void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
+void __iomem *pci_iomap_range(struct pci_dev *pdev,
+                             int bar,
+                             unsigned long offset,
+                             unsigned long max)
 {
        struct zpci_dev *zdev = get_zdev(pdev);
        u64 addr;
@@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
 
        idx = zdev->bars[bar].map_idx;
        spin_lock(&zpci_iomap_lock);
-       zpci_iomap_start[idx].fh = zdev->fh;
-       zpci_iomap_start[idx].bar = bar;
+       if (zpci_iomap_start[idx].count++) {
+               BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
+                      zpci_iomap_start[idx].bar != bar);
+       } else {
+               zpci_iomap_start[idx].fh = zdev->fh;
+               zpci_iomap_start[idx].bar = bar;
+       }
+       /* Detect overrun */
+       BUG_ON(!zpci_iomap_start[idx].count);
        spin_unlock(&zpci_iomap_lock);
 
        addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
-       return (void __iomem *) addr;
+       return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap);
+EXPORT_SYMBOL_GPL(pci_iomap_range);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+       return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
 
 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 {
@@ -285,8 +301,12 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 
        idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
        spin_lock(&zpci_iomap_lock);
-       zpci_iomap_start[idx].fh = 0;
-       zpci_iomap_start[idx].bar = 0;
+       /* Detect underrun */
+       BUG_ON(!zpci_iomap_start[idx].count);
+       if (!--zpci_iomap_start[idx].count) {
+               zpci_iomap_start[idx].fh = 0;
+               zpci_iomap_start[idx].bar = 0;
+       }
        spin_unlock(&zpci_iomap_lock);
 }
 EXPORT_SYMBOL_GPL(pci_iounmap);
index 5e2725f4ac4901dd6be34c567417bfc06e327824..ff795d3a6909f8f5a441237d71efd88be048e101 100644 (file)
@@ -23,7 +23,7 @@ typedef struct {
 #define USER_DS                KERNEL_DS
 #endif
 
-#define segment_eq(a,b)        ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)
 
 #define get_ds()       (KERNEL_DS)
 
index 9486376605f4dc21c5c6a36f1a84df06810b8f28..a49635c512665ddc6b9e70e5d944c57e26d349d6 100644 (file)
@@ -60,7 +60,7 @@ struct __large_struct { unsigned long buf[100]; };
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);     \
        __chk_user_ptr(ptr);                                    \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__typeof__(*(ptr)))__gu_val;                     \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
 })
 
@@ -71,7 +71,7 @@ struct __large_struct { unsigned long buf[100]; };
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
        if (likely(access_ok(VERIFY_READ, __gu_addr, (size))))          \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
 })
 
index 2e07e0f40c6af3038ca6b69fecfefefe528dc2b8..c01376c76b868727b55bd117551cc1b96b93d8f1 100644 (file)
@@ -59,19 +59,19 @@ do {                                                                \
        switch (size) {                                         \
        case 1:                                                 \
                retval = __put_user_asm_b((void *)&x,           \
-                                         (long)ptr);           \
+                                         (__force long)ptr);   \
                break;                                          \
        case 2:                                                 \
                retval = __put_user_asm_w((void *)&x,           \
-                                         (long)ptr);           \
+                                         (__force long)ptr);   \
                break;                                          \
        case 4:                                                 \
                retval = __put_user_asm_l((void *)&x,           \
-                                         (long)ptr);           \
+                                         (__force long)ptr);   \
                break;                                          \
        case 8:                                                 \
                retval = __put_user_asm_q((void *)&x,           \
-                                         (long)ptr);           \
+                                         (__force long)ptr);   \
                break;                                          \
        default:                                                \
                __put_user_unknown();                           \
index 9634d086fc562f1c128bddb08961d489526abdc1..64ee103dc29da142305d93d26fd44cf4a62ae699 100644 (file)
@@ -37,7 +37,7 @@
 #define get_fs()       (current->thread.current_ds)
 #define set_fs(val)    ((current->thread.current_ds) = (val))
 
-#define segment_eq(a,b)        ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)
 
 /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
  * can be fairly lightweight.
@@ -46,8 +46,8 @@
  */
 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
-#define access_ok(type, addr, size)                                    \
+#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
+#define access_ok(type, addr, size) \
        ({ (void)(type); __access_ok((unsigned long)(addr), size); })
 
 /*
@@ -91,158 +91,221 @@ void __ret_efault(void);
  * of a performance impact. Thus we have a few rather ugly macros here,
  * and hide all the ugliness from the user.
  */
-#define put_user(x,ptr) ({ \
-unsigned long __pu_addr = (unsigned long)(ptr); \
-__chk_user_ptr(ptr); \
-__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
-
-#define get_user(x,ptr) ({ \
-unsigned long __gu_addr = (unsigned long)(ptr); \
-__chk_user_ptr(ptr); \
-__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
+#define put_user(x, ptr) ({ \
+       unsigned long __pu_addr = (unsigned long)(ptr); \
+       __chk_user_ptr(ptr); \
+       __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
+})
+
+#define get_user(x, ptr) ({ \
+       unsigned long __gu_addr = (unsigned long)(ptr); \
+       __chk_user_ptr(ptr); \
+       __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
+})
 
 /*
  * The "__xxx" versions do not do address space checking, useful when
  * doing multiple accesses to the same area (the user has to do the
  * checks by hand with "access_ok()")
  */
-#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
+#define __put_user(x, ptr) \
+       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+    __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
 
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) ((struct __large_struct __user *)(x))
 
-#define __put_user_check(x,addr,size) ({ \
-register int __pu_ret; \
-if (__access_ok(addr,size)) { \
-switch (size) { \
-case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
-case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
-case 4: __put_user_asm(x,,addr,__pu_ret); break; \
-case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
-default: __pu_ret = __put_user_bad(); break; \
-} } else { __pu_ret = -EFAULT; } __pu_ret; })
-
-#define __put_user_nocheck(x,addr,size) ({ \
-register int __pu_ret; \
-switch (size) { \
-case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
-case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
-case 4: __put_user_asm(x,,addr,__pu_ret); break; \
-case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
-default: __pu_ret = __put_user_bad(); break; \
-} __pu_ret; })
-
-#define __put_user_asm(x,size,addr,ret)                                        \
+#define __put_user_check(x, addr, size) ({ \
+       register int __pu_ret; \
+       if (__access_ok(addr, size)) { \
+               switch (size) { \
+               case 1: \
+                       __put_user_asm(x, b, addr, __pu_ret); \
+                       break; \
+               case 2: \
+                       __put_user_asm(x, h, addr, __pu_ret); \
+                       break; \
+               case 4: \
+                       __put_user_asm(x, , addr, __pu_ret); \
+                       break; \
+               case 8: \
+                       __put_user_asm(x, d, addr, __pu_ret); \
+                       break; \
+               default: \
+                       __pu_ret = __put_user_bad(); \
+                       break; \
+               } \
+       } else { \
+               __pu_ret = -EFAULT; \
+       } \
+       __pu_ret; \
+})
+
+#define __put_user_nocheck(x, addr, size) ({                   \
+       register int __pu_ret;                                  \
+       switch (size) {                                         \
+       case 1: __put_user_asm(x, b, addr, __pu_ret); break;    \
+       case 2: __put_user_asm(x, h, addr, __pu_ret); break;    \
+       case 4: __put_user_asm(x, , addr, __pu_ret); break;     \
+       case 8: __put_user_asm(x, d, addr, __pu_ret); break;    \
+       default: __pu_ret = __put_user_bad(); break;            \
+       } \
+       __pu_ret; \
+})
+
+#define __put_user_asm(x, size, addr, ret)                             \
 __asm__ __volatile__(                                                  \
-       "/* Put user asm, inline. */\n"                                 \
-"1:\t" "st"#size " %1, %2\n\t"                                         \
-       "clr    %0\n"                                                   \
-"2:\n\n\t"                                                             \
-       ".section .fixup,#alloc,#execinstr\n\t"                         \
-       ".align 4\n"                                                    \
-"3:\n\t"                                                               \
-       "b      2b\n\t"                                                 \
-       " mov   %3, %0\n\t"                                             \
-        ".previous\n\n\t"                                              \
-       ".section __ex_table,#alloc\n\t"                                \
-       ".align 4\n\t"                                                  \
-       ".word  1b, 3b\n\t"                                             \
-       ".previous\n\n\t"                                               \
-       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),                      \
-        "i" (-EFAULT))
+               "/* Put user asm, inline. */\n"                         \
+       "1:\t"  "st"#size " %1, %2\n\t"                                 \
+               "clr    %0\n"                                           \
+       "2:\n\n\t"                                                      \
+               ".section .fixup,#alloc,#execinstr\n\t"                 \
+               ".align 4\n"                                            \
+       "3:\n\t"                                                        \
+               "b      2b\n\t"                                         \
+               " mov   %3, %0\n\t"                                     \
+               ".previous\n\n\t"                                       \
+               ".section __ex_table,#alloc\n\t"                        \
+               ".align 4\n\t"                                          \
+               ".word  1b, 3b\n\t"                                     \
+               ".previous\n\n\t"                                       \
+              : "=&r" (ret) : "r" (x), "m" (*__m(addr)),               \
+                "i" (-EFAULT))
 
 int __put_user_bad(void);
 
-#define __get_user_check(x,addr,size,type) ({ \
-register int __gu_ret; \
-register unsigned long __gu_val; \
-if (__access_ok(addr,size)) { \
-switch (size) { \
-case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
-case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
-case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
-case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
-default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
-} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
-
-#define __get_user_check_ret(x,addr,size,type,retval) ({ \
-register unsigned long __gu_val __asm__ ("l1"); \
-if (__access_ok(addr,size)) { \
-switch (size) { \
-case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
-case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
-case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
-case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
-default: if (__get_user_bad()) return retval; \
-} x = (type) __gu_val; } else return retval; })
-
-#define __get_user_nocheck(x,addr,size,type) ({ \
-register int __gu_ret; \
-register unsigned long __gu_val; \
-switch (size) { \
-case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
-case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
-case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
-case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
-default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
-} x = (type) __gu_val; __gu_ret; })
-
-#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
-register unsigned long __gu_val __asm__ ("l1"); \
-switch (size) { \
-case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
-case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
-case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
-case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
-default: if (__get_user_bad()) return retval; \
-} x = (type) __gu_val; })
-
-#define __get_user_asm(x,size,addr,ret)                                        \
+#define __get_user_check(x, addr, size, type) ({ \
+       register int __gu_ret; \
+       register unsigned long __gu_val; \
+       if (__access_ok(addr, size)) { \
+               switch (size) { \
+               case 1: \
+                        __get_user_asm(__gu_val, ub, addr, __gu_ret); \
+                       break; \
+               case 2: \
+                       __get_user_asm(__gu_val, uh, addr, __gu_ret); \
+                       break; \
+               case 4: \
+                       __get_user_asm(__gu_val, , addr, __gu_ret); \
+                       break; \
+               case 8: \
+                       __get_user_asm(__gu_val, d, addr, __gu_ret); \
+                       break; \
+               default: \
+                       __gu_val = 0; \
+                       __gu_ret = __get_user_bad(); \
+                       break; \
+               } \
+        } else { \
+                __gu_val = 0; \
+                __gu_ret = -EFAULT; \
+       } \
+       x = (__force type) __gu_val; \
+       __gu_ret; \
+})
+
+#define __get_user_check_ret(x, addr, size, type, retval) ({ \
+       register unsigned long __gu_val __asm__ ("l1"); \
+       if (__access_ok(addr, size)) { \
+               switch (size) { \
+               case 1: \
+                       __get_user_asm_ret(__gu_val, ub, addr, retval); \
+                       break; \
+               case 2: \
+                       __get_user_asm_ret(__gu_val, uh, addr, retval); \
+                       break; \
+               case 4: \
+                       __get_user_asm_ret(__gu_val, , addr, retval); \
+                       break; \
+               case 8: \
+                       __get_user_asm_ret(__gu_val, d, addr, retval); \
+                       break; \
+               default: \
+                       if (__get_user_bad()) \
+                               return retval; \
+               } \
+               x = (__force type) __gu_val; \
+       } else \
+               return retval; \
+})
+
+#define __get_user_nocheck(x, addr, size, type) ({                     \
+       register int __gu_ret;                                          \
+       register unsigned long __gu_val;                                \
+       switch (size) {                                                 \
+       case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;    \
+       case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;    \
+       case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;      \
+       case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;     \
+       default:                                                        \
+               __gu_val = 0;                                           \
+               __gu_ret = __get_user_bad();                            \
+               break;                                                  \
+       }                                                               \
+       x = (__force type) __gu_val;                                    \
+       __gu_ret;                                                       \
+})
+
+#define __get_user_nocheck_ret(x, addr, size, type, retval) ({         \
+       register unsigned long __gu_val __asm__ ("l1");                 \
+       switch (size) {                                                 \
+       case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break;  \
+       case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break;  \
+       case 4: __get_user_asm_ret(__gu_val, , addr, retval);  break;   \
+       case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break;   \
+       default:                                                        \
+               if (__get_user_bad())                                   \
+                       return retval;                                  \
+       }                                                               \
+       x = (__force type) __gu_val;                                    \
+})
+
+#define __get_user_asm(x, size, addr, ret)                             \
 __asm__ __volatile__(                                                  \
-       "/* Get user asm, inline. */\n"                                 \
-"1:\t" "ld"#size " %2, %1\n\t"                                         \
-       "clr    %0\n"                                                   \
-"2:\n\n\t"                                                             \
-       ".section .fixup,#alloc,#execinstr\n\t"                         \
-       ".align 4\n"                                                    \
-"3:\n\t"                                                               \
-       "clr    %1\n\t"                                                 \
-       "b      2b\n\t"                                                 \
-       " mov   %3, %0\n\n\t"                                           \
-       ".previous\n\t"                                                 \
-       ".section __ex_table,#alloc\n\t"                                \
-       ".align 4\n\t"                                                  \
-       ".word  1b, 3b\n\n\t"                                           \
-       ".previous\n\t"                                                 \
-       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),                    \
-        "i" (-EFAULT))
-
-#define __get_user_asm_ret(x,size,addr,retval)                         \
+               "/* Get user asm, inline. */\n"                         \
+       "1:\t"  "ld"#size " %2, %1\n\t"                                 \
+               "clr    %0\n"                                           \
+       "2:\n\n\t"                                                      \
+               ".section .fixup,#alloc,#execinstr\n\t"                 \
+               ".align 4\n"                                            \
+       "3:\n\t"                                                        \
+               "clr    %1\n\t"                                         \
+               "b      2b\n\t"                                         \
+               " mov   %3, %0\n\n\t"                                   \
+               ".previous\n\t"                                         \
+               ".section __ex_table,#alloc\n\t"                        \
+               ".align 4\n\t"                                          \
+               ".word  1b, 3b\n\n\t"                                   \
+               ".previous\n\t"                                         \
+              : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),             \
+                "i" (-EFAULT))
+
+#define __get_user_asm_ret(x, size, addr, retval)                      \
 if (__builtin_constant_p(retval) && retval == -EFAULT)                 \
-__asm__ __volatile__(                                                  \
-       "/* Get user asm ret, inline. */\n"                             \
-"1:\t" "ld"#size " %1, %0\n\n\t"                                       \
-       ".section __ex_table,#alloc\n\t"                                \
-       ".align 4\n\t"                                                  \
-       ".word  1b,__ret_efault\n\n\t"                                  \
-       ".previous\n\t"                                                 \
-       : "=&r" (x) : "m" (*__m(addr)));                                        \
+       __asm__ __volatile__(                                           \
+                       "/* Get user asm ret, inline. */\n"             \
+               "1:\t"  "ld"#size " %1, %0\n\n\t"                       \
+                       ".section __ex_table,#alloc\n\t"                \
+                       ".align 4\n\t"                                  \
+                       ".word  1b,__ret_efault\n\n\t"                  \
+                       ".previous\n\t"                                 \
+                      : "=&r" (x) : "m" (*__m(addr)));                 \
 else                                                                   \
-__asm__ __volatile__(                                                  \
-       "/* Get user asm ret, inline. */\n"                             \
-"1:\t" "ld"#size " %1, %0\n\n\t"                                       \
-       ".section .fixup,#alloc,#execinstr\n\t"                         \
-       ".align 4\n"                                                    \
-"3:\n\t"                                                               \
-       "ret\n\t"                                                       \
-       " restore %%g0, %2, %%o0\n\n\t"                                 \
-       ".previous\n\t"                                                 \
-       ".section __ex_table,#alloc\n\t"                                \
-       ".align 4\n\t"                                                  \
-       ".word  1b, 3b\n\n\t"                                           \
-       ".previous\n\t"                                                 \
-       : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
+       __asm__ __volatile__(                                           \
+                       "/* Get user asm ret, inline. */\n"             \
+               "1:\t"  "ld"#size " %1, %0\n\n\t"                       \
+                       ".section .fixup,#alloc,#execinstr\n\t"         \
+                       ".align 4\n"                                    \
+               "3:\n\t"                                                \
+                       "ret\n\t"                                       \
+                       " restore %%g0, %2, %%o0\n\n\t"                 \
+                       ".previous\n\t"                                 \
+                       ".section __ex_table,#alloc\n\t"                \
+                       ".align 4\n\t"                                  \
+                       ".word  1b, 3b\n\n\t"                           \
+                       ".previous\n\t"                                 \
+                      : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
 
 int __get_user_bad(void);
 
index c990a5e577f02738f6a9dd119bace01f3e8e68b0..a35194b7dba01043ac4417aa0e33d965f0157b04 100644 (file)
 #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
 #define get_ds() (KERNEL_DS)
 
-#define segment_eq(a,b)  ((a).seg == (b).seg)
+#define segment_eq(a, b)  ((a).seg == (b).seg)
 
 #define set_fs(val)                                                            \
 do {                                                                           \
-       current_thread_info()->current_ds =(val).seg;                           \
+       current_thread_info()->current_ds = (val).seg;                          \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));        \
 } while(0)
 
@@ -88,121 +88,135 @@ void __retl_efault(void);
  * of a performance impact. Thus we have a few rather ugly macros here,
  * and hide all the ugliness from the user.
  */
-#define put_user(x,ptr) ({ \
-unsigned long __pu_addr = (unsigned long)(ptr); \
-__chk_user_ptr(ptr); \
-__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
+#define put_user(x, ptr) ({ \
+       unsigned long __pu_addr = (unsigned long)(ptr); \
+       __chk_user_ptr(ptr); \
+       __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
+})
 
-#define get_user(x,ptr) ({ \
-unsigned long __gu_addr = (unsigned long)(ptr); \
-__chk_user_ptr(ptr); \
-__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
+#define get_user(x, ptr) ({ \
+       unsigned long __gu_addr = (unsigned long)(ptr); \
+       __chk_user_ptr(ptr); \
+       __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
+})
 
-#define __put_user(x,ptr) put_user(x,ptr)
-#define __get_user(x,ptr) get_user(x,ptr)
+#define __put_user(x, ptr) put_user(x, ptr)
+#define __get_user(x, ptr) get_user(x, ptr)
 
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) ((struct __large_struct *)(x))
 
-#define __put_user_nocheck(data,addr,size) ({ \
-register int __pu_ret; \
-switch (size) { \
-case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
-case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
-case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
-case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
-default: __pu_ret = __put_user_bad(); break; \
-} __pu_ret; })
-
-#define __put_user_asm(x,size,addr,ret)                                        \
+#define __put_user_nocheck(data, addr, size) ({                        \
+       register int __pu_ret;                                  \
+       switch (size) {                                         \
+       case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
+       case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
+       case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
+       case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
+       default: __pu_ret = __put_user_bad(); break;            \
+       }                                                       \
+       __pu_ret;                                               \
+})
+
+#define __put_user_asm(x, size, addr, ret)                             \
 __asm__ __volatile__(                                                  \
-       "/* Put user asm, inline. */\n"                                 \
-"1:\t" "st"#size "a %1, [%2] %%asi\n\t"                                \
-       "clr    %0\n"                                                   \
-"2:\n\n\t"                                                             \
-       ".section .fixup,#alloc,#execinstr\n\t"                         \
-       ".align 4\n"                                                    \
-"3:\n\t"                                                               \
-       "sethi  %%hi(2b), %0\n\t"                                       \
-       "jmpl   %0 + %%lo(2b), %%g0\n\t"                                \
-       " mov   %3, %0\n\n\t"                                           \
-       ".previous\n\t"                                                 \
-       ".section __ex_table,\"a\"\n\t"                                 \
-       ".align 4\n\t"                                                  \
-       ".word  1b, 3b\n\t"                                             \
-       ".previous\n\n\t"                                               \
-       : "=r" (ret) : "r" (x), "r" (__m(addr)),                                \
-        "i" (-EFAULT))
+               "/* Put user asm, inline. */\n"                         \
+       "1:\t"  "st"#size "a %1, [%2] %%asi\n\t"                        \
+               "clr    %0\n"                                           \
+       "2:\n\n\t"                                                      \
+               ".section .fixup,#alloc,#execinstr\n\t"                 \
+               ".align 4\n"                                            \
+       "3:\n\t"                                                        \
+               "sethi  %%hi(2b), %0\n\t"                               \
+               "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
+               " mov   %3, %0\n\n\t"                                   \
+               ".previous\n\t"                                         \
+               ".section __ex_table,\"a\"\n\t"                         \
+               ".align 4\n\t"                                          \
+               ".word  1b, 3b\n\t"                                     \
+               ".previous\n\n\t"                                       \
+              : "=r" (ret) : "r" (x), "r" (__m(addr)),                 \
+                "i" (-EFAULT))
 
 int __put_user_bad(void);
 
-#define __get_user_nocheck(data,addr,size,type) ({ \
-register int __gu_ret; \
-register unsigned long __gu_val; \
-switch (size) { \
-case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
-case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
-case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
-case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
-default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
-} data = (type) __gu_val; __gu_ret; })
-
-#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
-register unsigned long __gu_val __asm__ ("l1"); \
-switch (size) { \
-case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
-case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
-case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
-case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
-default: if (__get_user_bad()) return retval; \
-} data = (type) __gu_val; })
-
-#define __get_user_asm(x,size,addr,ret)                                        \
+#define __get_user_nocheck(data, addr, size, type) ({                       \
+       register int __gu_ret;                                               \
+       register unsigned long __gu_val;                                     \
+       switch (size) {                                                      \
+               case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
+               case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
+               case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
+               case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
+               default:                                                     \
+                       __gu_val = 0;                                        \
+                       __gu_ret = __get_user_bad();                         \
+                       break;                                               \
+       }                                                                    \
+       data = (__force type) __gu_val;                                      \
+        __gu_ret;                                                           \
+})
+
+#define __get_user_nocheck_ret(data, addr, size, type, retval) ({      \
+       register unsigned long __gu_val __asm__ ("l1");                 \
+       switch (size) {                                                 \
+       case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break;  \
+       case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break;  \
+       case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break;  \
+       case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break;   \
+       default:                                                        \
+               if (__get_user_bad())                                   \
+                       return retval;                                  \
+       }                                                               \
+       data = (__force type) __gu_val;                                 \
+})
+
+#define __get_user_asm(x, size, addr, ret)                             \
 __asm__ __volatile__(                                                  \
-       "/* Get user asm, inline. */\n"                                 \
-"1:\t" "ld"#size "a [%2] %%asi, %1\n\t"                                \
-       "clr    %0\n"                                                   \
-"2:\n\n\t"                                                             \
-       ".section .fixup,#alloc,#execinstr\n\t"                         \
-       ".align 4\n"                                                    \
-"3:\n\t"                                                               \
-       "sethi  %%hi(2b), %0\n\t"                                       \
-       "clr    %1\n\t"                                                 \
-       "jmpl   %0 + %%lo(2b), %%g0\n\t"                                \
-       " mov   %3, %0\n\n\t"                                           \
-       ".previous\n\t"                                                 \
-       ".section __ex_table,\"a\"\n\t"                                 \
-       ".align 4\n\t"                                                  \
-       ".word  1b, 3b\n\n\t"                                           \
-       ".previous\n\t"                                                 \
-       : "=r" (ret), "=r" (x) : "r" (__m(addr)),                       \
-        "i" (-EFAULT))
-
-#define __get_user_asm_ret(x,size,addr,retval)                         \
+               "/* Get user asm, inline. */\n"                         \
+       "1:\t"  "ld"#size "a [%2] %%asi, %1\n\t"                        \
+               "clr    %0\n"                                           \
+       "2:\n\n\t"                                                      \
+               ".section .fixup,#alloc,#execinstr\n\t"                 \
+               ".align 4\n"                                            \
+       "3:\n\t"                                                        \
+               "sethi  %%hi(2b), %0\n\t"                               \
+               "clr    %1\n\t"                                         \
+               "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
+               " mov   %3, %0\n\n\t"                                   \
+               ".previous\n\t"                                         \
+               ".section __ex_table,\"a\"\n\t"                         \
+               ".align 4\n\t"                                          \
+               ".word  1b, 3b\n\n\t"                                   \
+               ".previous\n\t"                                         \
+              : "=r" (ret), "=r" (x) : "r" (__m(addr)),                \
+                "i" (-EFAULT))
+
+#define __get_user_asm_ret(x, size, addr, retval)                      \
 if (__builtin_constant_p(retval) && retval == -EFAULT)                 \
-__asm__ __volatile__(                                                  \
-       "/* Get user asm ret, inline. */\n"                             \
-"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t"                              \
-       ".section __ex_table,\"a\"\n\t"                                 \
-       ".align 4\n\t"                                                  \
-       ".word  1b,__ret_efault\n\n\t"                                  \
-       ".previous\n\t"                                                 \
-       : "=r" (x) : "r" (__m(addr)));                                  \
+       __asm__ __volatile__(                                           \
+               "/* Get user asm ret, inline. */\n"                     \
+       "1:\t"  "ld"#size "a [%1] %%asi, %0\n\n\t"                      \
+               ".section __ex_table,\"a\"\n\t"                         \
+               ".align 4\n\t"                                          \
+               ".word  1b,__ret_efault\n\n\t"                          \
+               ".previous\n\t"                                         \
+              : "=r" (x) : "r" (__m(addr)));                           \
 else                                                                   \
-__asm__ __volatile__(                                                  \
-       "/* Get user asm ret, inline. */\n"                             \
-"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t"                              \
-       ".section .fixup,#alloc,#execinstr\n\t"                         \
-       ".align 4\n"                                                    \
-"3:\n\t"                                                               \
-       "ret\n\t"                                                       \
-       " restore %%g0, %2, %%o0\n\n\t"                                 \
-       ".previous\n\t"                                                 \
-       ".section __ex_table,\"a\"\n\t"                                 \
-       ".align 4\n\t"                                                  \
-       ".word  1b, 3b\n\n\t"                                           \
-       ".previous\n\t"                                                 \
-       : "=r" (x) : "r" (__m(addr)), "i" (retval))
+       __asm__ __volatile__(                                           \
+               "/* Get user asm ret, inline. */\n"                     \
+       "1:\t"  "ld"#size "a [%1] %%asi, %0\n\n\t"                      \
+               ".section .fixup,#alloc,#execinstr\n\t"                 \
+               ".align 4\n"                                            \
+       "3:\n\t"                                                        \
+               "ret\n\t"                                               \
+               " restore %%g0, %2, %%o0\n\n\t"                         \
+               ".previous\n\t"                                         \
+               ".section __ex_table,\"a\"\n\t"                         \
+               ".align 4\n\t"                                          \
+               ".word  1b, 3b\n\n\t"                                   \
+               ".previous\n\t"                                         \
+              : "=r" (x) : "r" (__m(addr)), "i" (retval))
 
 int __get_user_bad(void);
 
index eb1cf898ed3cb51b30416380fc2109669c4b9f77..c2fb8a87dccb2990a794bb8960bfdad85eb9a390 100644 (file)
@@ -488,6 +488,22 @@ config X86_INTEL_MID
          Intel MID platforms are based on an Intel processor and chipset which
          consume less power than most of the x86 derivatives.
 
+config X86_INTEL_QUARK
+       bool "Intel Quark platform support"
+       depends on X86_32
+       depends on X86_EXTENDED_PLATFORM
+       depends on X86_PLATFORM_DEVICES
+       depends on X86_TSC
+       depends on PCI
+       depends on PCI_GOANY
+       depends on X86_IO_APIC
+       select IOSF_MBI
+       select INTEL_IMR
+       ---help---
+         Select to include support for Quark X1000 SoC.
+         Say Y here if you have a Quark based system such as the Arduino
+         compatible Intel Galileo.
+
 config X86_INTEL_LPSS
        bool "Intel Low Power Subsystem Support"
        depends on ACPI
index 61bd2ad94281884f13b70f3bb9e397fdcef9338a..20028da8ae188ce16aaabdc1ef6da2fd1eb5b8f0 100644 (file)
@@ -313,6 +313,19 @@ config DEBUG_NMI_SELFTEST
 
          If unsure, say N.
 
+config DEBUG_IMR_SELFTEST
+       bool "Isolated Memory Region self test"
+       default n
+       depends on INTEL_IMR
+       ---help---
+         This option enables automated sanity testing of the IMR code.
+         Some simple tests are run to verify IMR bounds checking, alignment
+         and overlapping. This option is really only useful if you are
+         debugging an IMR memory map or are modifying the IMR code and want to
+         test your changes.
+
+         If unsure say N here.
+
 config X86_DEBUG_STATIC_CPU_HAS
        bool "Debug alternatives"
        depends on DEBUG_KERNEL
index 36b62bc52638368c750c250a529995cd8cac80cb..95eba554baf9856f515d30150e66a5333ee04e18 100644 (file)
@@ -30,7 +30,7 @@ cflags-y += -ffreestanding
 # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
 # a lot more stack due to the lack of sharing of stacklots.  Also, gcc
 # 4.3.0 needs -funit-at-a-time for extern inline functions.
-KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \
+KBUILD_CFLAGS += $(shell if [ $(cc-version) -lt 0400 ] ; then \
                        echo $(call cc-option,-fno-unit-at-a-time); \
                        else echo $(call cc-option,-funit-at-a-time); fi ;)
 
index 843feb3eb20bd781bf8df6cd2b004450eb2d1c5e..0a291cdfaf77100117baf53b2e3af75a43a8af4c 100644 (file)
@@ -51,6 +51,7 @@ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
 
 vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
        $(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
        $(call if_changed,ld)
index bb1376381985edb9f96e49c0a1b0269e56bd0f9e..7083c16cccba0b2b144ea5e03e160ebafe81e855 100644 (file)
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
                LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
 
+struct kaslr_setup_data {
+       __u64 next;
+       __u32 type;
+       __u32 len;
+       __u8 data[1];
+} kaslr_setup_data;
+
 #define I8254_PORT_CONTROL     0x43
 #define I8254_PORT_COUNTER0    0x40
 #define I8254_CMD_READBACK     0xC0
@@ -295,7 +302,29 @@ static unsigned long find_random_addr(unsigned long minimum,
        return slots_fetch_random();
 }
 
-unsigned char *choose_kernel_location(unsigned char *input,
+static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled)
+{
+       struct setup_data *data;
+
+       kaslr_setup_data.type = SETUP_KASLR;
+       kaslr_setup_data.len = 1;
+       kaslr_setup_data.next = 0;
+       kaslr_setup_data.data[0] = enabled;
+
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       if (data)
+               data->next = (unsigned long)&kaslr_setup_data;
+       else
+               params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
+
+}
+
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size)
@@ -306,14 +335,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
 #ifdef CONFIG_HIBERNATION
        if (!cmdline_find_option_bool("kaslr")) {
                debug_putstr("KASLR disabled by default...\n");
+               add_kaslr_setup_data(params, 0);
                goto out;
        }
 #else
        if (cmdline_find_option_bool("nokaslr")) {
                debug_putstr("KASLR disabled by cmdline...\n");
+               add_kaslr_setup_data(params, 0);
                goto out;
        }
 #endif
+       add_kaslr_setup_data(params, 1);
 
        /* Record the various known unsafe memory ranges. */
        mem_avoid_init((unsigned long)input, input_size,
index 7ff3632806b18ec9a48bd0ae88bdc7a9e9dbe091..99494dff2113e5ac63bcc77aaa58bb01b0506fbb 100644 (file)
@@ -3,28 +3,3 @@
 #include <asm/processor-flags.h>
 
 #include "../../platform/efi/efi_stub_64.S"
-
-#ifdef CONFIG_EFI_MIXED
-       .code64
-       .text
-ENTRY(efi64_thunk)
-       push    %rbp
-       push    %rbx
-
-       subq    $16, %rsp
-       leaq    efi_exit32(%rip), %rax
-       movl    %eax, 8(%rsp)
-       leaq    efi_gdt64(%rip), %rax
-       movl    %eax, 4(%rsp)
-       movl    %eax, 2(%rax)           /* Fixup the gdt base address */
-       leaq    efi32_boot_gdt(%rip), %rax
-       movl    %eax, (%rsp)
-
-       call    __efi64_thunk
-
-       addq    $16, %rsp
-       pop     %rbx
-       pop     %rbp
-       ret
-ENDPROC(efi64_thunk)
-#endif /* CONFIG_EFI_MIXED */
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
new file mode 100644 (file)
index 0000000..630384a
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
+ *
+ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
+ *
+ * Because this thunking occurs before ExitBootServices() we have to
+ * restore the firmware's 32-bit GDT before we make EFI service calls,
+ * since the firmware's 32-bit IDT is still currently installed and it
+ * needs to be able to service interrupts.
+ *
+ * On the plus side, we don't have to worry about mangling 64-bit
+ * addresses into 32-bits because we're executing with an identity
+ * mapped pagetable and haven't transitioned to 64-bit virtual addresses
+ * yet.
+ */
+
+#include <linux/linkage.h>
+#include <asm/msr.h>
+#include <asm/page_types.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
+
+       .code64
+       .text
+ENTRY(efi64_thunk)
+       push    %rbp
+       push    %rbx
+
+       subq    $8, %rsp
+       leaq    efi_exit32(%rip), %rax
+       movl    %eax, 4(%rsp)
+       leaq    efi_gdt64(%rip), %rax
+       movl    %eax, (%rsp)
+       movl    %eax, 2(%rax)           /* Fixup the gdt base address */
+
+       movl    %ds, %eax
+       push    %rax
+       movl    %es, %eax
+       push    %rax
+       movl    %ss, %eax
+       push    %rax
+
+       /*
+        * Convert x86-64 ABI params to i386 ABI
+        */
+       subq    $32, %rsp
+       movl    %esi, 0x0(%rsp)
+       movl    %edx, 0x4(%rsp)
+       movl    %ecx, 0x8(%rsp)
+       movq    %r8, %rsi
+       movl    %esi, 0xc(%rsp)
+       movq    %r9, %rsi
+       movl    %esi,  0x10(%rsp)
+
+       sgdt    save_gdt(%rip)
+
+       leaq    1f(%rip), %rbx
+       movq    %rbx, func_rt_ptr(%rip)
+
+       /*
+        * Switch to gdt with 32-bit segments. This is the firmware GDT
+        * that was installed when the kernel started executing. This
+        * pointer was saved at the EFI stub entry point in head_64.S.
+        */
+       leaq    efi32_boot_gdt(%rip), %rax
+       lgdt    (%rax)
+
+       pushq   $__KERNEL_CS
+       leaq    efi_enter32(%rip), %rax
+       pushq   %rax
+       lretq
+
+1:     addq    $32, %rsp
+
+       lgdt    save_gdt(%rip)
+
+       pop     %rbx
+       movl    %ebx, %ss
+       pop     %rbx
+       movl    %ebx, %es
+       pop     %rbx
+       movl    %ebx, %ds
+
+       /*
+        * Convert 32-bit status code into 64-bit.
+        */
+       test    %rax, %rax
+       jz      1f
+       movl    %eax, %ecx
+       andl    $0x0fffffff, %ecx
+       andl    $0xf0000000, %eax
+       shl     $32, %rax
+       or      %rcx, %rax
+1:
+       addq    $8, %rsp
+       pop     %rbx
+       pop     %rbp
+       ret
+ENDPROC(efi64_thunk)
+
+ENTRY(efi_exit32)
+       movq    func_rt_ptr(%rip), %rax
+       push    %rax
+       mov     %rdi, %rax
+       ret
+ENDPROC(efi_exit32)
+
+       .code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+       movl    $__KERNEL_DS, %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       movl    %eax, %ss
+
+       /* Reload pgtables */
+       movl    %cr3, %eax
+       movl    %eax, %cr3
+
+       /* Disable paging */
+       movl    %cr0, %eax
+       btrl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+
+       /* Disable long mode via EFER */
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btrl    $_EFER_LME, %eax
+       wrmsr
+
+       call    *%edi
+
+       /* We must preserve return value */
+       movl    %eax, %edi
+
+       /*
+        * Some firmware will return with interrupts enabled. Be sure to
+        * disable them before we switch GDTs.
+        */
+       cli
+
+       movl    56(%esp), %eax
+       movl    %eax, 2(%eax)
+       lgdtl   (%eax)
+
+       movl    %cr4, %eax
+       btsl    $(X86_CR4_PAE_BIT), %eax
+       movl    %eax, %cr4
+
+       movl    %cr3, %eax
+       movl    %eax, %cr3
+
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btsl    $_EFER_LME, %eax
+       wrmsr
+
+       xorl    %eax, %eax
+       lldt    %ax
+
+       movl    60(%esp), %eax
+       pushl   $__KERNEL_CS
+       pushl   %eax
+
+       /* Enable paging */
+       movl    %cr0, %eax
+       btsl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+       lret
+ENDPROC(efi_enter32)
+
+       .data
+       .balign 8
+       .global efi32_boot_gdt
+efi32_boot_gdt:        .word   0
+               .quad   0
+
+save_gdt:      .word   0
+               .quad   0
+func_rt_ptr:   .quad   0
+
+       .global efi_gdt64
+efi_gdt64:
+       .word   efi_gdt64_end - efi_gdt64
+       .long   0                       /* Filled out by user */
+       .word   0
+       .quad   0x0000000000000000      /* NULL descriptor */
+       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
+       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
+       .quad   0x0080890000000000      /* TS descriptor */
+       .quad   0x0000000000000000      /* TS continued */
+efi_gdt64_end:
index a950864a64dab3d558197c77bef3c56a07961494..5903089c818f6843b9d1cc7c83507cf0d28a3e26 100644 (file)
@@ -401,7 +401,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
         * the entire decompressed kernel plus relocation table, or the
         * entire decompressed kernel plus .bss and .brk sections.
         */
-       output = choose_kernel_location(input_data, input_len, output,
+       output = choose_kernel_location(real_mode, input_data, input_len,
+                                       output,
                                        output_len > run_size ? output_len
                                                              : run_size);
 
index 04477d68403f1fe6197d82276033ce27338c1bac..ee3576b2666b8139eedf25077ce769da10712c11 100644 (file)
@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
 
 #if CONFIG_RANDOMIZE_BASE
 /* aslr.c */
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size);
@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
 bool has_cpuflag(int flag);
 #else
 static inline
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size)
index 92003f3c8a427b9138796ceef1b76bc237860da3..efc3b22d896eb23b7e37cf9c720065c0b6b0c717 100644 (file)
@@ -213,7 +213,15 @@ void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
+
+#ifdef CONFIG_X86_64
+static inline int apic_force_enable(unsigned long addr)
+{
+       return -1;
+}
+#else
 extern int apic_force_enable(unsigned long addr);
+#endif
 
 extern int apic_bsp_setup(bool upmode);
 extern void apic_ap_setup(void);
diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h
new file mode 100644 (file)
index 0000000..cd2ce40
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * imr.h: Isolated Memory Region API
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _IMR_H
+#define _IMR_H
+
+#include <linux/types.h>
+
+/*
+ * IMR agent access mask bits
+ * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register
+ * definitions.
+ */
+#define IMR_ESRAM_FLUSH                BIT(31)
+#define IMR_CPU_SNOOP          BIT(30)         /* Applicable only to write */
+#define IMR_RMU                        BIT(29)
+#define IMR_VC1_SAI_ID3                BIT(15)
+#define IMR_VC1_SAI_ID2                BIT(14)
+#define IMR_VC1_SAI_ID1                BIT(13)
+#define IMR_VC1_SAI_ID0                BIT(12)
+#define IMR_VC0_SAI_ID3                BIT(11)
+#define IMR_VC0_SAI_ID2                BIT(10)
+#define IMR_VC0_SAI_ID1                BIT(9)
+#define IMR_VC0_SAI_ID0                BIT(8)
+#define IMR_CPU_0              BIT(1)          /* SMM mode */
+#define IMR_CPU                        BIT(0)          /* Non SMM mode */
+#define IMR_ACCESS_NONE                0
+
+/*
+ * Read/Write access-all bits here include some reserved bits
+ * These are the values firmware uses and are accepted by hardware.
+ * The kernel defines read/write access-all in the same way as firmware
+ * in order to have a consistent and crisp definition across firmware,
+ * bootloader and kernel.
+ */
+#define IMR_READ_ACCESS_ALL    0xBFFFFFFF
+#define IMR_WRITE_ACCESS_ALL   0xFFFFFFFF
+
+/* Number of IMRs provided by Quark X1000 SoC */
+#define QUARK_X1000_IMR_MAX    0x08
+#define QUARK_X1000_IMR_REGBASE 0x40
+
+/* IMR alignment bits - only bits 31:10 are checked for IMR validity */
+#define IMR_ALIGN              0x400
+#define IMR_MASK               (IMR_ALIGN - 1)
+
+int imr_add_range(phys_addr_t base, size_t size,
+                 unsigned int rmask, unsigned int wmask, bool lock);
+
+int imr_remove_range(phys_addr_t base, size_t size);
+
+#endif /* _IMR_H */
index 879fd7d33877516fd13d6eb06fc0fc8354350e98..ef01fef3eebc6556f24fe69aff32bf64a907925d 100644 (file)
@@ -16,7 +16,6 @@
 #define LHCALL_SET_PTE         14
 #define LHCALL_SET_PGD         15
 #define LHCALL_LOAD_TLS                16
-#define LHCALL_NOTIFY          17
 #define LHCALL_LOAD_GDT_ENTRY  18
 #define LHCALL_SEND_INTERRUPTS 19
 
index f97fbe3abb67f5059d4e6f0a37261d6113df19de..95e11f79f123c6aadd5ed8888a51e1870405deeb 100644 (file)
@@ -51,6 +51,8 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
+extern bool kaslr_enabled;
+
 static inline phys_addr_t get_max_mapped(void)
 {
        return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
index 67fc3d2b0aabe6e7b5a5af631b80bf9e8c25a46b..a0c35bf6cb92cf95abe8fc9ffe9f75c4f34b1fca 100644 (file)
@@ -476,12 +476,14 @@ static inline int pmd_present(pmd_t pmd)
  */
 static inline int pte_protnone(pte_t pte)
 {
-       return pte_flags(pte) & _PAGE_PROTNONE;
+       return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+               == _PAGE_PROTNONE;
 }
 
 static inline int pmd_protnone(pmd_t pmd)
 {
-       return pmd_flags(pmd) & _PAGE_PROTNONE;
+       return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+               == _PAGE_PROTNONE;
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
index 7050d864f5207c4fb384672b083a18a6584bdbbe..cf87de3fc39000eb21028ab2597d5187978bda4a 100644 (file)
@@ -46,7 +46,7 @@ static __always_inline bool static_key_false(struct static_key *key);
 
 static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 {
-       set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+       set_bit(0, (volatile unsigned long *)&lock->tickets.head);
 }
 
 #else  /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
 }
 
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline int  __tickets_equal(__ticket_t one, __ticket_t two)
+{
+       return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+                                                       __ticket_t head)
+{
+       if (head & TICKET_SLOWPATH_FLAG) {
+               arch_spinlock_t old, new;
+
+               old.tickets.head = head;
+               new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
+               old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
+               new.tickets.tail = old.tickets.tail;
+
+               /* try to clear slowpath flag when there are no contenders */
+               cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
+       }
+}
 
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-       return lock.tickets.head == lock.tickets.tail;
+       return __tickets_equal(lock.tickets.head, lock.tickets.tail);
 }
 
 /*
@@ -87,18 +107,21 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
        if (likely(inc.head == inc.tail))
                goto out;
 
-       inc.tail &= ~TICKET_SLOWPATH_FLAG;
        for (;;) {
                unsigned count = SPIN_THRESHOLD;
 
                do {
-                       if (READ_ONCE(lock->tickets.head) == inc.tail)
-                               goto out;
+                       inc.head = READ_ONCE(lock->tickets.head);
+                       if (__tickets_equal(inc.head, inc.tail))
+                               goto clear_slowpath;
                        cpu_relax();
                } while (--count);
                __ticket_lock_spinning(lock, inc.tail);
        }
-out:   barrier();      /* make sure nothing creeps before the lock is taken */
+clear_slowpath:
+       __ticket_check_and_clear_slowpath(lock, inc.head);
+out:
+       barrier();      /* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -106,56 +129,30 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
        arch_spinlock_t old, new;
 
        old.tickets = READ_ONCE(lock->tickets);
-       if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
+       if (!__tickets_equal(old.tickets.head, old.tickets.tail))
                return 0;
 
        new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
+       new.head_tail &= ~TICKET_SLOWPATH_FLAG;
 
        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
-                                           arch_spinlock_t old)
-{
-       arch_spinlock_t new;
-
-       BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
-
-       /* Perform the unlock on the "before" copy */
-       old.tickets.head += TICKET_LOCK_INC;
-
-       /* Clear the slowpath flag */
-       new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
-
-       /*
-        * If the lock is uncontended, clear the flag - use cmpxchg in
-        * case it changes behind our back though.
-        */
-       if (new.tickets.head != new.tickets.tail ||
-           cmpxchg(&lock->head_tail, old.head_tail,
-                                       new.head_tail) != old.head_tail) {
-               /*
-                * Lock still has someone queued for it, so wake up an
-                * appropriate waiter.
-                */
-               __ticket_unlock_kick(lock, old.tickets.head);
-       }
-}
-
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        if (TICKET_SLOWPATH_FLAG &&
-           static_key_false(&paravirt_ticketlocks_enabled)) {
-               arch_spinlock_t prev;
+               static_key_false(&paravirt_ticketlocks_enabled)) {
+               __ticket_t head;
 
-               prev = *lock;
-               add_smp(&lock->tickets.head, TICKET_LOCK_INC);
+               BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
 
-               /* add_smp() is a full mb() */
+               head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
 
-               if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
-                       __ticket_unlock_slowpath(lock, prev);
+               if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
+                       head &= ~TICKET_SLOWPATH_FLAG;
+                       __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
+               }
        } else
                __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
@@ -164,14 +161,15 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-       return tmp.tail != tmp.head;
+       return !__tickets_equal(tmp.tail, tmp.head);
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-       return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
+       tmp.head &= ~TICKET_SLOWPATH_FLAG;
+       return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 #define arch_spin_is_contended arch_spin_is_contended
 
@@ -191,8 +189,8 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
                 * We need to check "unlocked" in a loop, tmp.head == head
                 * can be false positive because of overflow.
                 */
-               if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) ||
-                   tmp.head != head)
+               if (__tickets_equal(tmp.head, tmp.tail) ||
+                               !__tickets_equal(tmp.head, head))
                        break;
 
                cpu_relax();
index 0d592e0a5b84fa3c3738ce8400e34df5bb552eb0..ace9dec050b17b1a766899a946ab83d6bbe31641 100644 (file)
@@ -179,7 +179,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
        asm volatile("call __get_user_%P3"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu)                 \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
-       (x) = (__typeof__(*(ptr))) __val_gu;                            \
+       (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __ret_gu;                                                       \
 })
 
index 225b0988043a0a78ac9092a9af7a265122c685cd..44e6dd7e36a23becd48def85b218b1d70ac938e6 100644 (file)
@@ -7,6 +7,7 @@
 #define SETUP_DTB                      2
 #define SETUP_PCI                      3
 #define SETUP_EFI                      4
+#define SETUP_KASLR                    5
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK       0x07FF
index ae97ed0873c6e3f35e28545b521dda9c25c8d74e..3d525c6124f6c720e02d2761b283151be97cf6d7 100644 (file)
@@ -613,6 +613,11 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
        int rc, irq, trigger, polarity;
 
+       if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+               *irqp = gsi;
+               return 0;
+       }
+
        rc = acpi_get_override_irq(gsi, &trigger, &polarity);
        if (rc == 0) {
                trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
index b5c8ff5e9dfcad79075a1af5f41b0ac0ee37231b..2346c95c6ab1945077fdde28c129342ffe09749d 100644 (file)
@@ -1396,6 +1396,12 @@ void cpu_init(void)
 
        wait_for_master_cpu(cpu);
 
+       /*
+        * Initialize the CR4 shadow before doing anything that could
+        * try to read it.
+        */
+       cr4_init_shadow();
+
        show_ucode_info_early();
 
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);
index 94d7dcb1214530dfa86b4fef2f2bcbf007d32fe4..50163fa9034f0d2f4db28767432a249ad9e0cb35 100644 (file)
@@ -565,8 +565,8 @@ static const struct _tlb_table intel_tlb_table[] = {
        { 0xb2, TLB_INST_4K,            64,     " TLB_INST 4KByte pages, 4-way set associative" },
        { 0xb3, TLB_DATA_4K,            128,    " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0xb4, TLB_DATA_4K,            256,    " TLB_DATA 4 KByte pages, 4-way associative" },
-       { 0xb5, TLB_INST_4K,            64,     " TLB_INST 4 KByte pages, 8-way set ssociative" },
-       { 0xb6, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 8-way set ssociative" },
+       { 0xb5, TLB_INST_4K,            64,     " TLB_INST 4 KByte pages, 8-way set associative" },
+       { 0xb6, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xba, TLB_DATA_4K,            64,     " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xc0, TLB_DATA_4K_4M,         8,      " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
        { 0xc1, STLB_4K_2M,             1024,   " STLB 4 KByte and 2 MByte pages, 8-way associative" },
index c6826d1e8082584268d1b5e8f3abeb176ee61187..746e7fd08aad7082747ee1d9dca80570f1a00e3b 100644 (file)
@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                struct microcode_header_intel mc_header;
                unsigned int mc_size;
 
+               if (leftover < sizeof(mc_header)) {
+                       pr_err("error! Truncated header in microcode data file\n");
+                       break;
+               }
+
                if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
                        break;
 
index ec9df6f9cd47b35e7f4d6059eb094f922420c5cf..420eb933189ca487110607475ddbf33be8e8267b 100644 (file)
@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
        unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
        int i;
 
-       while (leftover) {
+       while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+
+               if (leftover < sizeof(mc_header))
+                       break;
+
                mc_header = (struct microcode_header_intel *)ucode_ptr;
 
                mc_size = get_totalsize(mc_header);
index 000d4199b03e69905527d7972ccd918835e6cc1f..31e2d5bf3e38887ca06402bff6c647b9aa9a3c5c 100644 (file)
@@ -982,6 +982,9 @@ ENTRY(xen_hypervisor_callback)
 ENTRY(xen_do_upcall)
 1:     mov %esp, %eax
        call xen_evtchn_do_upcall
+#ifndef CONFIG_PREEMPT
+       call xen_maybe_preempt_hcall
+#endif
        jmp  ret_from_intr
        CFI_ENDPROC
 ENDPROC(xen_hypervisor_callback)
index db13655c3a2aff4a4475a9adf6ce1cb5b3639220..10074ad9ebf85ed82e552f055baebfa6bb3169f2 100644 (file)
@@ -1208,6 +1208,9 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
        decl PER_CPU_VAR(irq_count)
+#ifndef CONFIG_PREEMPT
+       call xen_maybe_preempt_hcall
+#endif
        jmp  error_exit
        CFI_ENDPROC
 END(xen_do_hypervisor_callback)
index 705ef8d48e2dc464936672fb54eea908f8f03b4e..67b1cbe0093adba1141f8d9ebda29ad34dc9d23e 100644 (file)
@@ -302,6 +302,9 @@ int check_irq_vectors_for_cpu_disable(void)
                irq = __this_cpu_read(vector_irq[vector]);
                if (irq >= 0) {
                        desc = irq_to_desc(irq);
+                       if (!desc)
+                               continue;
+
                        data = irq_desc_get_irq_data(desc);
                        cpumask_copy(&affinity_new, data->affinity);
                        cpu_clear(this_cpu, affinity_new);
index 98f654d466e585167153e58811902675bfeb5baa..4e3d5a9621fe0052fac43d5ad6c109b9d3f54447 100644 (file)
@@ -84,7 +84,7 @@ static volatile u32 twobyte_is_boostable[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
        /*      ----------------------------------------------          */
        W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
-       W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
+       W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
        W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
        W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
@@ -223,27 +223,48 @@ static unsigned long
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
        struct kprobe *kp;
+       unsigned long faddr;
 
        kp = get_kprobe((void *)addr);
-       /* There is no probe, return original address */
-       if (!kp)
+       faddr = ftrace_location(addr);
+       /*
+        * Addresses inside the ftrace location are refused by
+        * arch_check_ftrace_location(). Something went terribly wrong
+        * if such an address is checked here.
+        */
+       if (WARN_ON(faddr && faddr != addr))
+               return 0UL;
+       /*
+        * Use the current code if it is not modified by Kprobe
+        * and it cannot be modified by ftrace.
+        */
+       if (!kp && !faddr)
                return addr;
 
        /*
-        *  Basically, kp->ainsn.insn has an original instruction.
-        *  However, RIP-relative instruction can not do single-stepping
-        *  at different place, __copy_instruction() tweaks the displacement of
-        *  that instruction. In that case, we can't recover the instruction
-        *  from the kp->ainsn.insn.
+        * Basically, kp->ainsn.insn has an original instruction.
+        * However, RIP-relative instruction can not do single-stepping
+        * at different place, __copy_instruction() tweaks the displacement of
+        * that instruction. In that case, we can't recover the instruction
+        * from the kp->ainsn.insn.
         *
-        *  On the other hand, kp->opcode has a copy of the first byte of
-        *  the probed instruction, which is overwritten by int3. And
-        *  the instruction at kp->addr is not modified by kprobes except
-        *  for the first byte, we can recover the original instruction
-        *  from it and kp->opcode.
+        * On the other hand, in case on normal Kprobe, kp->opcode has a copy
+        * of the first byte of the probed instruction, which is overwritten
+        * by int3. And the instruction at kp->addr is not modified by kprobes
+        * except for the first byte, we can recover the original instruction
+        * from it and kp->opcode.
+        *
+        * In case of Kprobes using ftrace, we do not have a copy of
+        * the original instruction. In fact, the ftrace location might
+        * be modified at anytime and even could be in an inconsistent state.
+        * Fortunately, we know that the original code is the ideal 5-byte
+        * long NOP.
         */
-       memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-       buf[0] = kp->opcode;
+       memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       if (faddr)
+               memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+       else
+               buf[0] = kp->opcode;
        return (unsigned long)buf;
 }
 
@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction cannot be recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr)
                 * normally used, we just go through if there is no kprobe.
                 */
                __addr = recover_probed_instruction(buf, addr);
+               if (!__addr)
+                       return 0;
                kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
                insn_get_length(&insn);
 
@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
        unsigned long recovered_insn =
                recover_probed_instruction(buf, (unsigned long)src);
 
+       if (!recovered_insn)
+               return 0;
        kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
        insn_get_length(&insn);
        /* Another subsystem puts a breakpoint, failed to recover */
index 0dd8d089c315e0e9df338e9144799c6e900df831..7b3b9d15c47a63953d6932026cc57db795e3a507 100644 (file)
@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
                         */
                        return 0;
                recovered_insn = recover_probed_instruction(buf, addr);
+               if (!recovered_insn)
+                       return 0;
                kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
                insn_get_length(&insn);
                /* Another subsystem puts a breakpoint */
index 94f6434843008c38b8a091c077e0c94fc36c9a07..e354cc6446aba4286645dc799e48d8539c7005d6 100644 (file)
@@ -609,7 +609,7 @@ static inline void check_zero(void)
        u8 ret;
        u8 old;
 
-       old = ACCESS_ONCE(zero_stats);
+       old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
@@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
        int cpu;
        u64 start;
        unsigned long flags;
+       __ticket_t head;
 
        if (in_nmi())
                return;
@@ -768,11 +769,15 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
         */
        __ticket_enter_slowpath(lock);
 
+       /* make sure enter_slowpath, which is atomic does not cross the read */
+       smp_mb__after_atomic();
+
        /*
         * check again make sure it didn't become free while
         * we weren't looking.
         */
-       if (ACCESS_ONCE(lock->tickets.head) == want) {
+       head = READ_ONCE(lock->tickets.head);
+       if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }
@@ -803,8 +808,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
        add_stats(RELEASED_SLOW, 1);
        for_each_cpu(cpu, &waiting_cpus) {
                const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
-               if (ACCESS_ONCE(w->lock) == lock &&
-                   ACCESS_ONCE(w->want) == ticket) {
+               if (READ_ONCE(w->lock) == lock &&
+                   READ_ONCE(w->want) == ticket) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        kvm_kick_cpu(cpu);
                        break;
index d1ac80b72c72184a0b999c2b299b5e265d26de7a..9bbb9b35c144a4f721ed4e7dcfc07aee6abdc2cb 100644 (file)
@@ -47,21 +47,13 @@ do {                                                        \
 
 #ifdef CONFIG_RANDOMIZE_BASE
 static unsigned long module_load_offset;
-static int randomize_modules = 1;
 
 /* Mutex protects the module_load_offset. */
 static DEFINE_MUTEX(module_kaslr_mutex);
 
-static int __init parse_nokaslr(char *p)
-{
-       randomize_modules = 0;
-       return 0;
-}
-early_param("nokaslr", parse_nokaslr);
-
 static unsigned long int get_module_load_offset(void)
 {
-       if (randomize_modules) {
+       if (kaslr_enabled) {
                mutex_lock(&module_kaslr_mutex);
                /*
                 * Calculate the module_load_offset the first time this
index 0a2421cca01fad095bbb7caa8e7c779d910d751b..98dc9317286e1e0fad25f3d10efaa3134a9134c0 100644 (file)
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
+bool __read_mostly kaslr_enabled = false;
+
 #ifdef CONFIG_DMI
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
@@ -425,6 +427,11 @@ static void __init reserve_initrd(void)
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
+{
+       kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data));
+}
+
 static void __init parse_setup_data(void)
 {
        struct setup_data *data;
@@ -450,6 +457,9 @@ static void __init parse_setup_data(void)
                case SETUP_EFI:
                        parse_efi_setup(pa_data, data_len);
                        break;
+               case SETUP_KASLR:
+                       parse_kaslr_setup(pa_data, data_len);
+                       break;
                default:
                        break;
                }
@@ -832,10 +842,14 @@ static void __init trim_low_memory_range(void)
 static int
 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
-       pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
-                "(relocation range: 0x%lx-0x%lx)\n",
-                (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
-                __START_KERNEL_map, MODULES_VADDR-1);
+       if (kaslr_enabled)
+               pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
+                        (unsigned long)&_text - __START_KERNEL,
+                        __START_KERNEL,
+                        __START_KERNEL_map,
+                        MODULES_VADDR-1);
+       else
+               pr_emerg("Kernel Offset: disabled\n");
 
        return 0;
 }
index 8b96a947021ffe0ad3d05397d3fc44b975a98141..81f8adb0679e548d31af5982297c85141e3693f9 100644 (file)
  * Good-instruction tables for 32-bit apps.  This is non-const and volatile
  * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
+ *
+ * Opcodes we'll probably never support:
+ * 6c-6f - ins,outs. SEGVs if used in userspace
+ * e4-e7 - in,out imm. SEGVs if used in userspace
+ * ec-ef - in,out acc. SEGVs if used in userspace
+ * cc - int3. SIGTRAP if used in userspace
+ * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
+ *     (why we support bound (62) then? it's similar, and similarly unused...)
+ * f1 - int1. SIGTRAP if used in userspace
+ * f4 - hlt. SEGVs if used in userspace
+ * fa - cli. SEGVs if used in userspace
+ * fb - sti. SEGVs if used in userspace
+ *
+ * Opcodes which need some work to be supported:
+ * 07,17,1f - pop es/ss/ds
+ *     Normally not used in userspace, but would execute if used.
+ *     Can cause GP or stack exception if tries to load wrong segment descriptor.
+ *     We hesitate to run them under single step since kernel's handling
+ *     of userspace single-stepping (TF flag) is fragile.
+ *     We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
+ *     on the same grounds that they are never used.
+ * cd - int N.
+ *     Used by userspace for "int 80" syscall entry. (Other "int N"
+ *     cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
+ *     Not supported since kernel's handling of userspace single-stepping
+ *     (TF flag) is fragile.
+ * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
  */
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static volatile u32 good_insns_32[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
+       W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
        W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
-       W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+       W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
-       W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+       W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
        W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
        W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
        W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
        W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
        W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
-       W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+       W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
        W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
-       W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
+       W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
@@ -94,27 +121,61 @@ static volatile u32 good_insns_32[256 / 32] = {
 #define good_insns_32  NULL
 #endif
 
-/* Good-instruction tables for 64-bit apps */
+/* Good-instruction tables for 64-bit apps.
+ *
+ * Genuinely invalid opcodes:
+ * 06,07 - formerly push/pop es
+ * 0e - formerly push cs
+ * 16,17 - formerly push/pop ss
+ * 1e,1f - formerly push/pop ds
+ * 27,2f,37,3f - formerly daa/das/aaa/aas
+ * 60,61 - formerly pusha/popa
+ * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
+ * 82 - formerly redundant encoding of Group1
+ * 9a - formerly call seg:ofs
+ * ce - formerly into
+ * d4,d5 - formerly aam/aad
+ * d6 - formerly undocumented salc
+ * ea - formerly jmp seg:ofs
+ *
+ * Opcodes we'll probably never support:
+ * 6c-6f - ins,outs. SEGVs if used in userspace
+ * e4-e7 - in,out imm. SEGVs if used in userspace
+ * ec-ef - in,out acc. SEGVs if used in userspace
+ * cc - int3. SIGTRAP if used in userspace
+ * f1 - int1. SIGTRAP if used in userspace
+ * f4 - hlt. SEGVs if used in userspace
+ * fa - cli. SEGVs if used in userspace
+ * fb - sti. SEGVs if used in userspace
+ *
+ * Opcodes which need some work to be supported:
+ * cd - int N.
+ *     Used by userspace for "int 80" syscall entry. (Other "int N"
+ *     cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
+ *     Not supported since kernel's handling of userspace single-stepping
+ *     (TF flag) is fragile.
+ * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
+ */
 #if defined(CONFIG_X86_64)
 static volatile u32 good_insns_64[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
+       W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
        W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
-       W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
-       W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
+       W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
+       W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
-       W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+       W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
        W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
-       W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+       W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
        W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
        W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
-       W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
+       W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
        W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
-       W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
-       W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
+       W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
+       W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
@@ -122,49 +183,55 @@ static volatile u32 good_insns_64[256 / 32] = {
 #define good_insns_64  NULL
 #endif
 
-/* Using this for both 64-bit and 32-bit apps */
+/* Using this for both 64-bit and 32-bit apps.
+ * Opcodes we don't support:
+ * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
+ * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
+ *     Also encodes tons of other system insns if mod=11.
+ *     Some are in fact non-system: xend, xtest, rdtscp, maybe more
+ * 0f 05 - syscall
+ * 0f 06 - clts (CPL0 insn)
+ * 0f 07 - sysret
+ * 0f 08 - invd (CPL0 insn)
+ * 0f 09 - wbinvd (CPL0 insn)
+ * 0f 0b - ud2
+ * 0f 30 - wrmsr (CPL0 insn) (then why rdmsr is allowed, it's also CPL0 insn?)
+ * 0f 34 - sysenter
+ * 0f 35 - sysexit
+ * 0f 37 - getsec
+ * 0f 78 - vmread (Intel VMX. CPL0 insn)
+ * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
+ *     Note: with prefixes, these two opcodes are
+ *     extrq/insertq/AVX512 convert vector ops.
+ * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
+ *     {rd,wr}{fs,gs}base,{s,l,m}fence.
+ *     Why? They are all user-executable.
+ */
 static volatile u32 good_2byte_insns[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
-       W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
-       W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
+       W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
+       W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+       W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
        W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
-       W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
+       W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
        W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
-       W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
-       W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
+       W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
+       W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
        W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
-       W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+       W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
        W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
-       W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
+       W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
 #undef W
 
 /*
- * opcodes we'll probably never support:
- *
- *  6c-6d, e4-e5, ec-ed - in
- *  6e-6f, e6-e7, ee-ef - out
- *  cc, cd - int3, int
- *  cf - iret
- *  d6 - illegal instruction
- *  f1 - int1/icebp
- *  f4 - hlt
- *  fa, fb - cli, sti
- *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
- *
- * invalid opcodes in 64-bit mode:
- *
- *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
- *  63 - we support this opcode in x86_64 but not in i386.
- *
  * opcodes we may need to refine support for:
  *
  *  0f - 2-byte instructions: For many of these instructions, the validity
index 4a0890f815c40dc2641d092ff7aeaee5cb6f0039..08f41caada45fae631599fde62f9b0d43902b7cd 100644 (file)
@@ -1,6 +1,6 @@
 config LGUEST_GUEST
        bool "Lguest guest support"
-       depends on X86_32 && PARAVIRT
+       depends on X86_32 && PARAVIRT && PCI
        select TTY
        select VIRTUALIZATION
        select VIRTIO
@@ -8,7 +8,7 @@ config LGUEST_GUEST
        help
          Lguest is a tiny in-kernel hypervisor.  Selecting this will
          allow your kernel to boot under lguest.  This option will increase
-         your kernel size by about 6k.  If in doubt, say N.
+         your kernel size by about 10k.  If in doubt, say N.
 
          If you say Y here, make sure you say Y (or M) to the virtio block
          and net drivers which lguest needs.
index c1c1544b84859e9675ad71ac85cab1568994b623..ac4453d8520efd5e2080ef6f29cfd7da7b154d61 100644 (file)
@@ -56,6 +56,9 @@
 #include <linux/virtio_console.h>
 #include <linux/pm.h>
 #include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+#include <asm/acpi.h>
 #include <asm/apic.h>
 #include <asm/lguest.h>
 #include <asm/paravirt.h>
@@ -71,6 +74,8 @@
 #include <asm/stackprotector.h>
 #include <asm/reboot.h>                /* for struct machine_ops */
 #include <asm/kvm_para.h>
+#include <asm/pci_x86.h>
+#include <asm/pci-direct.h>
 
 /*G:010
  * Welcome to the Guest!
@@ -831,6 +836,24 @@ static struct irq_chip lguest_irq_controller = {
        .irq_unmask     = enable_lguest_irq,
 };
 
+static int lguest_enable_irq(struct pci_dev *dev)
+{
+       u8 line = 0;
+
+       /* We literally use the PCI interrupt line as the irq number. */
+       pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
+       irq_set_chip_and_handler_name(line, &lguest_irq_controller,
+                                     handle_level_irq, "level");
+       dev->irq = line;
+       return 0;
+}
+
+/* We don't do hotplug PCI, so this shouldn't be called. */
+static void lguest_disable_irq(struct pci_dev *dev)
+{
+       WARN_ON(1);
+}
+
 /*
  * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
  * interrupt (except 128, which is used for system calls), and then tells the
@@ -1181,25 +1204,136 @@ static __init char *lguest_memory_setup(void)
        return "LGUEST";
 }
 
+/* Offset within PCI config space of BAR access capability. */
+static int console_cfg_offset = 0;
+static int console_access_cap;
+
+/* Set up so that we access off in bar0 (on bus 0, device 1, function 0) */
+static void set_cfg_window(u32 cfg_offset, u32 off)
+{
+       write_pci_config_byte(0, 1, 0,
+                             cfg_offset + offsetof(struct virtio_pci_cap, bar),
+                             0);
+       write_pci_config(0, 1, 0,
+                        cfg_offset + offsetof(struct virtio_pci_cap, length),
+                        4);
+       write_pci_config(0, 1, 0,
+                        cfg_offset + offsetof(struct virtio_pci_cap, offset),
+                        off);
+}
+
+static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val)
+{
+       /*
+        * We could set this up once, then leave it; nothing else in the
+        * kernel should touch these registers.  But if it went wrong, that
+        * would be a horrible bug to find.
+        */
+       set_cfg_window(cfg_offset, off);
+       write_pci_config(0, 1, 0,
+                        cfg_offset + sizeof(struct virtio_pci_cap), val);
+}
+
+static void probe_pci_console(void)
+{
+       u8 cap, common_cap = 0, device_cap = 0;
+       /* Offset within BAR0 */
+       u32 device_offset;
+       u32 device_len;
+
+       /* Avoid recursive printk into here. */
+       console_cfg_offset = -1;
+
+       if (!early_pci_allowed()) {
+               printk(KERN_ERR "lguest: early PCI access not allowed!\n");
+               return;
+       }
+
+       /* We expect a console PCI device at BUS0, slot 1. */
+       if (read_pci_config(0, 1, 0, 0) != 0x10431AF4) {
+               printk(KERN_ERR "lguest: PCI device is %#x!\n",
+                      read_pci_config(0, 1, 0, 0));
+               return;
+       }
+
+       /* Find the capabilities we need (must be in bar0) */
+       cap = read_pci_config_byte(0, 1, 0, PCI_CAPABILITY_LIST);
+       while (cap) {
+               u8 vndr = read_pci_config_byte(0, 1, 0, cap);
+               if (vndr == PCI_CAP_ID_VNDR) {
+                       u8 type, bar;
+                       u32 offset, length;
+
+                       type = read_pci_config_byte(0, 1, 0,
+                           cap + offsetof(struct virtio_pci_cap, cfg_type));
+                       bar = read_pci_config_byte(0, 1, 0,
+                           cap + offsetof(struct virtio_pci_cap, bar));
+                       offset = read_pci_config(0, 1, 0,
+                           cap + offsetof(struct virtio_pci_cap, offset));
+                       length = read_pci_config(0, 1, 0,
+                           cap + offsetof(struct virtio_pci_cap, length));
+
+                       switch (type) {
+                       case VIRTIO_PCI_CAP_DEVICE_CFG:
+                               if (bar == 0) {
+                                       device_cap = cap;
+                                       device_offset = offset;
+                                       device_len = length;
+                               }
+                               break;
+                       case VIRTIO_PCI_CAP_PCI_CFG:
+                               console_access_cap = cap;
+                               break;
+                       }
+               }
+               cap = read_pci_config_byte(0, 1, 0, cap + PCI_CAP_LIST_NEXT);
+       }
+       if (!device_cap || !console_access_cap) {
+               printk(KERN_ERR "lguest: No caps (%u/%u/%u) in console!\n",
+                      common_cap, device_cap, console_access_cap);
+               return;
+       }
+
+       /*
+        * Note that we can't check features, until we've set the DRIVER
+        * status bit.  We don't want to do that until we have a real driver,
+        * so we just check that the device-specific config has room for
+        * emerg_wr.  If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE
+        * it should ignore the access.
+        */
+       if (device_len < (offsetof(struct virtio_console_config, emerg_wr)
+                         + sizeof(u32))) {
+               printk(KERN_ERR "lguest: console missing emerg_wr field\n");
+               return;
+       }
+
+       console_cfg_offset = device_offset;
+       printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n");
+}
+
 /*
  * We will eventually use the virtio console device to produce console output,
- * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
- * console output.
+ * but before that is set up we use the virtio PCI console's backdoor mmio
+ * access and the "emergency" write facility (which is legal even before the
+ * device is configured).
  */
 static __init int early_put_chars(u32 vtermno, const char *buf, int count)
 {
-       char scratch[17];
-       unsigned int len = count;
+       /* If we couldn't find PCI console, forget it. */
+       if (console_cfg_offset < 0)
+               return count;
 
-       /* We use a nul-terminated string, so we make a copy.  Icky, huh? */
-       if (len > sizeof(scratch) - 1)
-               len = sizeof(scratch) - 1;
-       scratch[len] = '\0';
-       memcpy(scratch, buf, len);
-       hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);
+       if (unlikely(!console_cfg_offset)) {
+               probe_pci_console();
+               if (console_cfg_offset < 0)
+                       return count;
+       }
 
-       /* This routine returns the number of bytes actually written. */
-       return len;
+       write_bar_via_cfg(console_access_cap,
+                         console_cfg_offset
+                         + offsetof(struct virtio_console_config, emerg_wr),
+                         buf[0]);
+       return 1;
 }
 
 /*
@@ -1399,14 +1533,6 @@ __init void lguest_init(void)
        /* Hook in our special panic hypercall code. */
        atomic_notifier_chain_register(&panic_notifier_list, &paniced);
 
-       /*
-        * The IDE code spends about 3 seconds probing for disks: if we reserve
-        * all the I/O ports up front it can't get them and so doesn't probe.
-        * Other device drivers are similar (but less severe).  This cuts the
-        * kernel boot time on my machine from 4.1 seconds to 0.45 seconds.
-        */
-       paravirt_disable_iospace();
-
        /*
         * This is messy CPU setup stuff which the native boot code does before
         * start_kernel, so we have to do, too:
@@ -1436,6 +1562,13 @@ __init void lguest_init(void)
        /* Register our very early console. */
        virtio_cons_early_init(early_put_chars);
 
+       /* Don't let ACPI try to control our PCI interrupts. */
+       disable_acpi();
+
+       /* We control them ourselves, by overriding these two hooks. */
+       pcibios_enable_irq = lguest_enable_irq;
+       pcibios_disable_irq = lguest_disable_irq;
+
        /*
         * Last of all, we set the power management poweroff hook to point to
         * the Guest routine to power off, and the reboot hook to our restart
index 553c094b9cd7984b7334a95122931a93249f1ddf..a110efca6d068f7d881f8c1d955d8a6906457f64 100644 (file)
@@ -238,6 +238,31 @@ static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
        }
 }
 
+static const char *page_size_string(struct map_range *mr)
+{
+       static const char str_1g[] = "1G";
+       static const char str_2m[] = "2M";
+       static const char str_4m[] = "4M";
+       static const char str_4k[] = "4k";
+
+       if (mr->page_size_mask & (1<<PG_LEVEL_1G))
+               return str_1g;
+       /*
+        * 32-bit without PAE has a 4M large page size.
+        * PG_LEVEL_2M is misnamed, but we can at least
+        * print out the right size in the string.
+        */
+       if (IS_ENABLED(CONFIG_X86_32) &&
+           !IS_ENABLED(CONFIG_X86_PAE) &&
+           mr->page_size_mask & (1<<PG_LEVEL_2M))
+               return str_4m;
+
+       if (mr->page_size_mask & (1<<PG_LEVEL_2M))
+               return str_2m;
+
+       return str_4k;
+}
+
 static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                                     unsigned long start,
                                     unsigned long end)
@@ -333,8 +358,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
                                mr[i].start, mr[i].end - 1,
-                       (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-                        (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+                               page_size_string(&mr[i]));
 
        return nr_range;
 }
index 919b91205cd4be57760c50956eddb2d02dc13c45..df4552bd239e03b4a02e6505454e41420d530461 100644 (file)
@@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = {
        .flags = -1,
 };
 
-static unsigned int stack_maxrandom_size(void)
+static unsigned long stack_maxrandom_size(void)
 {
-       unsigned int max = 0;
+       unsigned long max = 0;
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
-               max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
+               max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
        }
 
        return max;
index 85afde1fa3e5f6465e40a0599e17669a934b88b9..a62e0be3a2f1b4f563ab5f6fda0acb59612ea523 100644 (file)
@@ -5,6 +5,7 @@ obj-y   += geode/
 obj-y  += goldfish/
 obj-y  += iris/
 obj-y  += intel-mid/
+obj-y  += intel-quark/
 obj-y  += olpc/
 obj-y  += scx200/
 obj-y  += sfi/
index 5fcda7272550a79b52660030571adeaf9d0f197b..86d0f9e08dd95eb1023d5ac7ec4fb006aafb72c9 100644 (file)
@@ -91,167 +91,6 @@ ENTRY(efi_call)
        ret
 ENDPROC(efi_call)
 
-#ifdef CONFIG_EFI_MIXED
-
-/*
- * We run this function from the 1:1 mapping.
- *
- * This function must be invoked with a 1:1 mapped stack.
- */
-ENTRY(__efi64_thunk)
-       movl    %ds, %eax
-       push    %rax
-       movl    %es, %eax
-       push    %rax
-       movl    %ss, %eax
-       push    %rax
-
-       subq    $32, %rsp
-       movl    %esi, 0x0(%rsp)
-       movl    %edx, 0x4(%rsp)
-       movl    %ecx, 0x8(%rsp)
-       movq    %r8, %rsi
-       movl    %esi, 0xc(%rsp)
-       movq    %r9, %rsi
-       movl    %esi,  0x10(%rsp)
-
-       sgdt    save_gdt(%rip)
-
-       leaq    1f(%rip), %rbx
-       movq    %rbx, func_rt_ptr(%rip)
-
-       /* Switch to gdt with 32-bit segments */
-       movl    64(%rsp), %eax
-       lgdt    (%rax)
-
-       leaq    efi_enter32(%rip), %rax
-       pushq   $__KERNEL_CS
-       pushq   %rax
-       lretq
-
-1:     addq    $32, %rsp
-
-       lgdt    save_gdt(%rip)
-
-       pop     %rbx
-       movl    %ebx, %ss
-       pop     %rbx
-       movl    %ebx, %es
-       pop     %rbx
-       movl    %ebx, %ds
-
-       /*
-        * Convert 32-bit status code into 64-bit.
-        */
-       test    %rax, %rax
-       jz      1f
-       movl    %eax, %ecx
-       andl    $0x0fffffff, %ecx
-       andl    $0xf0000000, %eax
-       shl     $32, %rax
-       or      %rcx, %rax
-1:
-       ret
-ENDPROC(__efi64_thunk)
-
-ENTRY(efi_exit32)
-       movq    func_rt_ptr(%rip), %rax
-       push    %rax
-       mov     %rdi, %rax
-       ret
-ENDPROC(efi_exit32)
-
-       .code32
-/*
- * EFI service pointer must be in %edi.
- *
- * The stack should represent the 32-bit calling convention.
- */
-ENTRY(efi_enter32)
-       movl    $__KERNEL_DS, %eax
-       movl    %eax, %ds
-       movl    %eax, %es
-       movl    %eax, %ss
-
-       /* Reload pgtables */
-       movl    %cr3, %eax
-       movl    %eax, %cr3
-
-       /* Disable paging */
-       movl    %cr0, %eax
-       btrl    $X86_CR0_PG_BIT, %eax
-       movl    %eax, %cr0
-
-       /* Disable long mode via EFER */
-       movl    $MSR_EFER, %ecx
-       rdmsr
-       btrl    $_EFER_LME, %eax
-       wrmsr
-
-       call    *%edi
-
-       /* We must preserve return value */
-       movl    %eax, %edi
-
-       /*
-        * Some firmware will return with interrupts enabled. Be sure to
-        * disable them before we switch GDTs.
-        */
-       cli
-
-       movl    68(%esp), %eax
-       movl    %eax, 2(%eax)
-       lgdtl   (%eax)
-
-       movl    %cr4, %eax
-       btsl    $(X86_CR4_PAE_BIT), %eax
-       movl    %eax, %cr4
-
-       movl    %cr3, %eax
-       movl    %eax, %cr3
-
-       movl    $MSR_EFER, %ecx
-       rdmsr
-       btsl    $_EFER_LME, %eax
-       wrmsr
-
-       xorl    %eax, %eax
-       lldt    %ax
-
-       movl    72(%esp), %eax
-       pushl   $__KERNEL_CS
-       pushl   %eax
-
-       /* Enable paging */
-       movl    %cr0, %eax
-       btsl    $X86_CR0_PG_BIT, %eax
-       movl    %eax, %cr0
-       lret
-ENDPROC(efi_enter32)
-
-       .data
-       .balign 8
-       .global efi32_boot_gdt
-efi32_boot_gdt:        .word   0
-               .quad   0
-
-save_gdt:      .word   0
-               .quad   0
-func_rt_ptr:   .quad   0
-
-       .global efi_gdt64
-efi_gdt64:
-       .word   efi_gdt64_end - efi_gdt64
-       .long   0                       /* Filled out by user */
-       .word   0
-       .quad   0x0000000000000000      /* NULL descriptor */
-       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
-       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
-       .quad   0x0080890000000000      /* TS descriptor */
-       .quad   0x0000000000000000      /* TS continued */
-efi_gdt64_end:
-#endif /* CONFIG_EFI_MIXED */
-
        .data
 ENTRY(efi_scratch)
        .fill 3,8,0
index 8806fa73e6e6d22337ff69c708583adf3c38cbbf..ff85d28c50f261c728eb0731391706b1af3777af 100644 (file)
@@ -1,9 +1,26 @@
 /*
  * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ *
+ * Support for invoking 32-bit EFI runtime services from a 64-bit
+ * kernel.
+ *
+ * The below thunking functions are only used after ExitBootServices()
+ * has been called. This simplifies things considerably as compared with
+ * the early EFI thunking because we can leave all the kernel state
+ * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
+ * services from __KERNEL32_CS. This means we can continue to service
+ * interrupts across an EFI mixed mode call.
+ *
+ * We do however, need to handle the fact that we're running in a full
+ * 64-bit virtual address space. Things like the stack and instruction
+ * addresses need to be accessible by the 32-bit firmware, so we rely on
+ * using the identity mappings in the EFI page table to access the stack
+ * and kernel text (see efi_setup_page_tables()).
  */
 
 #include <linux/linkage.h>
 #include <asm/page_types.h>
+#include <asm/segment.h>
 
        .text
        .code64
@@ -33,14 +50,6 @@ ENTRY(efi64_thunk)
        leaq    efi_exit32(%rip), %rbx
        subq    %rax, %rbx
        movl    %ebx, 8(%rsp)
-       leaq    efi_gdt64(%rip), %rbx
-       subq    %rax, %rbx
-       movl    %ebx, 2(%ebx)
-       movl    %ebx, 4(%rsp)
-       leaq    efi_gdt32(%rip), %rbx
-       subq    %rax, %rbx
-       movl    %ebx, 2(%ebx)
-       movl    %ebx, (%rsp)
 
        leaq    __efi64_thunk(%rip), %rbx
        subq    %rax, %rbx
@@ -52,14 +61,92 @@ ENTRY(efi64_thunk)
        retq
 ENDPROC(efi64_thunk)
 
-       .data
-efi_gdt32:
-       .word   efi_gdt32_end - efi_gdt32
-       .long   0                       /* Filled out above */
-       .word   0
-       .quad   0x0000000000000000      /* NULL descriptor */
-       .quad   0x00cf9a000000ffff      /* __KERNEL_CS */
-       .quad   0x00cf93000000ffff      /* __KERNEL_DS */
-efi_gdt32_end:
+/*
+ * We run this function from the 1:1 mapping.
+ *
+ * This function must be invoked with a 1:1 mapped stack.
+ */
+ENTRY(__efi64_thunk)
+       movl    %ds, %eax
+       push    %rax
+       movl    %es, %eax
+       push    %rax
+       movl    %ss, %eax
+       push    %rax
+
+       subq    $32, %rsp
+       movl    %esi, 0x0(%rsp)
+       movl    %edx, 0x4(%rsp)
+       movl    %ecx, 0x8(%rsp)
+       movq    %r8, %rsi
+       movl    %esi, 0xc(%rsp)
+       movq    %r9, %rsi
+       movl    %esi,  0x10(%rsp)
+
+       leaq    1f(%rip), %rbx
+       movq    %rbx, func_rt_ptr(%rip)
+
+       /* Switch to 32-bit descriptor */
+       pushq   $__KERNEL32_CS
+       leaq    efi_enter32(%rip), %rax
+       pushq   %rax
+       lretq
+
+1:     addq    $32, %rsp
+
+       pop     %rbx
+       movl    %ebx, %ss
+       pop     %rbx
+       movl    %ebx, %es
+       pop     %rbx
+       movl    %ebx, %ds
 
+       /*
+        * Convert 32-bit status code into 64-bit.
+        */
+       test    %rax, %rax
+       jz      1f
+       movl    %eax, %ecx
+       andl    $0x0fffffff, %ecx
+       andl    $0xf0000000, %eax
+       shl     $32, %rax
+       or      %rcx, %rax
+1:
+       ret
+ENDPROC(__efi64_thunk)
+
+ENTRY(efi_exit32)
+       movq    func_rt_ptr(%rip), %rax
+       push    %rax
+       mov     %rdi, %rax
+       ret
+ENDPROC(efi_exit32)
+
+       .code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+       movl    $__KERNEL_DS, %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       movl    %eax, %ss
+
+       call    *%edi
+
+       /* We must preserve return value */
+       movl    %eax, %edi
+
+       movl    72(%esp), %eax
+       pushl   $__KERNEL_CS
+       pushl   %eax
+
+       lret
+ENDPROC(efi_enter32)
+
+       .data
+       .balign 8
+func_rt_ptr:           .quad 0
 efi_saved_sp:          .quad 0
index 1bbedc4b0f88d46bee5000779c4ef5aa4e4d0411..3005f0c89f2ecfbcc817c7e379c97586d5524e7f 100644 (file)
@@ -130,7 +130,7 @@ static void intel_mid_arch_setup(void)
                intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
        else {
                intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-               pr_info("ARCH: Uknown SoC, assuming PENWELL!\n");
+               pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
        }
 
 out:
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile
new file mode 100644 (file)
index 0000000..9cc57ed
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IMR) += imr.o
+obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
new file mode 100644 (file)
index 0000000..0ee619f
--- /dev/null
@@ -0,0 +1,661 @@
+/**
+ * imr.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR registers define an isolated region of memory that can
+ * be masked to prohibit certain system agents from accessing memory.
+ * When a device behind a masked port performs an access - snooped or
+ * not, an IMR may optionally prevent that transaction from changing
+ * the state of memory or from getting correct data in response to the
+ * operation.
+ *
+ * Write data will be dropped and reads will return 0xFFFFFFFF, the
+ * system will reset and system BIOS will print out an error message to
+ * inform the user that an IMR has been violated.
+ *
+ * This code is based on the Linux MTRR code and reference code from
+ * Intel's Quark BSP EFI, Linux and grub code.
+ *
+ * See quark-x1000-datasheet.pdf for register definitions.
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm-generic/sections.h>
+#include <asm/cpu_device_id.h>
+#include <asm/imr.h>
+#include <asm/iosf_mbi.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+struct imr_device {
+       struct dentry   *file;
+       bool            init;
+       struct mutex    lock;
+       int             max_imr;
+       int             reg_base;
+};
+
+static struct imr_device imr_dev;
+
+/*
+ * IMR read/write mask control registers.
+ * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
+ * bit definitions.
+ *
+ * addr_lo
+ * 31          Lock bit
+ * 30:24       Reserved
+ * 23:2                1 KiB aligned lo address
+ * 1:0         Reserved
+ *
+ * addr_hi
+ * 31:24       Reserved
+ * 23:2                1 KiB aligned hi address
+ * 1:0         Reserved
+ */
+#define IMR_LOCK       BIT(31)
+
+struct imr_regs {
+       u32 addr_lo;
+       u32 addr_hi;
+       u32 rmask;
+       u32 wmask;
+};
+
+#define IMR_NUM_REGS   (sizeof(struct imr_regs)/sizeof(u32))
+#define IMR_SHIFT      8
+#define imr_to_phys(x) ((x) << IMR_SHIFT)
+#define phys_to_imr(x) ((x) >> IMR_SHIFT)
+
+/**
+ * imr_is_enabled - true if an IMR is enabled false otherwise.
+ *
+ * Determines if an IMR is enabled based on address range and read/write
+ * mask. An IMR set with an address range set to zero and a read/write
+ * access mask set to all is considered to be disabled. An IMR in any
+ * other state - for example set to zero but without read/write access
+ * all is considered to be enabled. This definition of disabled is how
+ * firmware switches off an IMR and is maintained in kernel for
+ * consistency.
+ *
+ * @imr:       pointer to IMR descriptor.
+ * @return:    true if IMR enabled false if disabled.
+ */
+static inline int imr_is_enabled(struct imr_regs *imr)
+{
+       return !(imr->rmask == IMR_READ_ACCESS_ALL &&
+                imr->wmask == IMR_WRITE_ACCESS_ALL &&
+                imr_to_phys(imr->addr_lo) == 0 &&
+                imr_to_phys(imr->addr_hi) == 0);
+}
+
+/**
+ * imr_read - read an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @imr_id:    IMR entry to read.
+ * @imr:       IMR structure representing address and access masks.
+ * @return:    0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
+{
+       u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+       int ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->addr_lo);
+       if (ret)
+               return ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->addr_hi);
+       if (ret)
+               return ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->rmask);
+       if (ret)
+               return ret;
+
+       return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->wmask);
+}
+
+/**
+ * imr_write - write an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ * Note lock bits need to be written independently of address bits.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @imr_id:    IMR entry to write.
+ * @imr:       IMR structure representing address and access masks.
+ * @lock:      indicates if the IMR lock bit should be applied.
+ * @return:    0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_write(struct imr_device *idev, u32 imr_id,
+                    struct imr_regs *imr, bool lock)
+{
+       unsigned long flags;
+       u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+       int ret;
+
+       local_irq_save(flags);
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++,
+                               imr->addr_lo);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->addr_hi);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->rmask);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->wmask);
+       if (ret)
+               goto failed;
+
+       /* Lock bit must be set separately to addr_lo address bits. */
+       if (lock) {
+               imr->addr_lo |= IMR_LOCK;
+               ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                                       reg - IMR_NUM_REGS, imr->addr_lo);
+               if (ret)
+                       goto failed;
+       }
+
+       local_irq_restore(flags);
+       return 0;
+failed:
+       /*
+        * If writing to the IOSF failed then we're in an unknown state,
+        * likely a very bad state. An IMR in an invalid state will almost
+        * certainly lead to a memory access violation.
+        */
+       local_irq_restore(flags);
+       WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
+            imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
+
+       return ret;
+}
+
+/**
+ * imr_dbgfs_state_show - print state of IMR registers.
+ *
+ * @s:         pointer to seq_file for output.
+ * @unused:    unused parameter.
+ * @return:    0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
+{
+       phys_addr_t base;
+       phys_addr_t end;
+       int i;
+       struct imr_device *idev = s->private;
+       struct imr_regs imr;
+       size_t size;
+       int ret = -ENODEV;
+
+       mutex_lock(&idev->lock);
+
+       for (i = 0; i < idev->max_imr; i++) {
+
+               ret = imr_read(idev, i, &imr);
+               if (ret)
+                       break;
+
+               /*
+                * Remember to add IMR_ALIGN bytes to size to indicate the
+                * inherent IMR_ALIGN size bytes contained in the masked away
+                * lower ten bits.
+                */
+               if (imr_is_enabled(&imr)) {
+                       base = imr_to_phys(imr.addr_lo);
+                       end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+               } else {
+                       base = 0;
+                       end = 0;
+               }
+               size = end - base;
+               seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
+                          "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
+                          &base, &end, size, imr.rmask, imr.wmask,
+                          imr_is_enabled(&imr) ? "enabled " : "disabled",
+                          imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
+       }
+
+       mutex_unlock(&idev->lock);
+       return ret;
+}
+
+/**
+ * imr_state_open - debugfs open callback.
+ *
+ * @inode:     pointer to struct inode.
+ * @file:      pointer to struct file.
+ * @return:    result of single open.
+ */
+static int imr_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, imr_dbgfs_state_show, inode->i_private);
+}
+
+static const struct file_operations imr_state_ops = {
+       .open           = imr_state_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+/**
+ * imr_debugfs_register - register debugfs hooks.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:    0 on success - errno on failure.
+ */
+static int imr_debugfs_register(struct imr_device *idev)
+{
+       idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL,
+                                        idev, &imr_state_ops);
+       return PTR_ERR_OR_ZERO(idev->file);
+}
+
+/**
+ * imr_debugfs_unregister - unregister debugfs hooks.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:
+ */
+static void imr_debugfs_unregister(struct imr_device *idev)
+{
+       debugfs_remove(idev->file);
+}
+
+/**
+ * imr_check_params - check passed address range IMR alignment and non-zero size
+ *
+ * @base:      base address of intended IMR.
+ * @size:      size of intended IMR.
+ * @return:    zero on valid range -EINVAL on unaligned base/size.
+ */
+static int imr_check_params(phys_addr_t base, size_t size)
+{
+       if ((base & IMR_MASK) || (size & IMR_MASK)) {
+               pr_err("base %pa size 0x%08zx must align to 1KiB\n",
+                       &base, size);
+               return -EINVAL;
+       }
+       if (size == 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
+ *
+ * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
+ * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
+ * as a result.
+ *
+ * @size:      input size bytes.
+ * @return:    reduced size.
+ */
+static inline size_t imr_raw_size(size_t size)
+{
+       return size - IMR_ALIGN;
+}
+
+/**
+ * imr_address_overlap - detects an address overlap.
+ *
+ * @addr:      address to check against an existing IMR.
+ * @imr:       imr being checked.
+ * @return:    true for overlap false for no overlap.
+ */
+static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
+{
+       return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
+}
+
+/**
+ * imr_add_range - add an Isolated Memory Region.
+ *
+ * @base:      physical base address of region aligned to 1KiB.
+ * @size:      physical size of region in bytes must be aligned to 1KiB.
+ * @read_mask: read access mask.
+ * @write_mask:        write access mask.
+ * @lock:      indicates whether or not to permanently lock this region.
+ * @return:    zero on success or negative value indicating error.
+ */
+int imr_add_range(phys_addr_t base, size_t size,
+                 unsigned int rmask, unsigned int wmask, bool lock)
+{
+       phys_addr_t end;
+       unsigned int i;
+       struct imr_device *idev = &imr_dev;
+       struct imr_regs imr;
+       size_t raw_size;
+       int reg;
+       int ret;
+
+       if (WARN_ONCE(idev->init == false, "driver not initialized"))
+               return -ENODEV;
+
+       ret = imr_check_params(base, size);
+       if (ret)
+               return ret;
+
+       /* Tweak the size value. */
+       raw_size = imr_raw_size(size);
+       end = base + raw_size;
+
+       /*
+        * Check for reserved IMR value common to firmware, kernel and grub
+        * indicating a disabled IMR.
+        */
+       imr.addr_lo = phys_to_imr(base);
+       imr.addr_hi = phys_to_imr(end);
+       imr.rmask = rmask;
+       imr.wmask = wmask;
+       if (!imr_is_enabled(&imr))
+               return -ENOTSUPP;
+
+       mutex_lock(&idev->lock);
+
+       /*
+        * Find a free IMR while checking for an existing overlapping range.
+        * Note there's no restriction in silicon to prevent IMR overlaps.
+        * For the sake of simplicity and ease in defining/debugging an IMR
+        * memory map we exclude IMR overlaps.
+        */
+       reg = -1;
+       for (i = 0; i < idev->max_imr; i++) {
+               ret = imr_read(idev, i, &imr);
+               if (ret)
+                       goto failed;
+
+               /* Find overlap @ base or end of requested range. */
+               ret = -EINVAL;
+               if (imr_is_enabled(&imr)) {
+                       if (imr_address_overlap(base, &imr))
+                               goto failed;
+                       if (imr_address_overlap(end, &imr))
+                               goto failed;
+               } else {
+                       reg = i;
+               }
+       }
+
+       /* Error out if we have no free IMR entries. */
+       if (reg == -1) {
+               ret = -ENOMEM;
+               goto failed;
+       }
+
+       pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
+                reg, &base, &end, raw_size, rmask, wmask);
+
+       /* Enable IMR at specified range and access mask. */
+       imr.addr_lo = phys_to_imr(base);
+       imr.addr_hi = phys_to_imr(end);
+       imr.rmask = rmask;
+       imr.wmask = wmask;
+
+       ret = imr_write(idev, reg, &imr, lock);
+       if (ret < 0) {
+               /*
+                * In the highly unlikely event iosf_mbi_write failed
+                * attempt to rollback the IMR setup skipping the trapping
+                * of further IOSF write failures.
+                */
+               imr.addr_lo = 0;
+               imr.addr_hi = 0;
+               imr.rmask = IMR_READ_ACCESS_ALL;
+               imr.wmask = IMR_WRITE_ACCESS_ALL;
+               imr_write(idev, reg, &imr, false);
+       }
+failed:
+       mutex_unlock(&idev->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(imr_add_range);
+
+/**
+ * __imr_remove_range - delete an Isolated Memory Region.
+ *
+ * This function allows you to delete an IMR by its index specified by reg or
+ * by address range specified by base and size respectively. If you specify an
+ * index on its own the base and size parameters are ignored.
+ * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored.
+ * imr_remove_range(-1, base, size); delete IMR from base to base+size.
+ *
+ * @reg:       imr index to remove.
+ * @base:      physical base address of region aligned to 1 KiB.
+ * @size:      physical size of region in bytes aligned to 1 KiB.
+ * @return:    -EINVAL on invalid range or out of range id
+ *             -ENODEV if reg is valid but no IMR exists or is locked
+ *             0 on success.
+ */
+static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
+{
+       phys_addr_t end;
+       bool found = false;
+       unsigned int i;
+       struct imr_device *idev = &imr_dev;
+       struct imr_regs imr;
+       size_t raw_size;
+       int ret = 0;
+
+       if (WARN_ONCE(idev->init == false, "driver not initialized"))
+               return -ENODEV;
+
+       /*
+        * Validate address range if deleting by address, else we are
+        * deleting by index where base and size will be ignored.
+        */
+       if (reg == -1) {
+               ret = imr_check_params(base, size);
+               if (ret)
+                       return ret;
+       }
+
+       /* Tweak the size value. */
+       raw_size = imr_raw_size(size);
+       end = base + raw_size;
+
+       mutex_lock(&idev->lock);
+
+       if (reg >= 0) {
+               /* If a specific IMR is given try to use it. */
+               ret = imr_read(idev, reg, &imr);
+               if (ret)
+                       goto failed;
+
+               if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
+                       ret = -ENODEV;
+                       goto failed;
+               }
+               found = true;
+       } else {
+               /* Search for match based on address range. */
+               for (i = 0; i < idev->max_imr; i++) {
+                       ret = imr_read(idev, i, &imr);
+                       if (ret)
+                               goto failed;
+
+                       if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
+                               continue;
+
+                       if ((imr_to_phys(imr.addr_lo) == base) &&
+                           (imr_to_phys(imr.addr_hi) == end)) {
+                               found = true;
+                               reg = i;
+                               break;
+                       }
+               }
+       }
+
+       if (!found) {
+               ret = -ENODEV;
+               goto failed;
+       }
+
+       pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
+
+       /* Tear down the IMR. */
+       imr.addr_lo = 0;
+       imr.addr_hi = 0;
+       imr.rmask = IMR_READ_ACCESS_ALL;
+       imr.wmask = IMR_WRITE_ACCESS_ALL;
+
+       ret = imr_write(idev, reg, &imr, false);
+
+failed:
+       mutex_unlock(&idev->lock);
+       return ret;
+}
+
+/**
+ * imr_remove_range - delete an Isolated Memory Region by address
+ *
+ * This function allows you to delete an IMR by an address range specified
+ * by base and size respectively.
+ * imr_remove_range(base, size); delete IMR from base to base+size.
+ *
+ * @base:      physical base address of region aligned to 1 KiB.
+ * @size:      physical size of region in bytes aligned to 1 KiB.
+ * @return:    -EINVAL on invalid range or out of range id
+ *             -ENODEV if reg is valid but no IMR exists or is locked
+ *             0 on success.
+ */
+int imr_remove_range(phys_addr_t base, size_t size)
+{
+       return __imr_remove_range(-1, base, size);
+}
+EXPORT_SYMBOL_GPL(imr_remove_range);
+
+/**
+ * imr_clear - delete an Isolated Memory Region by index
+ *
+ * This function allows you to delete an IMR by an address range specified
+ * by the index of the IMR. Useful for initial sanitization of the IMR
+ * address map.
+ * imr_clear(reg); delete IMR at index reg.
+ *
+ * @reg:       imr index to remove.
+ * @return:    -EINVAL on invalid range or out of range id
+ *             -ENODEV if reg is valid but no IMR exists or is locked
+ *             0 on success.
+ */
+static inline int imr_clear(int reg)
+{
+       return __imr_remove_range(reg, 0, 0);
+}
+
+/**
+ * imr_fixup_memmap - Tear down IMRs used during bootup.
+ *
+ * BIOS and Grub both setup IMRs around compressed kernel, initrd memory
+ * that need to be removed before the kernel hands out one of the IMR
+ * encased addresses to a downstream DMA agent such as the SD or Ethernet.
+ * IMRs on Galileo are setup to immediately reset the system on violation.
+ * As a result if you're running a root filesystem from SD - you'll need
+ * the boot-time IMRs torn down or you'll find seemingly random resets when
+ * using your filesystem.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:
+ */
+static void __init imr_fixup_memmap(struct imr_device *idev)
+{
+       phys_addr_t base = virt_to_phys(&_text);
+       size_t size = virt_to_phys(&__end_rodata) - base;
+       int i;
+       int ret;
+
+       /* Tear down all existing unlocked IMRs. */
+       for (i = 0; i < idev->max_imr; i++)
+               imr_clear(i);
+
+       /*
+        * Setup a locked IMR around the physical extent of the kernel
+        * from the beginning of the .text section to the end of the
+        * .rodata section as one physically contiguous block.
+        */
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+       if (ret < 0) {
+               pr_err("unable to setup IMR for kernel: (%p - %p)\n",
+                       &_text, &__end_rodata);
+       } else {
+               pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
+                       size / 1024, &_text, &__end_rodata);
+       }
+
+}
+
+static const struct x86_cpu_id imr_ids[] __initconst = {
+       { X86_VENDOR_INTEL, 5, 9 },     /* Intel Quark SoC X1000. */
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, imr_ids);
+
+/**
+ * imr_init - entry point for IMR driver.
+ *
+ * return: -ENODEV for no IMR support 0 if good to go.
+ */
+static int __init imr_init(void)
+{
+       struct imr_device *idev = &imr_dev;
+       int ret;
+
+       if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
+               return -ENODEV;
+
+       idev->max_imr = QUARK_X1000_IMR_MAX;
+       idev->reg_base = QUARK_X1000_IMR_REGBASE;
+       idev->init = true;
+
+       mutex_init(&idev->lock);
+       ret = imr_debugfs_register(idev);
+       if (ret != 0)
+               pr_warn("debugfs register failed!\n");
+       imr_fixup_memmap(idev);
+       return 0;
+}
+
+/**
+ * imr_exit - exit point for IMR code.
+ *
+ * Deregisters debugfs, leave IMR state as-is.
+ *
+ * return:
+ */
+static void __exit imr_exit(void)
+{
+       imr_debugfs_unregister(&imr_dev);
+}
+
+module_init(imr_init);
+module_exit(imr_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
new file mode 100644 (file)
index 0000000..c9a0838
--- /dev/null
@@ -0,0 +1,129 @@
+/**
+ * imr_selftest.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR self test. The purpose of this module is to run a set of tests on the
+ * IMR API to validate its sanity. We check for overlapping, reserved
+ * addresses and setup/teardown sanity.
+ *
+ */
+
+#include <asm-generic/sections.h>
+#include <asm/imr.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define SELFTEST KBUILD_MODNAME ": "
+/**
+ * imr_self_test_result - Print result string for self test.
+ *
+ * @res:       result code - true if test passed false otherwise.
+ * @fmt:       format string.
+ * ...         variadic argument list.
+ */
+static void __init imr_self_test_result(int res, const char *fmt, ...)
+{
+       va_list vlist;
+
+       /* Print pass/fail. */
+       if (res)
+               pr_info(SELFTEST "pass ");
+       else
+               pr_info(SELFTEST "fail ");
+
+       /* Print variable string. */
+       va_start(vlist, fmt);
+       vprintk(fmt, vlist);
+       va_end(vlist);
+
+       /* Optional warning. */
+       WARN(res == 0, "test failed");
+}
+#undef SELFTEST
+
+/**
+ * imr_self_test
+ *
+ * Verify IMR self_test with some simple tests to verify overlap,
+ * zero sized allocations and 1 KiB sized areas.
+ *
+ */
+static void __init imr_self_test(void)
+{
+       phys_addr_t base  = virt_to_phys(&_text);
+       size_t size = virt_to_phys(&__end_rodata) - base;
+       const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
+       int ret;
+
+       /* Test zero zero. */
+       ret = imr_add_range(0, 0, 0, 0, false);
+       imr_self_test_result(ret < 0, "zero sized IMR\n");
+
+       /* Test exact overlap. */
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test overlap with base inside of existing. */
+       base += size - IMR_ALIGN;
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test overlap with end inside of existing. */
+       base -= size + IMR_ALIGN * 2;
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
+       ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
+                           IMR_WRITE_ACCESS_ALL, false);
+       imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
+
+       /* Test that a 1 KiB IMR @ zero with CPU only will work. */
+       ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
+       if (ret >= 0) {
+               ret = imr_remove_range(0, IMR_ALIGN);
+               imr_self_test_result(ret == 0, "teardown - cpu-access\n");
+       }
+
+       /* Test 2 KiB works. */
+       size = IMR_ALIGN * 2;
+       ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
+                           IMR_WRITE_ACCESS_ALL, false);
+       imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
+       if (ret >= 0) {
+               ret = imr_remove_range(0, size);
+               imr_self_test_result(ret == 0, "teardown 2KiB\n");
+       }
+}
+
+/**
+ * imr_self_test_init - entry point for IMR driver.
+ *
+ * return: -ENODEV for no IMR support 0 if good to go.
+ */
+static int __init imr_self_test_init(void)
+{
+       imr_self_test();
+       return 0;
+}
+
+/**
+ * imr_self_test_exit - exit point for IMR code.
+ *
+ * return:
+ */
+static void __exit imr_self_test_exit(void)
+{
+}
+
+module_init(imr_self_test_init);
+module_exit(imr_self_test_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
+MODULE_LICENSE("Dual BSD/GPL");
index bd8b8459c3d05923286b4dc6b5ebb7e37e51d926..5240f563076de2e0e27c92af2d04ad03d213ee8f 100644 (file)
@@ -1070,6 +1070,23 @@ static inline void xen_write_cr8(unsigned long val)
        BUG_ON(val);
 }
 #endif
+
+static u64 xen_read_msr_safe(unsigned int msr, int *err)
+{
+       u64 val;
+
+       val = native_read_msr_safe(msr, err);
+       switch (msr) {
+       case MSR_IA32_APICBASE:
+#ifdef CONFIG_X86_X2APIC
+               if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
+#endif
+                       val &= ~X2APIC_ENABLE;
+               break;
+       }
+       return val;
+}
+
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
        int ret;
@@ -1240,7 +1257,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
        .wbinvd = native_wbinvd,
 
-       .read_msr = native_read_msr_safe,
+       .read_msr = xen_read_msr_safe,
        .write_msr = xen_write_msr_safe,
 
        .read_tsc = native_read_tsc,
@@ -1741,6 +1758,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 #ifdef CONFIG_X86_32
        i386_start_kernel();
 #else
+       cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
        x86_64_start_reservations((char *)__pa_symbol(&boot_params));
 #endif
 }
index 23b45eb9a89ce4d56f8a73e181d004b107dc1f81..956374c1edbc31e4c1eb50c3fb29cb8828ad44b5 100644 (file)
@@ -41,7 +41,7 @@ static u8 zero_stats;
 static inline void check_zero(void)
 {
        u8 ret;
-       u8 old = ACCESS_ONCE(zero_stats);
+       u8 old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
@@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
        struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
        int cpu = smp_processor_id();
        u64 start;
+       __ticket_t head;
        unsigned long flags;
 
        /* If kicker interrupts not initialized yet, just spin */
@@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
         */
        __ticket_enter_slowpath(lock);
 
+       /* make sure enter_slowpath, which is atomic, does not cross the read */
+       smp_mb__after_atomic();
+
        /*
         * check again make sure it didn't become free while
         * we weren't looking
         */
-       if (ACCESS_ONCE(lock->tickets.head) == want) {
+       head = READ_ONCE(lock->tickets.head);
+       if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }
@@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
                const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
                /* Make sure we read lock before want */
-               if (ACCESS_ONCE(w->lock) == lock &&
-                   ACCESS_ONCE(w->want) == next) {
+               if (READ_ONCE(w->lock) == lock &&
+                   READ_ONCE(w->want) == next) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                        break;
index 876eb380aa26702a79955fcd79e15b90a6adb4c5..147b26ed9c91f033cbb34b4b9506e302599baa23 100644 (file)
 #define get_fs()       (current->thread.current_ds)
 #define set_fs(val)    (current->thread.current_ds = (val))
 
-#define segment_eq(a,b)        ((a).seg == (b).seg)
+#define segment_eq(a, b)       ((a).seg == (b).seg)
 
 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __user_ok(addr,size) \
+#define __user_ok(addr, size) \
                (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
-#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
-#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
+#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
 
 /*
  * These are the main single-value transfer routines.  They
  * (a) re-use the arguments for side effects (sizeof is ok)
  * (b) require any knowledge of processes at this stage
  */
-#define put_user(x,ptr)        __put_user_check((x),(ptr),sizeof(*(ptr)))
-#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
+#define put_user(x, ptr)       __put_user_check((x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
 
 /*
  * The "__xxx" versions of the user access functions are versions that
  * with a separate "access_ok()" call (this is used when we do multiple
  * accesses to the same area of user memory).
  */
-#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
 
 extern long __put_user_bad(void);
 
-#define __put_user_nocheck(x,ptr,size)                 \
+#define __put_user_nocheck(x, ptr, size)               \
 ({                                                     \
        long __pu_err;                                  \
-       __put_user_size((x),(ptr),(size),__pu_err);     \
+       __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
 })
 
-#define __put_user_check(x,ptr,size)                           \
-({                                                             \
-       long __pu_err = -EFAULT;                                \
-       __typeof__(*(ptr)) *__pu_addr = (ptr);                  \
-       if (access_ok(VERIFY_WRITE,__pu_addr,size))             \
-               __put_user_size((x),__pu_addr,(size),__pu_err); \
-       __pu_err;                                               \
+#define __put_user_check(x, ptr, size)                                 \
+({                                                                     \
+       long __pu_err = -EFAULT;                                        \
+       __typeof__(*(ptr)) *__pu_addr = (ptr);                          \
+       if (access_ok(VERIFY_WRITE, __pu_addr, size))                   \
+               __put_user_size((x), __pu_addr, (size), __pu_err);      \
+       __pu_err;                                                       \
 })
 
-#define __put_user_size(x,ptr,size,retval)                             \
+#define __put_user_size(x, ptr, size, retval)                          \
 do {                                                                   \
        int __cb;                                                       \
        retval = 0;                                                     \
        switch (size) {                                                 \
-       case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb);  break;      \
-       case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break;      \
-       case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break;      \
+       case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb);  break; \
+       case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
+       case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
        case 8: {                                                       \
                     __typeof__(*ptr) __v64 = x;                        \
-                    retval = __copy_to_user(ptr,&__v64,8);             \
+                    retval = __copy_to_user(ptr, &__v64, 8);           \
                     break;                                             \
                }                                                       \
        default: __put_user_bad();                                      \
@@ -316,35 +316,35 @@ __asm__ __volatile__(                                     \
        :"=r" (err), "=r" (cb)                          \
        :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
 
-#define __get_user_nocheck(x,ptr,size)                         \
+#define __get_user_nocheck(x, ptr, size)                       \
 ({                                                             \
        long __gu_err, __gu_val;                                \
-       __get_user_size(__gu_val,(ptr),(size),__gu_err);        \
-       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+       __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
 })
 
-#define __get_user_check(x,ptr,size)                                   \
+#define __get_user_check(x, ptr, size)                                 \
 ({                                                                     \
        long __gu_err = -EFAULT, __gu_val = 0;                          \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
-       if (access_ok(VERIFY_READ,__gu_addr,size))                      \
-               __get_user_size(__gu_val,__gu_addr,(size),__gu_err);    \
-       (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
+       if (access_ok(VERIFY_READ, __gu_addr, size))                    \
+               __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
 })
 
 extern long __get_user_bad(void);
 
-#define __get_user_size(x,ptr,size,retval)                             \
+#define __get_user_size(x, ptr, size, retval)                          \
 do {                                                                   \
        int __cb;                                                       \
        retval = 0;                                                     \
        switch (size) {                                                 \
-       case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb);  break;     \
-       case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break;     \
-       case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb);  break;     \
-       case 8: retval = __copy_from_user(&x,ptr,8);    break;  \
+       case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
+       case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
+       case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
+       case 8: retval = __copy_from_user(&x, ptr, 8);    break;        \
        default: (x) = __get_user_bad();                                \
        }                                                               \
 } while (0)
@@ -390,19 +390,19 @@ __asm__ __volatile__(                     \
  */
 
 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)
+#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
 
 
 static inline unsigned long
 __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
 {
-       return __copy_user(to,from,n);
+       return __copy_user(to, from, n);
 }
 
 static inline unsigned long
 __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
 {
-       return __copy_user(to,from,n);
+       return __copy_user(to, from, n);
 }
 
 static inline unsigned long
@@ -410,7 +410,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
 {
        prefetch(from);
        if (access_ok(VERIFY_WRITE, to, n))
-               return __copy_user(to,from,n);
+               return __copy_user(to, from, n);
        return n;
 }
 
@@ -419,18 +419,18 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 {
        prefetchw(to);
        if (access_ok(VERIFY_READ, from, n))
-               return __copy_user(to,from,n);
+               return __copy_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
 }
 
-#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
-#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
-#define __copy_to_user(to,from,n) \
-       __generic_copy_to_user_nocheck((to),(from),(n))
-#define __copy_from_user(to,from,n) \
-       __generic_copy_from_user_nocheck((to),(from),(n))
+#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
+#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
+#define __copy_to_user(to, from, n) \
+       __generic_copy_to_user_nocheck((to), (from), (n))
+#define __copy_from_user(to, from, n) \
+       __generic_copy_from_user_nocheck((to), (from), (n))
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
index 9273d0969ebd6377a02cf0be189d34ea619afe3e..5b9c6d5c3636ad6412c7b898c44e4709b35597d1 100644 (file)
@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
        struct blkg_rwstat rwstat = { }, tmp;
        int i, cpu;
 
+       if (tg->stats_cpu == NULL)
+               return 0;
+
        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
index b18cd2151ddb244e1961c48165ab973d88880d71..623b117ad1a23ee09ac7aca1ca5d028b51ec2738 100644 (file)
@@ -55,6 +55,7 @@ acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y                         += video_detect.o
 endif
+acpi-y                         += acpi_lpat.o
 
 # These are (potentially) separate modules
 
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
new file mode 100644 (file)
index 0000000..feb61c1
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * acpi_lpat.c - LPAT table processing functions
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_lpat.h>
+
+/**
+ * acpi_lpat_raw_to_temp(): Return temperature from raw value through
+ * LPAT conversion table
+ *
+ * @lpat_table: the temperature_raw mapping table structure
+ * @raw: the raw value, used as a key to get the temperature from the
+ *       above mapping table
+ *
+ * A positive converted temperature value will be returned on success,
+ * a negative errno will be returned in error cases.
+ */
+int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+                         int raw)
+{
+       int i, delta_temp, delta_raw, temp;
+       struct acpi_lpat *lpat = lpat_table->lpat;
+
+       for (i = 0; i < lpat_table->lpat_count - 1; i++) {
+               if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
+                   (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
+                       break;
+       }
+
+       if (i == lpat_table->lpat_count - 1)
+               return -ENOENT;
+
+       delta_temp = lpat[i+1].temp - lpat[i].temp;
+       delta_raw = lpat[i+1].raw - lpat[i].raw;
+       temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
+
+       return temp;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp);
+
+/**
+ * acpi_lpat_temp_to_raw(): Return raw value from temperature through
+ * LPAT conversion table
+ *
+ * @lpat: the temperature_raw mapping table
+ * @temp: the temperature, used as a key to get the raw value from the
+ *        above mapping table
+ *
+ * A positive converted temperature value will be returned on success,
+ * a negative errno will be returned in error cases.
+ */
+int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+                         int temp)
+{
+       int i, delta_temp, delta_raw, raw;
+       struct acpi_lpat *lpat = lpat_table->lpat;
+
+       for (i = 0; i < lpat_table->lpat_count - 1; i++) {
+               if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
+                       break;
+       }
+
+       if (i ==  lpat_table->lpat_count - 1)
+               return -ENOENT;
+
+       delta_temp = lpat[i+1].temp - lpat[i].temp;
+       delta_raw = lpat[i+1].raw - lpat[i].raw;
+       raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
+
+       return raw;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_temp_to_raw);
+
+/**
+ * acpi_lpat_get_conversion_table(): Parse ACPI LPAT table if present.
+ *
+ * @handle: Handle to acpi device
+ *
+ * Parse LPAT table to a struct of type acpi_lpat_table. On success
+ * it returns a pointer to newly allocated table. This table must
+ * be freed by the caller when finished processing, using a call to
+ * acpi_lpat_free_conversion_table.
+ */
+struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
+                                                                 handle)
+{
+       struct acpi_lpat_conversion_table *lpat_table = NULL;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj_p, *obj_e;
+       int *lpat, i;
+       acpi_status status;
+
+       status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
+       if (ACPI_FAILURE(status))
+               return NULL;
+
+       obj_p = (union acpi_object *)buffer.pointer;
+       if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
+           (obj_p->package.count % 2) || (obj_p->package.count < 4))
+               goto out;
+
+       lpat = kcalloc(obj_p->package.count, sizeof(int), GFP_KERNEL);
+       if (!lpat)
+               goto out;
+
+       for (i = 0; i < obj_p->package.count; i++) {
+               obj_e = &obj_p->package.elements[i];
+               if (obj_e->type != ACPI_TYPE_INTEGER) {
+                       kfree(lpat);
+                       goto out;
+               }
+               lpat[i] = (s64)obj_e->integer.value;
+       }
+
+       lpat_table = kzalloc(sizeof(*lpat_table), GFP_KERNEL);
+       if (!lpat_table) {
+               kfree(lpat);
+               goto out;
+       }
+
+       lpat_table->lpat = (struct acpi_lpat *)lpat;
+       lpat_table->lpat_count = obj_p->package.count / 2;
+
+out:
+       kfree(buffer.pointer);
+       return lpat_table;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_get_conversion_table);
+
+/**
+ * acpi_lpat_free_conversion_table(): Free LPAT table.
+ *
+ * @lpat_table: the temperature_raw mapping table structure
+ *
+ * Frees the LPAT table previously allocated by a call to
+ * acpi_lpat_get_conversion_table.
+ */
+void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+                                    *lpat_table)
+{
+       if (lpat_table) {
+               kfree(lpat_table->lpat);
+               kfree(lpat_table);
+       }
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table);
+
+MODULE_LICENSE("GPL");
index 02e835f3cf8aa76326b9994768b75f2d39c39fed..657964e8ab7ed2ba9e56004d5f83881ad826e7b7 100644 (file)
@@ -105,7 +105,7 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
        }
 }
 
-static void byt_i2c_setup(struct lpss_private_data *pdata)
+static void lpss_deassert_reset(struct lpss_private_data *pdata)
 {
        unsigned int offset;
        u32 val;
@@ -114,9 +114,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
        val = readl(pdata->mmio_base + offset);
        val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
        writel(val, pdata->mmio_base + offset);
+}
+
+#define LPSS_I2C_ENABLE                        0x6c
+
+static void byt_i2c_setup(struct lpss_private_data *pdata)
+{
+       lpss_deassert_reset(pdata);
 
        if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
                pdata->fixed_clk_rate = 133000000;
+
+       writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
 }
 
 static struct lpss_device_desc lpt_dev_desc = {
@@ -125,7 +134,7 @@ static struct lpss_device_desc lpt_dev_desc = {
 };
 
 static struct lpss_device_desc lpt_i2c_dev_desc = {
-       .flags = LPSS_CLK | LPSS_LTR,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
        .prv_offset = 0x800,
 };
 
@@ -166,6 +175,12 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
        .setup = byt_i2c_setup,
 };
 
+static struct lpss_device_desc bsw_spi_dev_desc = {
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+       .prv_offset = 0x400,
+       .setup = lpss_deassert_reset,
+};
+
 #else
 
 #define LPSS_ADDR(desc) (0UL)
@@ -198,7 +213,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
        /* Braswell LPSS devices */
        { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
        { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
-       { "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
+       { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
        { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
 
        { "INT3430", LPSS_ADDR(lpt_dev_desc) },
index 982b67faaaf32c0de360aa35449a699075a6550a..a8dd2f7633822b05f5fdeeaffe5a00496f4710be 100644 (file)
@@ -680,7 +680,7 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
                /* Enable GPE for event processing (SCI_EVT=1) */
                if (!resuming)
                        acpi_ec_submit_request(ec);
-               pr_info("+++++ EC started +++++\n");
+               pr_debug("EC started\n");
        }
        spin_unlock_irqrestore(&ec->lock, flags);
 }
@@ -712,7 +712,7 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
                        acpi_ec_complete_request(ec);
                clear_bit(EC_FLAGS_STARTED, &ec->flags);
                clear_bit(EC_FLAGS_STOPPED, &ec->flags);
-               pr_info("+++++ EC stopped +++++\n");
+               pr_debug("EC stopped\n");
        }
        spin_unlock_irqrestore(&ec->lock, flags);
 }
index a732e5d7e322937ae66e537664fa02e677fda2bc..bd772cd5649466943ccb2e97ca57abff5e8238fe 100644 (file)
 #include <linux/module.h>
 #include <linux/acpi.h>
 #include <linux/regmap.h>
+#include <acpi/acpi_lpat.h>
 #include "intel_pmic.h"
 
 #define PMIC_POWER_OPREGION_ID         0x8d
 #define PMIC_THERMAL_OPREGION_ID       0x8c
 
-struct acpi_lpat {
-       int temp;
-       int raw;
-};
-
 struct intel_pmic_opregion {
        struct mutex lock;
-       struct acpi_lpat *lpat;
-       int lpat_count;
+       struct acpi_lpat_conversion_table *lpat_table;
        struct regmap *regmap;
        struct intel_pmic_opregion_data *data;
 };
@@ -50,105 +45,6 @@ static int pmic_get_reg_bit(int address, struct pmic_table *table,
        return -ENOENT;
 }
 
-/**
- * raw_to_temp(): Return temperature from raw value through LPAT table
- *
- * @lpat: the temperature_raw mapping table
- * @count: the count of the above mapping table
- * @raw: the raw value, used as a key to get the temerature from the
- *       above mapping table
- *
- * A positive value will be returned on success, a negative errno will
- * be returned in error cases.
- */
-static int raw_to_temp(struct acpi_lpat *lpat, int count, int raw)
-{
-       int i, delta_temp, delta_raw, temp;
-
-       for (i = 0; i < count - 1; i++) {
-               if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
-                   (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
-                       break;
-       }
-
-       if (i == count - 1)
-               return -ENOENT;
-
-       delta_temp = lpat[i+1].temp - lpat[i].temp;
-       delta_raw = lpat[i+1].raw - lpat[i].raw;
-       temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
-
-       return temp;
-}
-
-/**
- * temp_to_raw(): Return raw value from temperature through LPAT table
- *
- * @lpat: the temperature_raw mapping table
- * @count: the count of the above mapping table
- * @temp: the temperature, used as a key to get the raw value from the
- *        above mapping table
- *
- * A positive value will be returned on success, a negative errno will
- * be returned in error cases.
- */
-static int temp_to_raw(struct acpi_lpat *lpat, int count, int temp)
-{
-       int i, delta_temp, delta_raw, raw;
-
-       for (i = 0; i < count - 1; i++) {
-               if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
-                       break;
-       }
-
-       if (i == count - 1)
-               return -ENOENT;
-
-       delta_temp = lpat[i+1].temp - lpat[i].temp;
-       delta_raw = lpat[i+1].raw - lpat[i].raw;
-       raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
-
-       return raw;
-}
-
-static void pmic_thermal_lpat(struct intel_pmic_opregion *opregion,
-                             acpi_handle handle, struct device *dev)
-{
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *obj_p, *obj_e;
-       int *lpat, i;
-       acpi_status status;
-
-       status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
-       if (ACPI_FAILURE(status))
-               return;
-
-       obj_p = (union acpi_object *)buffer.pointer;
-       if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
-           (obj_p->package.count % 2) || (obj_p->package.count < 4))
-               goto out;
-
-       lpat = devm_kmalloc(dev, sizeof(int) * obj_p->package.count,
-                           GFP_KERNEL);
-       if (!lpat)
-               goto out;
-
-       for (i = 0; i < obj_p->package.count; i++) {
-               obj_e = &obj_p->package.elements[i];
-               if (obj_e->type != ACPI_TYPE_INTEGER) {
-                       devm_kfree(dev, lpat);
-                       goto out;
-               }
-               lpat[i] = (s64)obj_e->integer.value;
-       }
-
-       opregion->lpat = (struct acpi_lpat *)lpat;
-       opregion->lpat_count = obj_p->package.count / 2;
-
-out:
-       kfree(buffer.pointer);
-}
-
 static acpi_status intel_pmic_power_handler(u32 function,
                acpi_physical_address address, u32 bits, u64 *value64,
                void *handler_context, void *region_context)
@@ -192,12 +88,12 @@ static int pmic_read_temp(struct intel_pmic_opregion *opregion,
        if (raw_temp < 0)
                return raw_temp;
 
-       if (!opregion->lpat) {
+       if (!opregion->lpat_table) {
                *value = raw_temp;
                return 0;
        }
 
-       temp = raw_to_temp(opregion->lpat, opregion->lpat_count, raw_temp);
+       temp = acpi_lpat_raw_to_temp(opregion->lpat_table, raw_temp);
        if (temp < 0)
                return temp;
 
@@ -223,9 +119,8 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
        if (!opregion->data->update_aux)
                return -ENXIO;
 
-       if (opregion->lpat) {
-               raw_temp = temp_to_raw(opregion->lpat, opregion->lpat_count,
-                                      *value);
+       if (opregion->lpat_table) {
+               raw_temp = acpi_lpat_temp_to_raw(opregion->lpat_table, *value);
                if (raw_temp < 0)
                        return raw_temp;
        } else {
@@ -314,6 +209,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 {
        acpi_status status;
        struct intel_pmic_opregion *opregion;
+       int ret;
 
        if (!dev || !regmap || !d)
                return -EINVAL;
@@ -327,14 +223,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 
        mutex_init(&opregion->lock);
        opregion->regmap = regmap;
-       pmic_thermal_lpat(opregion, handle, dev);
+       opregion->lpat_table = acpi_lpat_get_conversion_table(handle);
 
        status = acpi_install_address_space_handler(handle,
                                                    PMIC_POWER_OPREGION_ID,
                                                    intel_pmic_power_handler,
                                                    NULL, opregion);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
+       if (ACPI_FAILURE(status)) {
+               ret = -ENODEV;
+               goto out_error;
+       }
 
        status = acpi_install_address_space_handler(handle,
                                                    PMIC_THERMAL_OPREGION_ID,
@@ -343,11 +241,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
        if (ACPI_FAILURE(status)) {
                acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
                                                  intel_pmic_power_handler);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto out_error;
        }
 
        opregion->data = d;
        return 0;
+
+out_error:
+       acpi_lpat_free_conversion_table(opregion->lpat_table);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
 
index 4752b99399870efd068a1f1a6c656b1ebe333349..c723668e3e277def6f8d6309fe1af21b989951fb 100644 (file)
@@ -46,7 +46,7 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
        if (len && reslen && reslen == len && start <= end)
                return true;
 
-       pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
+       pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
                io ? "io" : "mem", start, end, len);
 
        return false;
index 88a4f99dd2a7ccc117924a73b47ca0d5a93c8df5..debd30917010a17697102bc84d1e468c69c94d17 100644 (file)
@@ -540,6 +540,15 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
                },
        },
+       {
+        /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"),
+               },
+       },
 
        {
         /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
index 0ee48be23837e2e6655f137d047a27dc7c3df60d..9be17d3431bb000a4a18cb515fbb7fe9f7eda0d6 100644 (file)
@@ -1,6 +1,6 @@
 config BCMA_POSSIBLE
        bool
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM && HAS_DMA && PCI
        default y
 
 menu "Broadcom specific AMBA"
@@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE
 config BCMA_HOST_PCI
        bool "Support for BCMA on PCI-host bus"
        depends on BCMA_HOST_PCI_POSSIBLE
+       select BCMA_DRIVER_PCI
        default y
 
 config BCMA_DRIVER_PCI_HOSTMODE
@@ -44,6 +45,22 @@ config BCMA_HOST_SOC
 
          If unsure, say N
 
+# TODO: make it depend on PCI when ready
+config BCMA_DRIVER_PCI
+       bool
+       default y
+       help
+         BCMA bus may have many versions of PCIe core. This driver
+         supports:
+         1) PCIe core working in clientmode
+         2) PCIe Gen 2 clientmode core
+
+         In general PCIe (Gen 2) clientmode core is required on PCIe
+         hosted buses. It's responsible for initialization and basic
+         hardware management.
+         This driver is also prerequisite for a hostmode PCIe core
+         support.
+
 config BCMA_DRIVER_MIPS
        bool "BCMA Broadcom MIPS core driver"
        depends on BCMA && MIPS
index 838b4b9d352ffc1d6c7790426bde8502712cc8dc..f32af9b76bcd2aaa7e0da61e26b2e33174803f7f 100644 (file)
@@ -3,8 +3,8 @@ bcma-y                                  += driver_chipcommon.o driver_chipcommon_pmu.o
 bcma-y                                 += driver_chipcommon_b.o
 bcma-$(CONFIG_BCMA_SFLASH)             += driver_chipcommon_sflash.o
 bcma-$(CONFIG_BCMA_NFLASH)             += driver_chipcommon_nflash.o
-bcma-y                                 += driver_pci.o
-bcma-y                                 += driver_pcie2.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI)         += driver_pci.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI)         += driver_pcie2.o
 bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE)        += driver_pci_host.o
 bcma-$(CONFIG_BCMA_DRIVER_MIPS)                += driver_mips.o
 bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN)    += driver_gmac_cmn.o
index ac6c5fca906d015d585f35fd9b57dc6f68d9d70b..5a1d22489afc78d3a9a059655c6d661bcaf139b1 100644 (file)
@@ -26,6 +26,7 @@ bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
                     int timeout);
 void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core);
 void bcma_init_bus(struct bcma_bus *bus);
+void bcma_unregister_cores(struct bcma_bus *bus);
 int bcma_bus_register(struct bcma_bus *bus);
 void bcma_bus_unregister(struct bcma_bus *bus);
 int __init bcma_bus_early_register(struct bcma_bus *bus);
@@ -42,6 +43,9 @@ int bcma_bus_scan(struct bcma_bus *bus);
 int bcma_sprom_get(struct bcma_bus *bus);
 
 /* driver_chipcommon.c */
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
 #ifdef CONFIG_BCMA_DRIVER_MIPS
 void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
 extern struct platform_device bcma_pflash_dev;
@@ -52,6 +56,8 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
 void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb);
 
 /* driver_chipcommon_pmu.c */
+void bcma_pmu_early_init(struct bcma_drv_cc *cc);
+void bcma_pmu_init(struct bcma_drv_cc *cc);
 u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
 u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
 
@@ -101,6 +107,14 @@ static inline void __exit bcma_host_soc_unregister_driver(void)
 
 /* driver_pci.c */
 u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
+void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_up(struct bcma_drv_pci *pc);
+void bcma_core_pci_down(struct bcma_drv_pci *pc);
+
+/* driver_pcie2.c */
+void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2);
 
 extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
 
@@ -117,6 +131,39 @@ static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
 }
 #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
 
+/**************************************************
+ * driver_mips.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+unsigned int bcma_core_mips_irq(struct bcma_device *dev);
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
+void bcma_core_mips_init(struct bcma_drv_mips *mcore);
+#else
+static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+{
+       return 0;
+}
+static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+}
+static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore)
+{
+}
+#endif
+
+/**************************************************
+ * driver_gmac_cmn.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
+void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
+#else
+static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+{
+}
+#endif
+
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);
index 598a6cd9028a70a239f283101d8f65a33e9fdf77..dce34fb52e27facf1136315e4beb729afa79d504 100644 (file)
@@ -76,7 +76,7 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
        bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
 }
 
-#if IS_BUILTIN(CONFIG_BCM47XX)
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
 static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
 {
        struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
@@ -215,7 +215,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        chip->set               = bcma_gpio_set_value;
        chip->direction_input   = bcma_gpio_direction_input;
        chip->direction_output  = bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX)
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
        chip->to_irq            = bcma_gpio_to_irq;
 #endif
 #if IS_BUILTIN(CONFIG_OF)
index 786666488a2dc2f7d0d076fa44096656501493a8..cfd35bc1c5a35752a1ac30f751cce6dcd544a864 100644 (file)
@@ -282,21 +282,21 @@ void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
 
-int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
+int bcma_core_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
                          bool enable)
 {
        struct pci_dev *pdev;
        u32 coremask, tmp;
        int err = 0;
 
-       if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
                /* This bcma device is not on a PCI host-bus. So the IRQs are
                 * not routed through the PCI core.
                 * So we must not enable routing through the PCI core. */
                goto out;
        }
 
-       pdev = pc->core->bus->host_pci;
+       pdev = bus->host_pci;
 
        err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
        if (err)
@@ -328,28 +328,12 @@ static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
        bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
 }
 
-void bcma_core_pci_up(struct bcma_bus *bus)
+void bcma_core_pci_up(struct bcma_drv_pci *pc)
 {
-       struct bcma_drv_pci *pc;
-
-       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
-               return;
-
-       pc = &bus->drv_pci[0];
-
        bcma_core_pci_extend_L1timer(pc, true);
 }
-EXPORT_SYMBOL_GPL(bcma_core_pci_up);
 
-void bcma_core_pci_down(struct bcma_bus *bus)
+void bcma_core_pci_down(struct bcma_drv_pci *pc)
 {
-       struct bcma_drv_pci *pc;
-
-       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
-               return;
-
-       pc = &bus->drv_pci[0];
-
        bcma_core_pci_extend_L1timer(pc, false);
 }
-EXPORT_SYMBOL_GPL(bcma_core_pci_down);
index c8a6b741967b390e20470f15d55d493fa9bc8ce8..c42cec7c7ecc0a88f649b315c8c8616a0439c2f8 100644 (file)
@@ -11,6 +11,7 @@
 
 #include "bcma_private.h"
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 #include <asm/paccess.h>
index e4be537b0c66997700f9e0f4ba7a3a1adfff574e..b1a6e327cb23d44d10a2952d9e0aa5f356528b24 100644 (file)
@@ -10,6 +10,7 @@
 
 #include "bcma_private.h"
 #include <linux/bcma/bcma.h>
+#include <linux/pci.h>
 
 /**************************************************
  * R/W ops.
@@ -156,14 +157,23 @@ static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
 
 void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
 {
-       struct bcma_chipinfo *ci = &pcie2->core->bus->chipinfo;
+       struct bcma_bus *bus = pcie2->core->bus;
+       struct bcma_chipinfo *ci = &bus->chipinfo;
        u32 tmp;
 
        tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
        if ((tmp & 0xe) >> 1 == 2)
                bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);
 
-       /* TODO: Do we need pcie_reqsize? */
+       switch (bus->chipinfo.id) {
+       case BCMA_CHIP_ID_BCM4360:
+       case BCMA_CHIP_ID_BCM4352:
+               pcie2->reqsize = 1024;
+               break;
+       default:
+               pcie2->reqsize = 128;
+               break;
+       }
 
        if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
                bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
@@ -173,3 +183,18 @@ void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
        pciedev_crwlpciegen2_180(pcie2);
        pciedev_crwlpciegen2_182(pcie2);
 }
+
+/**************************************************
+ * Runtime ops.
+ **************************************************/
+
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
+{
+       struct bcma_bus *bus = pcie2->core->bus;
+       struct pci_dev *dev = bus->host_pci;
+       int err;
+
+       err = pcie_set_readrq(dev, pcie2->reqsize);
+       if (err)
+               bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
+}
index 53c6a8a58859bb8b252b9921eb2c96935d099f7f..a62a2f9091f529d8a9c7ccfdb607446accc6f676 100644 (file)
@@ -213,16 +213,26 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
        /* Initialize struct, detect chip */
        bcma_init_bus(bus);
 
+       /* Scan bus to find out generation of PCIe core */
+       err = bcma_bus_scan(bus);
+       if (err)
+               goto err_pci_unmap_mmio;
+
+       if (bcma_find_core(bus, BCMA_CORE_PCIE2))
+               bus->host_is_pcie2 = true;
+
        /* Register */
        err = bcma_bus_register(bus);
        if (err)
-               goto err_pci_unmap_mmio;
+               goto err_unregister_cores;
 
        pci_set_drvdata(dev, bus);
 
 out:
        return err;
 
+err_unregister_cores:
+       bcma_unregister_cores(bus);
 err_pci_unmap_mmio:
        pci_iounmap(dev, bus->mmio);
 err_pci_release_regions:
@@ -283,9 +293,12 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xa8db, BCM43217 (sic!) */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },  /* 0xa8dc */
@@ -310,3 +323,31 @@ void __exit bcma_host_pci_exit(void)
 {
        pci_unregister_driver(&bcma_pci_bridge_driver);
 }
+
+/**************************************************
+ * Runtime ops for drivers.
+ **************************************************/
+
+/* See also pcicore_up */
+void bcma_host_pci_up(struct bcma_bus *bus)
+{
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+               return;
+
+       if (bus->host_is_pcie2)
+               bcma_core_pcie2_up(&bus->drv_pcie2);
+       else
+               bcma_core_pci_up(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_up);
+
+/* See also pcicore_down */
+void bcma_host_pci_down(struct bcma_bus *bus)
+{
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+               return;
+
+       if (!bus->host_is_pcie2)
+               bcma_core_pci_down(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_down);
index 38bde6eab8a41867d92de40f7083e411efe67aeb..9635f1033ce5c46e7aba2863fa04a8bc86421aa9 100644 (file)
@@ -363,7 +363,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
        return 0;
 }
 
-static void bcma_unregister_cores(struct bcma_bus *bus)
+void bcma_unregister_cores(struct bcma_bus *bus)
 {
        struct bcma_device *core, *tmp;
 
index cbdfbbf983927e85a4a83d94d20047f2fadf6357..ceb32dd52a6ca5a777e97541646866985251a9f0 100644 (file)
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
+#define NVME_MINORS            (1U << MINORBITS)
 #define NVME_Q_DEPTH           1024
 #define NVME_AQ_DEPTH          64
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT          (admin_timeout * HZ)
 #define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
-#define IOD_TIMEOUT            (retry_time * HZ)
 
 static unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
@@ -57,10 +58,6 @@ unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
-static unsigned char retry_time = 30;
-module_param(retry_time, byte, 0644);
-MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
-
 static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
@@ -68,6 +65,9 @@ MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown")
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
+static int nvme_char_major;
+module_param(nvme_char_major, int, 0);
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -76,7 +76,8 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
-static struct notifier_block nvme_nb;
+
+static struct class *nvme_class;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -95,7 +96,6 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
-       struct llist_node node;
        struct device *q_dmadev;
        struct nvme_dev *dev;
        char irqname[24];       /* nvme4294967295-65535\0 */
@@ -482,6 +482,115 @@ static int nvme_error_status(u16 status)
        }
 }
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+       if (be32_to_cpu(pi->ref_tag) == v)
+               pi->ref_tag = cpu_to_be32(p);
+}
+
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+       if (be32_to_cpu(pi->ref_tag) == p)
+               pi->ref_tag = cpu_to_be32(v);
+}
+
+/**
+ * nvme_dif_remap - remaps ref tags to bip seed and physical lba
+ *
+ * The virtual start sector is the one that was originally submitted by the
+ * block layer.        Due to partitioning, MD/DM cloning, etc. the actual physical
+ * start sector may be different. Remap protection information to match the
+ * physical LBA on writes, and back to the original seed on reads.
+ *
+ * Type 0 and 3 do not have a ref tag, so no remapping required.
+ */
+static void nvme_dif_remap(struct request *req,
+                       void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+       struct nvme_ns *ns = req->rq_disk->private_data;
+       struct bio_integrity_payload *bip;
+       struct t10_pi_tuple *pi;
+       void *p, *pmap;
+       u32 i, nlb, ts, phys, virt;
+
+       if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
+               return;
+
+       bip = bio_integrity(req->bio);
+       if (!bip)
+               return;
+
+       pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+       if (!pmap)
+               return;
+
+       p = pmap;
+       virt = bip_get_seed(bip);
+       phys = nvme_block_nr(ns, blk_rq_pos(req));
+       nlb = (blk_rq_bytes(req) >> ns->lba_shift);
+       ts = ns->disk->integrity->tuple_size;
+
+       for (i = 0; i < nlb; i++, virt++, phys++) {
+               pi = (struct t10_pi_tuple *)p;
+               dif_swap(phys, virt, pi);
+               p += ts;
+       }
+       kunmap_atomic(pmap);
+}
+
+static int nvme_noop_verify(struct blk_integrity_iter *iter)
+{
+       return 0;
+}
+
+static int nvme_noop_generate(struct blk_integrity_iter *iter)
+{
+       return 0;
+}
+
+struct blk_integrity nvme_meta_noop = {
+       .name                   = "NVME_META_NOOP",
+       .generate_fn            = nvme_noop_generate,
+       .verify_fn              = nvme_noop_verify,
+};
+
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+       struct blk_integrity integrity;
+
+       switch (ns->pi_type) {
+       case NVME_NS_DPS_PI_TYPE3:
+               integrity = t10_pi_type3_crc;
+               break;
+       case NVME_NS_DPS_PI_TYPE1:
+       case NVME_NS_DPS_PI_TYPE2:
+               integrity = t10_pi_type1_crc;
+               break;
+       default:
+               integrity = nvme_meta_noop;
+               break;
+       }
+       integrity.tuple_size = ns->ms;
+       blk_integrity_register(ns->disk, &integrity);
+       blk_queue_max_integrity_segments(ns->queue, 1);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static void nvme_dif_remap(struct request *req,
+                       void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+}
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+}
+#endif
+
 static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
 {
@@ -512,9 +621,16 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                        "completing aborted command with status:%04x\n",
                        status);
 
-       if (iod->nents)
+       if (iod->nents) {
                dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
                        rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               if (blk_integrity_rq(req)) {
+                       if (!rq_data_dir(req))
+                               nvme_dif_remap(req, nvme_dif_complete);
+                       dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+                               rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               }
+       }
        nvme_free_iod(nvmeq->dev, iod);
 
        blk_mq_complete_request(req);
@@ -670,6 +786,24 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
        cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
        cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
        cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+       if (blk_integrity_rq(req)) {
+               cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+               switch (ns->pi_type) {
+               case NVME_NS_DPS_PI_TYPE3:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD;
+                       break;
+               case NVME_NS_DPS_PI_TYPE1:
+               case NVME_NS_DPS_PI_TYPE2:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD |
+                                       NVME_RW_PRINFO_PRCHK_REF;
+                       cmnd->rw.reftag = cpu_to_le32(
+                                       nvme_block_nr(ns, blk_rq_pos(req)));
+                       break;
+               }
+       } else if (ns->ms)
+               control |= NVME_RW_PRINFO_PRACT;
+
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
@@ -690,6 +824,19 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_iod *iod;
        enum dma_data_direction dma_dir;
 
+       /*
+        * If formated with metadata, require the block layer provide a buffer
+        * unless this namespace is formated such that the metadata can be
+        * stripped/generated by the controller with PRACT=1.
+        */
+       if (ns->ms && !blk_integrity_rq(req)) {
+               if (!(ns->pi_type && ns->ms == 8)) {
+                       req->errors = -EFAULT;
+                       blk_mq_complete_request(req);
+                       return BLK_MQ_RQ_QUEUE_OK;
+               }
+       }
+
        iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
        if (!iod)
                return BLK_MQ_RQ_QUEUE_BUSY;
@@ -725,6 +872,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        iod->nents, dma_dir);
                        goto retry_cmd;
                }
+               if (blk_integrity_rq(req)) {
+                       if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+                               goto error_cmd;
+
+                       sg_init_table(iod->meta_sg, 1);
+                       if (blk_rq_map_integrity_sg(
+                                       req->q, req->bio, iod->meta_sg) != 1)
+                               goto error_cmd;
+
+                       if (rq_data_dir(req))
+                               nvme_dif_remap(req, nvme_dif_prep);
+
+                       if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+                               goto error_cmd;
+               }
        }
 
        nvme_set_info(cmd, iod, req_completion);
@@ -817,14 +979,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
        return IRQ_WAKE_THREAD;
 }
 
-static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
-                                                               cmd_info)
-{
-       spin_lock_irq(&nvmeq->q_lock);
-       cancel_cmd_info(cmd_info, NULL);
-       spin_unlock_irq(&nvmeq->q_lock);
-}
-
 struct sync_cmd_info {
        struct task_struct *task;
        u32 result;
@@ -847,7 +1001,6 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
                                                u32 *result, unsigned timeout)
 {
-       int ret;
        struct sync_cmd_info cmdinfo;
        struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -859,29 +1012,12 @@ static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 
        nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
 
-       set_current_state(TASK_KILLABLE);
-       ret = nvme_submit_cmd(nvmeq, cmd);
-       if (ret) {
-               nvme_finish_cmd(nvmeq, req->tag, NULL);
-               set_current_state(TASK_RUNNING);
-       }
-       ret = schedule_timeout(timeout);
-
-       /*
-        * Ensure that sync_completion has either run, or that it will
-        * never run.
-        */
-       nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
-
-       /*
-        * We never got the completion
-        */
-       if (cmdinfo.status == -EINTR)
-               return -EINTR;
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       nvme_submit_cmd(nvmeq, cmd);
+       schedule();
 
        if (result)
                *result = cmdinfo.result;
-
        return cmdinfo.status;
 }
 
@@ -1158,29 +1294,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;
 
-       /*
-        * The aborted req will be completed on receiving the abort req.
-        * We enable the timer again. If hit twice, it'll cause a device reset,
-        * as the device then is in a faulty state.
-        */
-       int ret = BLK_EH_RESET_TIMER;
-
        dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
                                                        nvmeq->qid);
-
        spin_lock_irq(&nvmeq->q_lock);
-       if (!nvmeq->dev->initialized) {
-               /*
-                * Force cancelled command frees the request, which requires we
-                * return BLK_EH_NOT_HANDLED.
-                */
-               nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
-               ret = BLK_EH_NOT_HANDLED;
-       } else
-               nvme_abort_req(req);
+       nvme_abort_req(req);
        spin_unlock_irq(&nvmeq->q_lock);
 
-       return ret;
+       /*
+        * The aborted req will be completed on receiving the abort req.
+        * We enable the timer again. If hit twice, it'll cause a device reset,
+        * as the device then is in a faulty state.
+        */
+       return BLK_EH_RESET_TIMER;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1233,7 +1358,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
        struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
 
        spin_lock_irq(&nvmeq->q_lock);
-       nvme_process_cq(nvmeq);
        if (hctx && hctx->tags)
                blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
@@ -1256,7 +1380,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
        }
        if (!qid && dev->admin_q)
                blk_mq_freeze_queue_start(dev->admin_q);
-       nvme_clear_queue(nvmeq);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       nvme_process_cq(nvmeq);
+       spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
@@ -1875,13 +2002,24 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
        return 0;
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+       u32 logical_block_size = queue_logical_block_size(ns->queue);
+       ns->queue->limits.discard_zeroes_data = 0;
+       ns->queue->limits.discard_alignment = logical_block_size;
+       ns->queue->limits.discard_granularity = logical_block_size;
+       ns->queue->limits.max_discard_sectors = 0xffffffff;
+       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
 static int nvme_revalidate_disk(struct gendisk *disk)
 {
        struct nvme_ns *ns = disk->private_data;
        struct nvme_dev *dev = ns->dev;
        struct nvme_id_ns *id;
        dma_addr_t dma_addr;
-       int lbaf;
+       int lbaf, pi_type, old_ms;
+       unsigned short bs;
 
        id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
                                                                GFP_KERNEL);
@@ -1890,16 +2028,51 @@ static int nvme_revalidate_disk(struct gendisk *disk)
                                                                __func__);
                return 0;
        }
+       if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
+               dev_warn(&dev->pci_dev->dev,
+                       "identify failed ns:%d, setting capacity to 0\n",
+                       ns->ns_id);
+               memset(id, 0, sizeof(*id));
+       }
 
-       if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
-               goto free;
-
-       lbaf = id->flbas & 0xf;
+       old_ms = ns->ms;
+       lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
        ns->lba_shift = id->lbaf[lbaf].ds;
+       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+
+       /*
+        * If identify namespace failed, use default 512 byte block size so
+        * block layer can use before failing read/write for 0 capacity.
+        */
+       if (ns->lba_shift == 0)
+               ns->lba_shift = 9;
+       bs = 1 << ns->lba_shift;
+
+       /* XXX: PI implementation requires metadata equal t10 pi tuple size */
+       pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
+                                       id->dps & NVME_NS_DPS_PI_MASK : 0;
+
+       if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
+                               ns->ms != old_ms ||
+                               bs != queue_logical_block_size(disk->queue) ||
+                               (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+               blk_integrity_unregister(disk);
+
+       ns->pi_type = pi_type;
+       blk_queue_logical_block_size(ns->queue, bs);
+
+       if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
+                               !(id->flbas & NVME_NS_FLBAS_META_EXT))
+               nvme_init_integrity(ns);
+
+       if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
+               set_capacity(disk, 0);
+       else
+               set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+       if (dev->oncs & NVME_CTRL_ONCS_DSM)
+               nvme_config_discard(ns);
 
-       blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-       set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
- free:
        dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
        return 0;
 }
@@ -1923,8 +2096,7 @@ static int nvme_kthread(void *data)
                spin_lock(&dev_list_lock);
                list_for_each_entry_safe(dev, next, &dev_list, node) {
                        int i;
-                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
-                                                       dev->initialized) {
+                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
                                if (work_busy(&dev->reset_work))
                                        continue;
                                list_del_init(&dev->node);
@@ -1956,30 +2128,16 @@ static int nvme_kthread(void *data)
        return 0;
 }
 
-static void nvme_config_discard(struct nvme_ns *ns)
-{
-       u32 logical_block_size = queue_logical_block_size(ns->queue);
-       ns->queue->limits.discard_zeroes_data = 0;
-       ns->queue->limits.discard_alignment = logical_block_size;
-       ns->queue->limits.discard_granularity = logical_block_size;
-       ns->queue->limits.max_discard_sectors = 0xffffffff;
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
-}
-
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
-                       struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 {
        struct nvme_ns *ns;
        struct gendisk *disk;
        int node = dev_to_node(&dev->pci_dev->dev);
-       int lbaf;
-
-       if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
-               return NULL;
 
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
-               return NULL;
+               return;
+
        ns->queue = blk_mq_init_queue(&dev->tagset);
        if (IS_ERR(ns->queue))
                goto out_free_ns;
@@ -1995,9 +2153,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 
        ns->ns_id = nsid;
        ns->disk = disk;
-       lbaf = id->flbas & 0xf;
-       ns->lba_shift = id->lbaf[lbaf].ds;
-       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+       ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
+       list_add_tail(&ns->list, &dev->namespaces);
+
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        if (dev->max_hw_sectors)
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -2011,21 +2169,26 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        disk->fops = &nvme_fops;
        disk->private_data = ns;
        disk->queue = ns->queue;
-       disk->driverfs_dev = &dev->pci_dev->dev;
+       disk->driverfs_dev = dev->device;
        disk->flags = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
-       set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
-       if (dev->oncs & NVME_CTRL_ONCS_DSM)
-               nvme_config_discard(ns);
-
-       return ns;
 
+       /*
+        * Initialize capacity to 0 until we establish the namespace format and
+        * setup integrity extentions if necessary. The revalidate_disk after
+        * add_disk allows the driver to register with integrity if the format
+        * requires it.
+        */
+       set_capacity(disk, 0);
+       nvme_revalidate_disk(ns->disk);
+       add_disk(ns->disk);
+       if (ns->ms)
+               revalidate_disk(ns->disk);
+       return;
  out_free_queue:
        blk_cleanup_queue(ns->queue);
  out_free_ns:
        kfree(ns);
-       return NULL;
 }
 
 static void nvme_create_io_queues(struct nvme_dev *dev)
@@ -2150,22 +2313,20 @@ static int nvme_dev_add(struct nvme_dev *dev)
        struct pci_dev *pdev = dev->pci_dev;
        int res;
        unsigned nn, i;
-       struct nvme_ns *ns;
        struct nvme_id_ctrl *ctrl;
-       struct nvme_id_ns *id_ns;
        void *mem;
        dma_addr_t dma_addr;
        int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-       mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
+       mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
 
        res = nvme_identify(dev, 0, 1, dma_addr);
        if (res) {
                dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
-               res = -EIO;
-               goto out;
+               dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+               return -EIO;
        }
 
        ctrl = mem;
@@ -2191,6 +2352,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
                } else
                        dev->max_hw_sectors = max_hw_sectors;
        }
+       dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
 
        dev->tagset.ops = &nvme_mq_ops;
        dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2203,33 +2365,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
        dev->tagset.driver_data = dev;
 
        if (blk_mq_alloc_tag_set(&dev->tagset))
-               goto out;
-
-       id_ns = mem;
-       for (i = 1; i <= nn; i++) {
-               res = nvme_identify(dev, i, 0, dma_addr);
-               if (res)
-                       continue;
-
-               if (id_ns->ncap == 0)
-                       continue;
-
-               res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-                                                       dma_addr + 4096, NULL);
-               if (res)
-                       memset(mem + 4096, 0, 4096);
+               return 0;
 
-               ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
-               if (ns)
-                       list_add_tail(&ns->list, &dev->namespaces);
-       }
-       list_for_each_entry(ns, &dev->namespaces, list)
-               add_disk(ns->disk);
-       res = 0;
+       for (i = 1; i <= nn; i++)
+               nvme_alloc_ns(dev, i);
 
- out:
-       dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
-       return res;
+       return 0;
 }
 
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -2358,8 +2499,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
        struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-
-       nvme_clear_queue(nvmeq);
        nvme_put_dq(dq);
 }
 
@@ -2502,7 +2641,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        int i;
        u32 csts = -1;
 
-       dev->initialized = 0;
        nvme_dev_list_remove(dev);
 
        if (dev->bar) {
@@ -2513,7 +2651,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                for (i = dev->queue_count - 1; i >= 0; i--) {
                        struct nvme_queue *nvmeq = dev->queues[i];
                        nvme_suspend_queue(nvmeq);
-                       nvme_clear_queue(nvmeq);
                }
        } else {
                nvme_disable_io_queues(dev);
@@ -2521,6 +2658,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                nvme_disable_queue(dev, 0);
        }
        nvme_dev_unmap(dev);
+
+       for (i = dev->queue_count - 1; i >= 0; i--)
+               nvme_clear_queue(dev->queues[i]);
 }
 
 static void nvme_dev_remove(struct nvme_dev *dev)
@@ -2528,8 +2668,11 @@ static void nvme_dev_remove(struct nvme_dev *dev)
        struct nvme_ns *ns;
 
        list_for_each_entry(ns, &dev->namespaces, list) {
-               if (ns->disk->flags & GENHD_FL_UP)
+               if (ns->disk->flags & GENHD_FL_UP) {
+                       if (blk_get_integrity(ns->disk))
+                               blk_integrity_unregister(ns->disk);
                        del_gendisk(ns->disk);
+               }
                if (!blk_queue_dying(ns->queue)) {
                        blk_mq_abort_requeue_list(ns->queue);
                        blk_cleanup_queue(ns->queue);
@@ -2611,6 +2754,7 @@ static void nvme_free_dev(struct kref *kref)
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
        pci_dev_put(dev->pci_dev);
+       put_device(dev->device);
        nvme_free_namespaces(dev);
        nvme_release_instance(dev);
        blk_mq_free_tag_set(&dev->tagset);
@@ -2622,11 +2766,27 @@ static void nvme_free_dev(struct kref *kref)
 
 static int nvme_dev_open(struct inode *inode, struct file *f)
 {
-       struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
-                                                               miscdev);
-       kref_get(&dev->kref);
-       f->private_data = dev;
-       return 0;
+       struct nvme_dev *dev;
+       int instance = iminor(inode);
+       int ret = -ENODEV;
+
+       spin_lock(&dev_list_lock);
+       list_for_each_entry(dev, &dev_list, node) {
+               if (dev->instance == instance) {
+                       if (!dev->admin_q) {
+                               ret = -EWOULDBLOCK;
+                               break;
+                       }
+                       if (!kref_get_unless_zero(&dev->kref))
+                               break;
+                       f->private_data = dev;
+                       ret = 0;
+                       break;
+               }
+       }
+       spin_unlock(&dev_list_lock);
+
+       return ret;
 }
 
 static int nvme_dev_release(struct inode *inode, struct file *f)
@@ -2768,7 +2928,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                nvme_unfreeze_queues(dev);
                nvme_set_irq_hints(dev);
        }
-       dev->initialized = 1;
        return 0;
 }
 
@@ -2799,6 +2958,7 @@ static void nvme_reset_workfn(struct work_struct *work)
        dev->reset_workfn(work);
 }
 
+static void nvme_async_probe(struct work_struct *work);
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
@@ -2834,37 +2994,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto release;
 
        kref_init(&dev->kref);
-       result = nvme_dev_start(dev);
-       if (result)
+       dev->device = device_create(nvme_class, &pdev->dev,
+                               MKDEV(nvme_char_major, dev->instance),
+                               dev, "nvme%d", dev->instance);
+       if (IS_ERR(dev->device)) {
+               result = PTR_ERR(dev->device);
                goto release_pools;
+       }
+       get_device(dev->device);
 
-       if (dev->online_queues > 1)
-               result = nvme_dev_add(dev);
-       if (result)
-               goto shutdown;
-
-       scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
-       dev->miscdev.minor = MISC_DYNAMIC_MINOR;
-       dev->miscdev.parent = &pdev->dev;
-       dev->miscdev.name = dev->name;
-       dev->miscdev.fops = &nvme_dev_fops;
-       result = misc_register(&dev->miscdev);
-       if (result)
-               goto remove;
-
-       nvme_set_irq_hints(dev);
-
-       dev->initialized = 1;
+       INIT_WORK(&dev->probe_work, nvme_async_probe);
+       schedule_work(&dev->probe_work);
        return 0;
 
- remove:
-       nvme_dev_remove(dev);
-       nvme_dev_remove_admin(dev);
-       nvme_free_namespaces(dev);
- shutdown:
-       nvme_dev_shutdown(dev);
  release_pools:
-       nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
  release:
        nvme_release_instance(dev);
@@ -2877,6 +3020,29 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return result;
 }
 
+static void nvme_async_probe(struct work_struct *work)
+{
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
+       int result;
+
+       result = nvme_dev_start(dev);
+       if (result)
+               goto reset;
+
+       if (dev->online_queues > 1)
+               result = nvme_dev_add(dev);
+       if (result)
+               goto reset;
+
+       nvme_set_irq_hints(dev);
+       return;
+ reset:
+       if (!work_busy(&dev->reset_work)) {
+               dev->reset_workfn = nvme_reset_failed_dev;
+               queue_work(nvme_workq, &dev->reset_work);
+       }
+}
+
 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -2902,11 +3068,12 @@ static void nvme_remove(struct pci_dev *pdev)
        spin_unlock(&dev_list_lock);
 
        pci_set_drvdata(pdev, NULL);
+       flush_work(&dev->probe_work);
        flush_work(&dev->reset_work);
-       misc_deregister(&dev->miscdev);
        nvme_dev_shutdown(dev);
        nvme_dev_remove(dev);
        nvme_dev_remove_admin(dev);
+       device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
        nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
@@ -2990,11 +3157,26 @@ static int __init nvme_init(void)
        else if (result > 0)
                nvme_major = result;
 
+       result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
+                                                       &nvme_dev_fops);
+       if (result < 0)
+               goto unregister_blkdev;
+       else if (result > 0)
+               nvme_char_major = result;
+
+       nvme_class = class_create(THIS_MODULE, "nvme");
+       if (!nvme_class)
+               goto unregister_chrdev;
+
        result = pci_register_driver(&nvme_driver);
        if (result)
-               goto unregister_blkdev;
+               goto destroy_class;
        return 0;
 
+ destroy_class:
+       class_destroy(nvme_class);
+ unregister_chrdev:
+       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
  unregister_blkdev:
        unregister_blkdev(nvme_major, "nvme");
  kill_workq:
@@ -3005,9 +3187,10 @@ static int __init nvme_init(void)
 static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
-       unregister_hotcpu_notifier(&nvme_nb);
        unregister_blkdev(nvme_major, "nvme");
        destroy_workqueue(nvme_workq);
+       class_destroy(nvme_class);
+       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
        BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
        _nvme_check_size();
 }
index 5e78568026c339da939a33acd54cbd80891c5a10..e10196e0182d450667cf886421e9475c218f30c8 100644 (file)
@@ -779,10 +779,8 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        struct nvme_dev *dev = ns->dev;
        dma_addr_t dma_addr;
        void *mem;
-       struct nvme_id_ctrl *id_ctrl;
        int res = SNTI_TRANSLATION_SUCCESS;
        int nvme_sc;
-       u8 ieee[4];
        int xfer_len;
        __be32 tmp_id = cpu_to_be32(ns->ns_id);
 
@@ -793,46 +791,60 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                goto out_dma;
        }
 
-       /* nvme controller identify */
-       nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
-       res = nvme_trans_status_code(hdr, nvme_sc);
-       if (res)
-               goto out_free;
-       if (nvme_sc) {
-               res = nvme_sc;
-               goto out_free;
-       }
-       id_ctrl = mem;
-
-       /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
-       ieee[0] = id_ctrl->ieee[0] << 4;
-       ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
-       ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
-       ieee[3] = id_ctrl->ieee[2] >> 4;
-
-       memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+       memset(inq_response, 0, alloc_len);
        inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
-       inq_response[3] = 20;      /* Page Length */
-       /* Designation Descriptor start */
-       inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
-       inq_response[5] = 0x03;    /* PIV=0b | Asso=00b | Designator Type=3h */
-       inq_response[6] = 0x00;    /* Rsvd */
-       inq_response[7] = 16;      /* Designator Length */
-       /* Designator start */
-       inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
-       inq_response[9] = ieee[2];        /* IEEE ID */
-       inq_response[10] = ieee[1];       /* IEEE ID */
-       inq_response[11] = ieee[0];       /* IEEE ID| Vendor Specific ID... */
-       inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
-       inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
-       inq_response[14] = dev->serial[0];
-       inq_response[15] = dev->serial[1];
-       inq_response[16] = dev->model[0];
-       inq_response[17] = dev->model[1];
-       memcpy(&inq_response[18], &tmp_id, sizeof(u32));
-       /* Last 2 bytes are zero */
+       if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
+               struct nvme_id_ns *id_ns = mem;
+               void *eui = id_ns->eui64;
+               int len = sizeof(id_ns->eui64);
 
-       xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+               nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+               res = nvme_trans_status_code(hdr, nvme_sc);
+               if (res)
+                       goto out_free;
+               if (nvme_sc) {
+                       res = nvme_sc;
+                       goto out_free;
+               }
+
+               if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
+                       if (bitmap_empty(eui, len * 8)) {
+                               eui = id_ns->nguid;
+                               len = sizeof(id_ns->nguid);
+                       }
+               }
+               if (bitmap_empty(eui, len * 8))
+                       goto scsi_string;
+
+               inq_response[3] = 4 + len; /* Page Length */
+               /* Designation Descriptor start */
+               inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
+               inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
+               inq_response[6] = 0x00;    /* Rsvd */
+               inq_response[7] = len;     /* Designator Length */
+               memcpy(&inq_response[8], eui, len);
+       } else {
+ scsi_string:
+               if (alloc_len < 72) {
+                       res = nvme_trans_completion(hdr,
+                                       SAM_STAT_CHECK_CONDITION,
+                                       ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+                                       SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+                       goto out_free;
+               }
+               inq_response[3] = 0x48;    /* Page Length */
+               /* Designation Descriptor start */
+               inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
+               inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
+               inq_response[6] = 0x00;    /* Rsvd */
+               inq_response[7] = 0x44;    /* Designator Length */
+
+               sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
+               memcpy(&inq_response[12], dev->model, sizeof(dev->model));
+               sprintf(&inq_response[52], "%04x", tmp_id);
+               memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
+       }
+       xfer_len = alloc_len;
        res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
@@ -1600,7 +1612,7 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
                /* 10 Byte CDB */
                *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
                        parm_list[MODE_SELECT_10_BD_OFFSET + 1];
-               *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &&
+               *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
                                MODE_SELECT_10_LLBAA_MASK;
        } else {
                /* 6 Byte CDB */
@@ -2222,7 +2234,7 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        page_code = GET_INQ_PAGE_CODE(cmd);
        alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-       inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+       inq_response = kmalloc(alloc_len, GFP_KERNEL);
        if (inq_response == NULL) {
                res = -ENOMEM;
                goto out_mem;
index 8a86b62466f7ce72b54853b283e03fd495df8083..b40af3203089c846db053dfac879567230509299 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/module.h>
+#include <linux/blk-mq.h>
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
@@ -340,9 +341,7 @@ struct rbd_device {
 
        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
 
-       struct list_head        rq_queue;       /* incoming rq queue */
        spinlock_t              lock;           /* queue, flags, open_count */
-       struct work_struct      rq_work;
 
        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
@@ -360,6 +359,9 @@ struct rbd_device {
        atomic_t                parent_ref;
        struct rbd_device       *parent;
 
+       /* Block layer tags. */
+       struct blk_mq_tag_set   tag_set;
+
        /* protects updating the header */
        struct rw_semaphore     header_rwsem;
 
@@ -1817,7 +1819,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 
        /*
         * We support a 64-bit length, but ultimately it has to be
-        * passed to blk_end_request(), which takes an unsigned int.
+        * passed to the block layer, which just supports a 32-bit
+        * length field.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);
@@ -2275,7 +2278,10 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
                rbd_assert(img_request->rq != NULL);
-               more = blk_end_request(img_request->rq, result, xferred);
+
+               more = blk_update_request(img_request->rq, result, xferred);
+               if (!more)
+                       __blk_mq_end_request(img_request->rq, result);
        }
 
        return more;
@@ -3304,8 +3310,10 @@ out:
        return ret;
 }
 
-static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
+static void rbd_queue_workfn(struct work_struct *work)
 {
+       struct request *rq = blk_mq_rq_from_pdu(work);
+       struct rbd_device *rbd_dev = rq->q->queuedata;
        struct rbd_img_request *img_request;
        struct ceph_snap_context *snapc = NULL;
        u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
@@ -3314,6 +3322,13 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
        u64 mapping_size;
        int result;
 
+       if (rq->cmd_type != REQ_TYPE_FS) {
+               dout("%s: non-fs request type %d\n", __func__,
+                       (int) rq->cmd_type);
+               result = -EIO;
+               goto err;
+       }
+
        if (rq->cmd_flags & REQ_DISCARD)
                op_type = OBJ_OP_DISCARD;
        else if (rq->cmd_flags & REQ_WRITE)
@@ -3359,6 +3374,8 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
                goto err_rq;    /* Shouldn't happen */
        }
 
+       blk_mq_start_request(rq);
+
        down_read(&rbd_dev->header_rwsem);
        mapping_size = rbd_dev->mapping.size;
        if (op_type != OBJ_OP_READ) {
@@ -3404,53 +3421,18 @@ err_rq:
                rbd_warn(rbd_dev, "%s %llx at %llx result %d",
                         obj_op_name(op_type), length, offset, result);
        ceph_put_snap_context(snapc);
-       blk_end_request_all(rq, result);
+err:
+       blk_mq_end_request(rq, result);
 }
 
-static void rbd_request_workfn(struct work_struct *work)
+static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+               const struct blk_mq_queue_data *bd)
 {
-       struct rbd_device *rbd_dev =
-           container_of(work, struct rbd_device, rq_work);
-       struct request *rq, *next;
-       LIST_HEAD(requests);
-
-       spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
-       list_splice_init(&rbd_dev->rq_queue, &requests);
-       spin_unlock_irq(&rbd_dev->lock);
+       struct request *rq = bd->rq;
+       struct work_struct *work = blk_mq_rq_to_pdu(rq);
 
-       list_for_each_entry_safe(rq, next, &requests, queuelist) {
-               list_del_init(&rq->queuelist);
-               rbd_handle_request(rbd_dev, rq);
-       }
-}
-
-/*
- * Called with q->queue_lock held and interrupts disabled, possibly on
- * the way to schedule().  Do not sleep here!
- */
-static void rbd_request_fn(struct request_queue *q)
-{
-       struct rbd_device *rbd_dev = q->queuedata;
-       struct request *rq;
-       int queued = 0;
-
-       rbd_assert(rbd_dev);
-
-       while ((rq = blk_fetch_request(q))) {
-               /* Ignore any non-FS requests that filter through. */
-               if (rq->cmd_type != REQ_TYPE_FS) {
-                       dout("%s: non-fs request type %d\n", __func__,
-                               (int) rq->cmd_type);
-                       __blk_end_request_all(rq, 0);
-                       continue;
-               }
-
-               list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
-               queued++;
-       }
-
-       if (queued)
-               queue_work(rbd_wq, &rbd_dev->rq_work);
+       queue_work(rbd_wq, work);
+       return BLK_MQ_RQ_QUEUE_OK;
 }
 
 /*
@@ -3511,6 +3493,7 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
                del_gendisk(disk);
                if (disk->queue)
                        blk_cleanup_queue(disk->queue);
+               blk_mq_free_tag_set(&rbd_dev->tag_set);
        }
        put_disk(disk);
 }
@@ -3694,7 +3677,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 
        ret = rbd_dev_header_info(rbd_dev);
        if (ret)
-               return ret;
+               goto out;
 
        /*
         * If there is a parent, see if it has disappeared due to the
@@ -3703,30 +3686,46 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
        if (rbd_dev->parent) {
                ret = rbd_dev_v2_parent_info(rbd_dev);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
-               if (rbd_dev->mapping.size != rbd_dev->header.image_size)
-                       rbd_dev->mapping.size = rbd_dev->header.image_size;
+               rbd_dev->mapping.size = rbd_dev->header.image_size;
        } else {
                /* validate mapped snapshot's EXISTS flag */
                rbd_exists_validate(rbd_dev);
        }
 
+out:
        up_write(&rbd_dev->header_rwsem);
-
-       if (mapping_size != rbd_dev->mapping.size)
+       if (!ret && mapping_size != rbd_dev->mapping.size)
                rbd_dev_update_size(rbd_dev);
 
+       return ret;
+}
+
+static int rbd_init_request(void *data, struct request *rq,
+               unsigned int hctx_idx, unsigned int request_idx,
+               unsigned int numa_node)
+{
+       struct work_struct *work = blk_mq_rq_to_pdu(rq);
+
+       INIT_WORK(work, rbd_queue_workfn);
        return 0;
 }
 
+static struct blk_mq_ops rbd_mq_ops = {
+       .queue_rq       = rbd_queue_rq,
+       .map_queue      = blk_mq_map_queue,
+       .init_request   = rbd_init_request,
+};
+
 static int rbd_init_disk(struct rbd_device *rbd_dev)
 {
        struct gendisk *disk;
        struct request_queue *q;
        u64 segment_size;
+       int err;
 
        /* create gendisk info */
        disk = alloc_disk(single_major ?
@@ -3744,10 +3743,25 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        disk->fops = &rbd_bd_ops;
        disk->private_data = rbd_dev;
 
-       q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
-       if (!q)
+       memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
+       rbd_dev->tag_set.ops = &rbd_mq_ops;
+       rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
+       rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
+       rbd_dev->tag_set.flags =
+               BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+       rbd_dev->tag_set.nr_hw_queues = 1;
+       rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
+
+       err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
+       if (err)
                goto out_disk;
 
+       q = blk_mq_init_queue(&rbd_dev->tag_set);
+       if (IS_ERR(q)) {
+               err = PTR_ERR(q);
+               goto out_tag_set;
+       }
+
        /* We use the default size, but let's be explicit about it. */
        blk_queue_physical_block_size(q, SECTOR_SIZE);
 
@@ -3773,10 +3787,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        rbd_dev->disk = disk;
 
        return 0;
+out_tag_set:
+       blk_mq_free_tag_set(&rbd_dev->tag_set);
 out_disk:
        put_disk(disk);
-
-       return -ENOMEM;
+       return err;
 }
 
 /*
@@ -4033,8 +4048,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                return NULL;
 
        spin_lock_init(&rbd_dev->lock);
-       INIT_LIST_HEAD(&rbd_dev->rq_queue);
-       INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
        rbd_dev->flags = 0;
        atomic_set(&rbd_dev->parent_ref, 0);
        INIT_LIST_HEAD(&rbd_dev->node);
@@ -4274,32 +4287,22 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        }
 
        /*
-        * We always update the parent overlap.  If it's zero we
-        * treat it specially.
+        * We always update the parent overlap.  If it's zero we issue
+        * a warning, as we will proceed as if there was no parent.
         */
-       rbd_dev->parent_overlap = overlap;
        if (!overlap) {
-
-               /* A null parent_spec indicates it's the initial probe */
-
                if (parent_spec) {
-                       /*
-                        * The overlap has become zero, so the clone
-                        * must have been resized down to 0 at some
-                        * point.  Treat this the same as a flatten.
-                        */
-                       rbd_dev_parent_put(rbd_dev);
-                       pr_info("%s: clone image now standalone\n",
-                               rbd_dev->disk->disk_name);
+                       /* refresh, careful to warn just once */
+                       if (rbd_dev->parent_overlap)
+                               rbd_warn(rbd_dev,
+                                   "clone now standalone (overlap became 0)");
                } else {
-                       /*
-                        * For the initial probe, if we find the
-                        * overlap is zero we just pretend there was
-                        * no parent image.
-                        */
-                       rbd_warn(rbd_dev, "ignoring parent with overlap 0");
+                       /* initial probe */
+                       rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
                }
        }
+       rbd_dev->parent_overlap = overlap;
+
 out:
        ret = 0;
 out_err:
@@ -4770,36 +4773,6 @@ static inline size_t next_token(const char **buf)
        return strcspn(*buf, spaces);   /* Return token length */
 }
 
-/*
- * Finds the next token in *buf, and if the provided token buffer is
- * big enough, copies the found token into it.  The result, if
- * copied, is guaranteed to be terminated with '\0'.  Note that *buf
- * must be terminated with '\0' on entry.
- *
- * Returns the length of the token found (not including the '\0').
- * Return value will be 0 if no token is found, and it will be >=
- * token_size if the token would not fit.
- *
- * The *buf pointer will be updated to point beyond the end of the
- * found token.  Note that this occurs even if the token buffer is
- * too small to hold it.
- */
-static inline size_t copy_token(const char **buf,
-                               char *token,
-                               size_t token_size)
-{
-        size_t len;
-
-       len = next_token(buf);
-       if (len < token_size) {
-               memcpy(token, *buf, len);
-               *(token + len) = '\0';
-       }
-       *buf += len;
-
-        return len;
-}
-
 /*
  * Finds the next token in *buf, dynamically allocates a buffer big
  * enough to hold a copy of it, and copies the token into the new
index cdfbd21e35975178fa0c4cece78a354ef1d53007..655e570b9b3170339b47b77046290f832f857001 100644 (file)
@@ -28,8 +28,7 @@ struct virtio_blk_vq {
        char name[VQ_NAME_LEN];
 } ____cacheline_aligned_in_smp;
 
-struct virtio_blk
-{
+struct virtio_blk {
        struct virtio_device *vdev;
 
        /* The disk structure for the kernel. */
@@ -52,8 +51,7 @@ struct virtio_blk
        struct virtio_blk_vq *vqs;
 };
 
-struct virtblk_req
-{
+struct virtblk_req {
        struct request *req;
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
@@ -575,6 +573,12 @@ static int virtblk_probe(struct virtio_device *vdev)
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;
 
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
index 8e233edd7a097a0f91d323071c97f16e43308e5d..871bd3550cb0e0842296771412e749225542ffac 100644 (file)
@@ -528,7 +528,7 @@ out_cleanup:
 static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
 {
-       int old_max, cur_max;
+       unsigned long old_max, cur_max;
 
        old_max = atomic_long_read(&zram->stats.max_used_pages);
 
index 3ca2e1bf7bfacd2ecdd3f8c2b85fa29ffbcd360a..8c1bf61905337612de990eb5ad3cce707ab3d645 100644 (file)
@@ -273,6 +273,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
 
        /* Intel Bluetooth devices */
+       { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
        { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
        { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
        { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
index ec318bf434a6c3d890d26060a9c388295bde807e..1786574536b21ef06415ae62ff14bdbfd77b3f2c 100644 (file)
@@ -157,12 +157,16 @@ static int ipmi_release(struct inode *inode, struct file *file)
 {
        struct ipmi_file_private *priv = file->private_data;
        int                      rv;
+       struct  ipmi_recv_msg *msg, *next;
 
        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;
 
-       /* FIXME - free the messages in the list. */
+       list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
+               ipmi_free_recv_msg(msg);
+
+
        kfree(priv);
 
        return 0;
index 6b65fa4e0c5586895df2b26ee499c9e5ad4d8b2c..9bb592872532b1853efb00930c001e54df5fa7ed 100644 (file)
@@ -1483,14 +1483,10 @@ static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
        smi_msg->msgid = msgid;
 }
 
-static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
-                    struct ipmi_smi_msg *smi_msg, int priority)
+static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
+                                            struct ipmi_smi_msg *smi_msg,
+                                            int priority)
 {
-       int run_to_completion = intf->run_to_completion;
-       unsigned long flags;
-
-       if (!run_to_completion)
-               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
        if (intf->curr_msg) {
                if (priority > 0)
                        list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
@@ -1500,8 +1496,25 @@ static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
        } else {
                intf->curr_msg = smi_msg;
        }
-       if (!run_to_completion)
+
+       return smi_msg;
+}
+
+
+static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
+                    struct ipmi_smi_msg *smi_msg, int priority)
+{
+       int run_to_completion = intf->run_to_completion;
+
+       if (run_to_completion) {
+               smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+       } else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+               smi_msg = smi_add_send_msg(intf, smi_msg, priority);
                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+       }
 
        if (smi_msg)
                handlers->sender(intf->send_info, smi_msg);
@@ -1985,7 +1998,9 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
        seq_printf(m, "%x", intf->channels[0].address);
        for (i = 1; i < IPMI_MAX_CHANNELS; i++)
                seq_printf(m, " %x", intf->channels[i].address);
-       return seq_putc(m, '\n');
+       seq_putc(m, '\n');
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2004,9 +2019,11 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
 {
        ipmi_smi_t intf = m->private;
 
-       return seq_printf(m, "%u.%u\n",
-                      ipmi_version_major(&intf->bmc->id),
-                      ipmi_version_minor(&intf->bmc->id));
+       seq_printf(m, "%u.%u\n",
+                  ipmi_version_major(&intf->bmc->id),
+                  ipmi_version_minor(&intf->bmc->id));
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_version_proc_open(struct inode *inode, struct file *file)
@@ -2353,11 +2370,28 @@ static struct attribute *bmc_dev_attrs[] = {
        &dev_attr_additional_device_support.attr,
        &dev_attr_manufacturer_id.attr,
        &dev_attr_product_id.attr,
+       &dev_attr_aux_firmware_revision.attr,
+       &dev_attr_guid.attr,
        NULL
 };
 
+static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
+                                      struct attribute *attr, int idx)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct bmc_device *bmc = to_bmc_device(dev);
+       umode_t mode = attr->mode;
+
+       if (attr == &dev_attr_aux_firmware_revision.attr)
+               return bmc->id.aux_firmware_revision_set ? mode : 0;
+       if (attr == &dev_attr_guid.attr)
+               return bmc->guid_set ? mode : 0;
+       return mode;
+}
+
 static struct attribute_group bmc_dev_attr_group = {
        .attrs          = bmc_dev_attrs,
+       .is_visible     = bmc_dev_attr_is_visible,
 };
 
 static const struct attribute_group *bmc_dev_attr_groups[] = {
@@ -2380,13 +2414,6 @@ cleanup_bmc_device(struct kref *ref)
 {
        struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
 
-       if (bmc->id.aux_firmware_revision_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_aux_firmware_revision);
-       if (bmc->guid_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_guid);
-
        platform_device_unregister(&bmc->pdev);
 }
 
@@ -2407,33 +2434,6 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
        mutex_unlock(&ipmidriver_mutex);
 }
 
-static int create_bmc_files(struct bmc_device *bmc)
-{
-       int err;
-
-       if (bmc->id.aux_firmware_revision_set) {
-               err = device_create_file(&bmc->pdev.dev,
-                                        &dev_attr_aux_firmware_revision);
-               if (err)
-                       goto out;
-       }
-       if (bmc->guid_set) {
-               err = device_create_file(&bmc->pdev.dev,
-                                        &dev_attr_guid);
-               if (err)
-                       goto out_aux_firm;
-       }
-
-       return 0;
-
-out_aux_firm:
-       if (bmc->id.aux_firmware_revision_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_aux_firmware_revision);
-out:
-       return err;
-}
-
 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
 {
        int               rv;
@@ -2522,15 +2522,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
                        return rv;
                }
 
-               rv = create_bmc_files(bmc);
-               if (rv) {
-                       mutex_lock(&ipmidriver_mutex);
-                       platform_device_unregister(&bmc->pdev);
-                       mutex_unlock(&ipmidriver_mutex);
-
-                       return rv;
-               }
-
                dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
                         "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
                         bmc->id.manufacturer_id,
@@ -4212,7 +4203,6 @@ static void need_waiter(ipmi_smi_t intf)
 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
 
-/* FIXME - convert these to slabs. */
 static void free_smi_msg(struct ipmi_smi_msg *msg)
 {
        atomic_dec(&smi_msg_inuse_count);
index 967b73aa4e66d31481d6ae9488ab567530661f76..f6646ed3047e09a3656b089491f0afaa14982af1 100644 (file)
@@ -321,6 +321,18 @@ static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
 static void cleanup_ipmi_si(void);
 
+#ifdef DEBUG_TIMING
+void debug_timestamp(char *msg)
+{
+       struct timespec64 t;
+
+       getnstimeofday64(&t);
+       pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
+}
+#else
+#define debug_timestamp(x)
+#endif
+
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block *nb)
 {
@@ -358,9 +370,6 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 {
        int              rv;
-#ifdef DEBUG_TIMING
-       struct timeval t;
-#endif
 
        if (!smi_info->waiting_msg) {
                smi_info->curr_msg = NULL;
@@ -370,10 +379,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 
                smi_info->curr_msg = smi_info->waiting_msg;
                smi_info->waiting_msg = NULL;
-#ifdef DEBUG_TIMING
-               do_gettimeofday(&t);
-               printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+               debug_timestamp("Start2");
                err = atomic_notifier_call_chain(&xaction_notifier_list,
                                0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
@@ -582,12 +588,8 @@ static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
 static void handle_transaction_done(struct smi_info *smi_info)
 {
        struct ipmi_smi_msg *msg;
-#ifdef DEBUG_TIMING
-       struct timeval t;
 
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Done");
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (!smi_info->curr_msg)
@@ -929,24 +931,15 @@ static void sender(void                *send_info,
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;
-#ifdef DEBUG_TIMING
-       struct timeval    t;
-#endif
-
-       BUG_ON(smi_info->waiting_msg);
-       smi_info->waiting_msg = msg;
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Enqueue");
 
        if (smi_info->run_to_completion) {
                /*
                 * If we are running to completion, start it and run
                 * transactions until everything is clear.
                 */
-               smi_info->curr_msg = smi_info->waiting_msg;
+               smi_info->curr_msg = msg;
                smi_info->waiting_msg = NULL;
 
                /*
@@ -964,6 +957,15 @@ static void sender(void                *send_info,
        }
 
        spin_lock_irqsave(&smi_info->si_lock, flags);
+       /*
+        * The following two lines don't need to be under the lock for
+        * the lock's sake, but they do need SMP memory barriers to
+        * avoid getting things out of order.  We are already claiming
+        * the lock, anyway, so just do it under the lock to avoid the
+        * ordering problem.
+        */
+       BUG_ON(smi_info->waiting_msg);
+       smi_info->waiting_msg = msg;
        check_start_timer_thread(smi_info);
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
@@ -989,18 +991,18 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
  * we are spinning in kipmid looking for something and not delaying
  * between checks
  */
-static inline void ipmi_si_set_not_busy(struct timespec *ts)
+static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
 {
        ts->tv_nsec = -1;
 }
-static inline int ipmi_si_is_busy(struct timespec *ts)
+static inline int ipmi_si_is_busy(struct timespec64 *ts)
 {
        return ts->tv_nsec != -1;
 }
 
 static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
                                        const struct smi_info *smi_info,
-                                       struct timespec *busy_until)
+                                       struct timespec64 *busy_until)
 {
        unsigned int max_busy_us = 0;
 
@@ -1009,12 +1011,13 @@ static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
                ipmi_si_set_not_busy(busy_until);
        else if (!ipmi_si_is_busy(busy_until)) {
-               getnstimeofday(busy_until);
-               timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+               getnstimeofday64(busy_until);
+               timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
        } else {
-               struct timespec now;
-               getnstimeofday(&now);
-               if (unlikely(timespec_compare(&now, busy_until) > 0)) {
+               struct timespec64 now;
+
+               getnstimeofday64(&now);
+               if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
                        ipmi_si_set_not_busy(busy_until);
                        return 0;
                }
@@ -1037,7 +1040,7 @@ static int ipmi_thread(void *data)
        struct smi_info *smi_info = data;
        unsigned long flags;
        enum si_sm_result smi_result;
-       struct timespec busy_until;
+       struct timespec64 busy_until;
 
        ipmi_si_set_not_busy(&busy_until);
        set_user_nice(current, MAX_NICE);
@@ -1128,15 +1131,10 @@ static void smi_timeout(unsigned long data)
        unsigned long     jiffies_now;
        long              time_diff;
        long              timeout;
-#ifdef DEBUG_TIMING
-       struct timeval    t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Timer");
+
        jiffies_now = jiffies;
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
@@ -1173,18 +1171,13 @@ static irqreturn_t si_irq_handler(int irq, void *data)
 {
        struct smi_info *smi_info = data;
        unsigned long   flags;
-#ifdef DEBUG_TIMING
-       struct timeval  t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 
        smi_inc_stat(smi_info, interrupts);
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Interrupt");
+
        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
@@ -2038,18 +2031,13 @@ static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
 {
        struct smi_info *smi_info = context;
        unsigned long   flags;
-#ifdef DEBUG_TIMING
-       struct timeval t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 
        smi_inc_stat(smi_info, interrupts);
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("ACPI_GPE");
+
        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 
@@ -2071,7 +2059,6 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
        if (!info->irq)
                return 0;
 
-       /* FIXME - is level triggered right? */
        status = acpi_install_gpe_handler(NULL,
                                          info->irq,
                                          ACPI_GPE_LEVEL_TRIGGERED,
@@ -2998,7 +2985,9 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
 {
        struct smi_info *smi = m->private;
 
-       return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+       seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3060,16 +3049,18 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
 {
        struct smi_info *smi = m->private;
 
-       return seq_printf(m,
-                      "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
-                      si_to_str[smi->si_type],
-                      addr_space_to_str[smi->io.addr_type],
-                      smi->io.addr_data,
-                      smi->io.regspacing,
-                      smi->io.regsize,
-                      smi->io.regshift,
-                      smi->irq,
-                      smi->slave_addr);
+       seq_printf(m,
+                  "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+                  si_to_str[smi->si_type],
+                  addr_space_to_str[smi->io.addr_type],
+                  smi->io.addr_data,
+                  smi->io.regspacing,
+                  smi->io.regsize,
+                  smi->io.regshift,
+                  smi->irq,
+                  smi->slave_addr);
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_params_proc_open(struct inode *inode, struct file *file)
index 982b96323f823b8402ede2ceec7c0cb85c042ec4..f6e378dac5f5b1031530839d5967ab96607f1f7b 100644 (file)
@@ -1097,8 +1097,6 @@ static int ssif_remove(struct i2c_client *client)
        if (!ssif_info)
                return 0;
 
-       i2c_set_clientdata(client, NULL);
-
        /*
         * After this point, we won't deliver anything asychronously
         * to the message handler.  We can unregister ourself.
@@ -1198,7 +1196,9 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
 
 static int smi_type_proc_show(struct seq_file *m, void *v)
 {
-       return seq_puts(m, "ssif\n");
+       seq_puts(m, "ssif\n");
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
index 26afb56a807300026507fdf8624e8a597d1acdc0..fae2dbbf57459fe4f4ac01bb0415a02abbda9e18 100644 (file)
@@ -1986,7 +1986,10 @@ static int virtcons_probe(struct virtio_device *vdev)
        bool multiport;
        bool early = early_put_chars != NULL;
 
-       if (!vdev->config->get) {
+       /* We only need a config space if features are offered */
+       if (!vdev->config->get &&
+           (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)
+            || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
index 91f86131bb7aa62b0c4632e2defa1f8b2f4e6abc..0b474a04730fe4d2c588cb1a3a1818dbff61f995 100644 (file)
@@ -102,12 +102,12 @@ config COMMON_CLK_AXI_CLKGEN
          Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
          FPGAs. It is commonly used in Analog Devices' reference designs.
 
-config CLK_PPC_CORENET
-       bool "Clock driver for PowerPC corenet platforms"
-       depends on PPC_E500MC && OF
+config CLK_QORIQ
+       bool "Clock driver for Freescale QorIQ platforms"
+       depends on (PPC_E500MC || ARM) && OF
        ---help---
-         This adds the clock driver support for Freescale PowerPC corenet
-         platforms using common clock framework.
+         This adds the clock driver support for Freescale QorIQ platforms
+         using common clock framework.
 
 config COMMON_CLK_XGENE
        bool "Clock driver for APM XGene SoC"
@@ -135,6 +135,14 @@ config COMMON_CLK_PXA
        ---help---
          Sypport for the Marvell PXA SoC.
 
+config COMMON_CLK_CDCE706
+       tristate "Clock driver for TI CDCE706 clock synthesizer"
+       depends on I2C
+       select REGMAP_I2C
+       select RATIONAL
+       ---help---
+         This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
+
 source "drivers/clk/qcom/Kconfig"
 
 endmenu
index d5fba5bc6e1bc1f07991be58f367c30a253a58f7..d478ceb69c5fc6b4a1bee49276b37763db36c79d 100644 (file)
@@ -16,9 +16,11 @@ endif
 
 # hardware specific clock types
 # please keep this section sorted lexicographically by file/directory path name
+obj-$(CONFIG_MACH_ASM9260)             += clk-asm9260.o
 obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)    += clk-axi-clkgen.o
 obj-$(CONFIG_ARCH_AXXIA)               += clk-axm5516.o
 obj-$(CONFIG_ARCH_BCM2835)             += clk-bcm2835.o
+obj-$(CONFIG_COMMON_CLK_CDCE706)       += clk-cdce706.o
 obj-$(CONFIG_ARCH_CLPS711X)            += clk-clps711x.o
 obj-$(CONFIG_ARCH_EFM32)               += clk-efm32gg.o
 obj-$(CONFIG_ARCH_HIGHBANK)            += clk-highbank.o
@@ -30,7 +32,7 @@ obj-$(CONFIG_ARCH_MOXART)             += clk-moxart.o
 obj-$(CONFIG_ARCH_NOMADIK)             += clk-nomadik.o
 obj-$(CONFIG_ARCH_NSPIRE)              += clk-nspire.o
 obj-$(CONFIG_COMMON_CLK_PALMAS)                += clk-palmas.o
-obj-$(CONFIG_CLK_PPC_CORENET)          += clk-ppc-corenet.o
+obj-$(CONFIG_CLK_QORIQ)                        += clk-qoriq.o
 obj-$(CONFIG_COMMON_CLK_RK808)         += clk-rk808.o
 obj-$(CONFIG_COMMON_CLK_S2MPS11)       += clk-s2mps11.o
 obj-$(CONFIG_COMMON_CLK_SI5351)                += clk-si5351.o
index bbdb1b985c9146a5e82013fd8d8ab20ea3ebc5fc..86c8a073dcc32a20b98f4c862d5622b11ffe1d57 100644 (file)
@@ -56,6 +56,8 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
 
 static long clk_programmable_determine_rate(struct clk_hw *hw,
                                            unsigned long rate,
+                                           unsigned long min_rate,
+                                           unsigned long max_rate,
                                            unsigned long *best_parent_rate,
                                            struct clk_hw **best_parent_hw)
 {
index 1c06f6f3a8c59959b90e90f554c048e1535a1893..05abae89262e20923f5f39321c33165bdab6ea7f 100644 (file)
@@ -1032,6 +1032,8 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate,
+               unsigned long max_rate,
                unsigned long *best_parent_rate, struct clk_hw **best_parent)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
new file mode 100644 (file)
index 0000000..88f4ff6
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2014 Oleksij Rempel <linux@rempel-privat.de>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/alphascale,asm9260.h>
+
+#define HW_AHBCLKCTRL0         0x0020
+#define HW_AHBCLKCTRL1         0x0030
+#define HW_SYSPLLCTRL          0x0100
+#define HW_MAINCLKSEL          0x0120
+#define HW_MAINCLKUEN          0x0124
+#define HW_UARTCLKSEL          0x0128
+#define HW_UARTCLKUEN          0x012c
+#define HW_I2S0CLKSEL          0x0130
+#define HW_I2S0CLKUEN          0x0134
+#define HW_I2S1CLKSEL          0x0138
+#define HW_I2S1CLKUEN          0x013c
+#define HW_WDTCLKSEL           0x0160
+#define HW_WDTCLKUEN           0x0164
+#define HW_CLKOUTCLKSEL                0x0170
+#define HW_CLKOUTCLKUEN                0x0174
+#define HW_CPUCLKDIV           0x017c
+#define HW_SYSAHBCLKDIV                0x0180
+#define HW_I2S0MCLKDIV         0x0190
+#define HW_I2S0SCLKDIV         0x0194
+#define HW_I2S1MCLKDIV         0x0188
+#define HW_I2S1SCLKDIV         0x018c
+#define HW_UART0CLKDIV         0x0198
+#define HW_UART1CLKDIV         0x019c
+#define HW_UART2CLKDIV         0x01a0
+#define HW_UART3CLKDIV         0x01a4
+#define HW_UART4CLKDIV         0x01a8
+#define HW_UART5CLKDIV         0x01ac
+#define HW_UART6CLKDIV         0x01b0
+#define HW_UART7CLKDIV         0x01b4
+#define HW_UART8CLKDIV         0x01b8
+#define HW_UART9CLKDIV         0x01bc
+#define HW_SPI0CLKDIV          0x01c0
+#define HW_SPI1CLKDIV          0x01c4
+#define HW_QUADSPICLKDIV       0x01c8
+#define HW_SSP0CLKDIV          0x01d0
+#define HW_NANDCLKDIV          0x01d4
+#define HW_TRACECLKDIV         0x01e0
+#define HW_CAMMCLKDIV          0x01e8
+#define HW_WDTCLKDIV           0x01ec
+#define HW_CLKOUTCLKDIV                0x01f4
+#define HW_MACCLKDIV           0x01f8
+#define HW_LCDCLKDIV           0x01fc
+#define HW_ADCANACLKDIV                0x0200
+
+static struct clk *clks[MAX_CLKS];
+static struct clk_onecell_data clk_data;
+static DEFINE_SPINLOCK(asm9260_clk_lock);
+
+struct asm9260_div_clk {
+       unsigned int idx;
+       const char *name;
+       const char *parent_name;
+       u32 reg;
+};
+
+struct asm9260_gate_data {
+       unsigned int idx;
+       const char *name;
+       const char *parent_name;
+       u32 reg;
+       u8 bit_idx;
+       unsigned long flags;
+};
+
+struct asm9260_mux_clock {
+       u8                      mask;
+       u32                     *table;
+       const char              *name;
+       const char              **parent_names;
+       u8                      num_parents;
+       unsigned long           offset;
+       unsigned long           flags;
+};
+
+static void __iomem *base;
+
+static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
+       { CLKID_SYS_CPU,        "cpu_div", "main_gate", HW_CPUCLKDIV },
+       { CLKID_SYS_AHB,        "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
+
+       /* i2s has two dividers: one for only the external mclk and an
+        * internal divider for all clks. */
+       { CLKID_SYS_I2S0M,      "i2s0m_div", "i2s0_mclk",  HW_I2S0MCLKDIV },
+       { CLKID_SYS_I2S1M,      "i2s1m_div", "i2s1_mclk",  HW_I2S1MCLKDIV },
+       { CLKID_SYS_I2S0S,      "i2s0s_div", "i2s0_gate",  HW_I2S0SCLKDIV },
+       { CLKID_SYS_I2S1S,      "i2s1s_div", "i2s0_gate",  HW_I2S1SCLKDIV },
+
+       { CLKID_SYS_UART0,      "uart0_div", "uart_gate", HW_UART0CLKDIV },
+       { CLKID_SYS_UART1,      "uart1_div", "uart_gate", HW_UART1CLKDIV },
+       { CLKID_SYS_UART2,      "uart2_div", "uart_gate", HW_UART2CLKDIV },
+       { CLKID_SYS_UART3,      "uart3_div", "uart_gate", HW_UART3CLKDIV },
+       { CLKID_SYS_UART4,      "uart4_div", "uart_gate", HW_UART4CLKDIV },
+       { CLKID_SYS_UART5,      "uart5_div", "uart_gate", HW_UART5CLKDIV },
+       { CLKID_SYS_UART6,      "uart6_div", "uart_gate", HW_UART6CLKDIV },
+       { CLKID_SYS_UART7,      "uart7_div", "uart_gate", HW_UART7CLKDIV },
+       { CLKID_SYS_UART8,      "uart8_div", "uart_gate", HW_UART8CLKDIV },
+       { CLKID_SYS_UART9,      "uart9_div", "uart_gate", HW_UART9CLKDIV },
+
+       { CLKID_SYS_SPI0,       "spi0_div",     "main_gate", HW_SPI0CLKDIV },
+       { CLKID_SYS_SPI1,       "spi1_div",     "main_gate", HW_SPI1CLKDIV },
+       { CLKID_SYS_QUADSPI,    "quadspi_div",  "main_gate", HW_QUADSPICLKDIV },
+       { CLKID_SYS_SSP0,       "ssp0_div",     "main_gate", HW_SSP0CLKDIV },
+       { CLKID_SYS_NAND,       "nand_div",     "main_gate", HW_NANDCLKDIV },
+       { CLKID_SYS_TRACE,      "trace_div",    "main_gate", HW_TRACECLKDIV },
+       { CLKID_SYS_CAMM,       "camm_div",     "main_gate", HW_CAMMCLKDIV },
+       { CLKID_SYS_MAC,        "mac_div",      "main_gate", HW_MACCLKDIV },
+       { CLKID_SYS_LCD,        "lcd_div",      "main_gate", HW_LCDCLKDIV },
+       { CLKID_SYS_ADCANA,     "adcana_div",   "main_gate", HW_ADCANACLKDIV },
+
+       { CLKID_SYS_WDT,        "wdt_div",      "wdt_gate",    HW_WDTCLKDIV },
+       { CLKID_SYS_CLKOUT,     "clkout_div",   "clkout_gate", HW_CLKOUTCLKDIV },
+};
+
+static const struct asm9260_gate_data asm9260_mux_gates[] __initconst = {
+       { 0, "main_gate",       "main_mux",     HW_MAINCLKUEN,  0 },
+       { 0, "uart_gate",       "uart_mux",     HW_UARTCLKUEN,  0 },
+       { 0, "i2s0_gate",       "i2s0_mux",     HW_I2S0CLKUEN,  0 },
+       { 0, "i2s1_gate",       "i2s1_mux",     HW_I2S1CLKUEN,  0 },
+       { 0, "wdt_gate",        "wdt_mux",      HW_WDTCLKUEN,   0 },
+       { 0, "clkout_gate",     "clkout_mux",   HW_CLKOUTCLKUEN, 0 },
+};
+static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
+       /* ahb gates */
+       { CLKID_AHB_ROM,        "rom",          "ahb_div",
+               HW_AHBCLKCTRL0, 1, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_RAM,        "ram",          "ahb_div",
+               HW_AHBCLKCTRL0, 2, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_GPIO,       "gpio",         "ahb_div",
+               HW_AHBCLKCTRL0, 4 },
+       { CLKID_AHB_MAC,        "mac",          "ahb_div",
+               HW_AHBCLKCTRL0, 5 },
+       { CLKID_AHB_EMI,        "emi",          "ahb_div",
+               HW_AHBCLKCTRL0, 6, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_USB0,       "usb0",         "ahb_div",
+               HW_AHBCLKCTRL0, 7 },
+       { CLKID_AHB_USB1,       "usb1",         "ahb_div",
+               HW_AHBCLKCTRL0, 8 },
+       { CLKID_AHB_DMA0,       "dma0",         "ahb_div",
+               HW_AHBCLKCTRL0, 9 },
+       { CLKID_AHB_DMA1,       "dma1",         "ahb_div",
+               HW_AHBCLKCTRL0, 10 },
+       { CLKID_AHB_UART0,      "uart0",        "ahb_div",
+               HW_AHBCLKCTRL0, 11 },
+       { CLKID_AHB_UART1,      "uart1",        "ahb_div",
+               HW_AHBCLKCTRL0, 12 },
+       { CLKID_AHB_UART2,      "uart2",        "ahb_div",
+               HW_AHBCLKCTRL0, 13 },
+       { CLKID_AHB_UART3,      "uart3",        "ahb_div",
+               HW_AHBCLKCTRL0, 14 },
+       { CLKID_AHB_UART4,      "uart4",        "ahb_div",
+               HW_AHBCLKCTRL0, 15 },
+       { CLKID_AHB_UART5,      "uart5",        "ahb_div",
+               HW_AHBCLKCTRL0, 16 },
+       { CLKID_AHB_UART6,      "uart6",        "ahb_div",
+               HW_AHBCLKCTRL0, 17 },
+       { CLKID_AHB_UART7,      "uart7",        "ahb_div",
+               HW_AHBCLKCTRL0, 18 },
+       { CLKID_AHB_UART8,      "uart8",        "ahb_div",
+               HW_AHBCLKCTRL0, 19 },
+       { CLKID_AHB_UART9,      "uart9",        "ahb_div",
+               HW_AHBCLKCTRL0, 20 },
+       { CLKID_AHB_I2S0,       "i2s0",         "ahb_div",
+               HW_AHBCLKCTRL0, 21 },
+       { CLKID_AHB_I2C0,       "i2c0",         "ahb_div",
+               HW_AHBCLKCTRL0, 22 },
+       { CLKID_AHB_I2C1,       "i2c1",         "ahb_div",
+               HW_AHBCLKCTRL0, 23 },
+       { CLKID_AHB_SSP0,       "ssp0",         "ahb_div",
+               HW_AHBCLKCTRL0, 24 },
+       { CLKID_AHB_IOCONFIG,   "ioconf",       "ahb_div",
+               HW_AHBCLKCTRL0, 25 },
+       { CLKID_AHB_WDT,        "wdt",          "ahb_div",
+               HW_AHBCLKCTRL0, 26 },
+       { CLKID_AHB_CAN0,       "can0",         "ahb_div",
+               HW_AHBCLKCTRL0, 27 },
+       { CLKID_AHB_CAN1,       "can1",         "ahb_div",
+               HW_AHBCLKCTRL0, 28 },
+       { CLKID_AHB_MPWM,       "mpwm",         "ahb_div",
+               HW_AHBCLKCTRL0, 29 },
+       { CLKID_AHB_SPI0,       "spi0",         "ahb_div",
+               HW_AHBCLKCTRL0, 30 },
+       { CLKID_AHB_SPI1,       "spi1",         "ahb_div",
+               HW_AHBCLKCTRL0, 31 },
+
+       { CLKID_AHB_QEI,        "qei",          "ahb_div",
+               HW_AHBCLKCTRL1, 0 },
+       { CLKID_AHB_QUADSPI0,   "quadspi0",     "ahb_div",
+               HW_AHBCLKCTRL1, 1 },
+       { CLKID_AHB_CAMIF,      "capmif",       "ahb_div",
+               HW_AHBCLKCTRL1, 2 },
+       { CLKID_AHB_LCDIF,      "lcdif",        "ahb_div",
+               HW_AHBCLKCTRL1, 3 },
+       { CLKID_AHB_TIMER0,     "timer0",       "ahb_div",
+               HW_AHBCLKCTRL1, 4 },
+       { CLKID_AHB_TIMER1,     "timer1",       "ahb_div",
+               HW_AHBCLKCTRL1, 5 },
+       { CLKID_AHB_TIMER2,     "timer2",       "ahb_div",
+               HW_AHBCLKCTRL1, 6 },
+       { CLKID_AHB_TIMER3,     "timer3",       "ahb_div",
+               HW_AHBCLKCTRL1, 7 },
+       { CLKID_AHB_IRQ,        "irq",          "ahb_div",
+               HW_AHBCLKCTRL1, 8, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_RTC,        "rtc",          "ahb_div",
+               HW_AHBCLKCTRL1, 9 },
+       { CLKID_AHB_NAND,       "nand",         "ahb_div",
+               HW_AHBCLKCTRL1, 10 },
+       { CLKID_AHB_ADC0,       "adc0",         "ahb_div",
+               HW_AHBCLKCTRL1, 11 },
+       { CLKID_AHB_LED,        "led",          "ahb_div",
+               HW_AHBCLKCTRL1, 12 },
+       { CLKID_AHB_DAC0,       "dac0",         "ahb_div",
+               HW_AHBCLKCTRL1, 13 },
+       { CLKID_AHB_LCD,        "lcd",          "ahb_div",
+               HW_AHBCLKCTRL1, 14 },
+       { CLKID_AHB_I2S1,       "i2s1",         "ahb_div",
+               HW_AHBCLKCTRL1, 15 },
+       { CLKID_AHB_MAC1,       "mac1",         "ahb_div",
+               HW_AHBCLKCTRL1, 16 },
+};
+
+static const char __initdata *main_mux_p[] =   { NULL, NULL };
+static const char __initdata *i2s0_mux_p[] =   { NULL, NULL, "i2s0m_div"};
+static const char __initdata *i2s1_mux_p[] =   { NULL, NULL, "i2s1m_div"};
+static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
+static u32 three_mux_table[] = {0, 1, 3};
+
+static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
+       { 1, three_mux_table, "main_mux",       main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_MAINCLKSEL, },
+       { 1, three_mux_table, "uart_mux",       main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_UARTCLKSEL, },
+       { 1, three_mux_table, "wdt_mux",        main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_WDTCLKSEL, },
+       { 3, three_mux_table, "i2s0_mux",       i2s0_mux_p,
+               ARRAY_SIZE(i2s0_mux_p), HW_I2S0CLKSEL, },
+       { 3, three_mux_table, "i2s1_mux",       i2s1_mux_p,
+               ARRAY_SIZE(i2s1_mux_p), HW_I2S1CLKSEL, },
+       { 3, three_mux_table, "clkout_mux",     clkout_mux_p,
+               ARRAY_SIZE(clkout_mux_p), HW_CLKOUTCLKSEL, },
+};
+
+static void __init asm9260_acc_init(struct device_node *np)
+{
+       struct clk *clk;
+       const char *ref_clk, *pll_clk = "pll";
+       u32 rate;
+       int n;
+       u32 accuracy = 0;
+
+       base = of_io_request_and_map(np, 0, np->name);
+       if (!base)
+               panic("%s: unable to map resource", np->name);
+
+       /* register pll */
+       rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
+
+       ref_clk = of_clk_get_parent_name(np, 0);
+       accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
+       clk = clk_register_fixed_rate_with_accuracy(NULL, pll_clk,
+                       ref_clk, 0, rate, accuracy);
+
+       if (IS_ERR(clk))
+               panic("%s: can't register REFCLK. Check DT!", np->name);
+
+       for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
+               const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
+
+               mc->parent_names[0] = ref_clk;
+               mc->parent_names[1] = pll_clk;
+               clk = clk_register_mux_table(NULL, mc->name, mc->parent_names,
+                               mc->num_parents, mc->flags, base + mc->offset,
+                               0, mc->mask, 0, mc->table, &asm9260_clk_lock);
+       }
+
+       /* clock mux gate cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) {
+               const struct asm9260_gate_data *gd = &asm9260_mux_gates[n];
+
+               clk = clk_register_gate(NULL, gd->name,
+                       gd->parent_name, gd->flags | CLK_SET_RATE_PARENT,
+                       base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock);
+       }
+
+       /* clock div cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) {
+               const struct asm9260_div_clk *dc = &asm9260_div_clks[n];
+
+               clks[dc->idx] = clk_register_divider(NULL, dc->name,
+                               dc->parent_name, CLK_SET_RATE_PARENT,
+                               base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED,
+                               &asm9260_clk_lock);
+       }
+
+       /* clock ahb gate cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) {
+               const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n];
+
+               clks[gd->idx] = clk_register_gate(NULL, gd->name,
+                               gd->parent_name, gd->flags, base + gd->reg,
+                               gd->bit_idx, 0, &asm9260_clk_lock);
+       }
+
+       /* check for errors on leaf clocks */
+       for (n = 0; n < MAX_CLKS; n++) {
+               if (!IS_ERR(clks[n]))
+                       continue;
+
+               pr_err("%s: Unable to register leaf clock %d\n",
+                               np->full_name, n);
+               goto fail;
+       }
+
+       /* register clk-provider */
+       clk_data.clks = clks;
+       clk_data.clk_num = MAX_CLKS;
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       return;
+fail:
+       iounmap(base);
+}
+CLK_OF_DECLARE(asm9260_acc, "alphascale,asm9260-clock-controller",
+               asm9260_acc_init);
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
new file mode 100644 (file)
index 0000000..c386ad2
--- /dev/null
@@ -0,0 +1,700 @@
+/*
+ * TI CDCE706 programmable 3-PLL clock synthesizer driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/*
+ * CDCE706 I2C register map (see the TI datasheet linked above).
+ * Register addresses: clock-input config, per-PLL M/N bytes, PLL mux,
+ * VCO range, output dividers P0..P5 and output stages Y0..Y5.
+ */
+#define CDCE706_CLKIN_CLOCK            10
+#define CDCE706_CLKIN_SOURCE           11
+#define CDCE706_PLL_M_LOW(pll)         (1 + 3 * (pll))
+#define CDCE706_PLL_N_LOW(pll)         (2 + 3 * (pll))
+#define CDCE706_PLL_HI(pll)            (3 + 3 * (pll))
+#define CDCE706_PLL_MUX                        3
+#define CDCE706_PLL_FVCO               6
+#define CDCE706_DIVIDER(div)           (13 + (div))
+#define CDCE706_CLKOUT(out)            (19 + (out))
+
+/* Clock-input register fields */
+#define CDCE706_CLKIN_CLOCK_MASK       0x10
+#define CDCE706_CLKIN_SOURCE_SHIFT     6
+#define CDCE706_CLKIN_SOURCE_MASK      0xc0
+#define CDCE706_CLKIN_SOURCE_LVCMOS    0x40
+
+/*
+ * PLL fields: each PLL has a 9-bit M (reference divider) and a 12-bit N
+ * (feedback divider) split across a low byte and a shared high-bits byte.
+ * The FVCO bit selects the high VCO range for rates above 180 MHz.
+ */
+#define CDCE706_PLL_MUX_MASK(pll)      (0x80 >> (pll))
+#define CDCE706_PLL_LOW_M_MASK         0xff
+#define CDCE706_PLL_LOW_N_MASK         0xff
+#define CDCE706_PLL_HI_M_MASK          0x1
+#define CDCE706_PLL_HI_N_MASK          0x1e
+#define CDCE706_PLL_HI_N_SHIFT         1
+#define CDCE706_PLL_M_MAX              0x1ff
+#define CDCE706_PLL_N_MAX              0xfff
+#define CDCE706_PLL_FVCO_MASK(pll)     (0x80 >> (pll))
+#define CDCE706_PLL_FREQ_MIN            80000000
+#define CDCE706_PLL_FREQ_MAX           300000000
+#define CDCE706_PLL_FREQ_HI            180000000
+
+/* Output-divider fields: PLL-select mux (3 bits) and 7-bit divide value */
+#define CDCE706_DIVIDER_PLL(div)       (9 + (div) - ((div) > 2) - ((div) > 4))
+#define CDCE706_DIVIDER_PLL_SHIFT(div) ((div) < 2 ? 5 : 3 * ((div) & 1))
+#define CDCE706_DIVIDER_PLL_MASK(div)  (0x7 << CDCE706_DIVIDER_PLL_SHIFT(div))
+#define CDCE706_DIVIDER_DIVIDER_MASK   0x7f
+#define CDCE706_DIVIDER_DIVIDER_MAX    0x7f
+
+/* Output-stage fields: divider source select and output enable */
+#define CDCE706_CLKOUT_DIVIDER_MASK    0x7
+#define CDCE706_CLKOUT_ENABLE_MASK     0x8
+
+/* All registers are single bytes with 8-bit addresses */
+static struct regmap_config cdce706_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
+/* Recover the driver's per-clock data from the clk framework's clk_hw */
+#define to_hw_data(phw) (container_of((phw), struct cdce706_hw_data, hw))
+
+/*
+ * Per-clock state shared by all four clock types (clkin, pll, divider,
+ * clkout).  Fields not used by a given type stay zero.
+ */
+struct cdce706_hw_data {
+       struct cdce706_dev_data *dev_data;
+       unsigned idx;           /* index within its type's array / registers */
+       unsigned parent;        /* cached parent selection */
+       struct clk *clk;
+       struct clk_hw hw;
+       unsigned div;           /* cached divider (M for PLLs) */
+       unsigned mul;           /* cached multiplier (N for PLLs) */
+       unsigned mux;           /* PLL mux bit as read from hardware */
+};
+
+/* Device instance: one clock tree of 1 clkin, 3 PLLs, 6 dividers, 6 outputs */
+struct cdce706_dev_data {
+       struct i2c_client *client;
+       struct regmap *regmap;
+       struct clk_onecell_data onecell;
+       struct clk *clks[6];
+       struct clk *clkin_clk[2];
+       const char *clkin_name[2];
+       struct cdce706_hw_data clkin[1];
+       struct cdce706_hw_data pll[3];
+       struct cdce706_hw_data divider[6];
+       struct cdce706_hw_data clkout[6];
+};
+
+static const char * const cdce706_source_name[] = {
+       "clk_in0", "clk_in1",
+};
+
+static const char *cdce706_clkin_name[] = {
+       "clk_in",
+};
+
+static const char * const cdce706_pll_name[] = {
+       "pll1", "pll2", "pll3",
+};
+
+/*
+ * NOTE(review): "pll2" appears twice -- presumably the hardware mux
+ * encodes PLL2 under two selector values; confirm against the datasheet.
+ */
+static const char *cdce706_divider_parent_name[] = {
+       "clk_in", "pll1", "pll2", "pll2", "pll3",
+};
+
+static const char *cdce706_divider_name[] = {
+       "p0", "p1", "p2", "p3", "p4", "p5",
+};
+
+static const char * const cdce706_clkout_name[] = {
+       "clk_out0", "clk_out1", "clk_out2", "clk_out3", "clk_out4", "clk_out5",
+};
+
+/*
+ * Register access helpers.  Every access OR's the register address with
+ * 0x80; NOTE(review): presumably this selects the chip's byte (single
+ * register) I2C transfer mode -- confirm against the CDCE706 datasheet.
+ * All three helpers log failures and propagate the regmap error code
+ * (negative errno) to the caller.
+ */
+static int cdce706_reg_read(struct cdce706_dev_data *dev_data, unsigned reg,
+                           unsigned *val)
+{
+       int rc = regmap_read(dev_data->regmap, reg | 0x80, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error reading reg %u", reg);
+       return rc;
+}
+
+static int cdce706_reg_write(struct cdce706_dev_data *dev_data, unsigned reg,
+                            unsigned val)
+{
+       int rc = regmap_write(dev_data->regmap, reg | 0x80, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error writing reg %u", reg);
+       return rc;
+}
+
+static int cdce706_reg_update(struct cdce706_dev_data *dev_data, unsigned reg,
+                             unsigned mask, unsigned val)
+{
+       int rc = regmap_update_bits(dev_data->regmap, reg | 0x80, mask, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error updating reg %u", reg);
+       return rc;
+}
+
+/*
+ * clk_in parent selection.  Only the software-cached parent index is
+ * updated here; the hardware routing is read once at probe time in
+ * cdce706_register_clkin() and is never written back.
+ */
+static int cdce706_clkin_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       hwd->parent = index;
+       return 0;
+}
+
+static u8 cdce706_clkin_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
+static const struct clk_ops cdce706_clkin_ops = {
+       .set_parent = cdce706_clkin_set_parent,
+       .get_parent = cdce706_clkin_get_parent,
+};
+
+/*
+ * PLL rate = parent * N / M using the cached mul/div parameters (read
+ * from hardware at probe, or set by a preceding round_rate).  When the
+ * PLL mux bit is set the rate is parent / div instead; NOTE(review):
+ * presumably that is a PLL-bypass/divide path -- confirm against the
+ * datasheet.  Returns 0 if the parameters are uninitialized.
+ */
+static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mux: %d, mul: %u, div: %u\n",
+               __func__, hwd->idx, hwd->mux, hwd->mul, hwd->div);
+
+       if (!hwd->mux) {
+               if (hwd->div && hwd->mul) {
+                       u64 res = (u64)parent_rate * hwd->mul;
+
+                       do_div(res, hwd->div);
+                       return res;
+               }
+       } else {
+               if (hwd->div)
+                       return parent_rate / hwd->div;
+       }
+       return 0;
+}
+
+/*
+ * Pick the best N/M ratio approximating rate/parent_rate within the
+ * hardware's 12-bit N / 9-bit M limits.  Side effect: the chosen N/M is
+ * cached in hwd->mul/div and later committed by cdce706_pll_set_rate(),
+ * so round_rate must run before set_rate (the clk core guarantees this).
+ */
+static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long *parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       unsigned long mul, div;
+       u64 res;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, rate: %lu, parent_rate: %lu\n",
+               __func__, rate, *parent_rate);
+
+       rational_best_approximation(rate, *parent_rate,
+                                   CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
+                                   &mul, &div);
+       hwd->mul = mul;
+       hwd->div = div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mul: %lu, div: %lu\n",
+               __func__, hwd->idx, mul, div);
+
+       /* report the rate actually achievable with the chosen N/M */
+       res = (u64)*parent_rate * hwd->mul;
+       do_div(res, hwd->div);
+       return res;
+}
+
+/*
+ * Commit the N/M parameters cached by cdce706_pll_round_rate() to the
+ * hardware: the shared high-bits byte first, then the M and N low bytes,
+ * and finally the VCO range bit (set for rates above 180 MHz).
+ * Returns 0 or the first negative regmap error encountered.
+ */
+static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       unsigned long mul = hwd->mul, div = hwd->div;
+       int err;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mul: %lu, div: %lu\n",
+               __func__, hwd->idx, mul, div);
+
+       /* bit 8 of M and bits 8..11 of N live in the shared HI byte */
+       err = cdce706_reg_update(hwd->dev_data,
+                                CDCE706_PLL_HI(hwd->idx),
+                                CDCE706_PLL_HI_M_MASK | CDCE706_PLL_HI_N_MASK,
+                                ((div >> 8) & CDCE706_PLL_HI_M_MASK) |
+                                ((mul >> (8 - CDCE706_PLL_HI_N_SHIFT)) &
+                                 CDCE706_PLL_HI_N_MASK));
+       if (err < 0)
+               return err;
+
+       err = cdce706_reg_write(hwd->dev_data,
+                               CDCE706_PLL_M_LOW(hwd->idx),
+                               div & CDCE706_PLL_LOW_M_MASK);
+       if (err < 0)
+               return err;
+
+       err = cdce706_reg_write(hwd->dev_data,
+                               CDCE706_PLL_N_LOW(hwd->idx),
+                               mul & CDCE706_PLL_LOW_N_MASK);
+       if (err < 0)
+               return err;
+
+       /* select the high VCO range for output frequencies > 180 MHz */
+       err = cdce706_reg_update(hwd->dev_data,
+                                CDCE706_PLL_FVCO,
+                                CDCE706_PLL_FVCO_MASK(hwd->idx),
+                                rate > CDCE706_PLL_FREQ_HI ?
+                                CDCE706_PLL_FVCO_MASK(hwd->idx) : 0);
+       return err;
+}
+
+static const struct clk_ops cdce706_pll_ops = {
+       .recalc_rate = cdce706_pll_recalc_rate,
+       .round_rate = cdce706_pll_round_rate,
+       .set_rate = cdce706_pll_set_rate,
+};
+
+/*
+ * Divider parent (PLL source) selection.  Unlike the clkin ops this one
+ * does write the hardware mux field; the write is skipped when the
+ * cached parent already matches.
+ */
+static int cdce706_divider_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       if (hwd->parent == index)
+               return 0;
+       hwd->parent = index;
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_DIVIDER_PLL(hwd->idx),
+                                 CDCE706_DIVIDER_PLL_MASK(hwd->idx),
+                                 index << CDCE706_DIVIDER_PLL_SHIFT(hwd->idx));
+}
+
+static u8 cdce706_divider_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
+/* rate = parent / div, using the cached divide value; 0 if unset */
+static unsigned long cdce706_divider_recalc_rate(struct clk_hw *hw,
+                                                unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %u\n",
+               __func__, hwd->idx, hwd->div);
+       if (hwd->div)
+               return parent_rate / hwd->div;
+       return 0;
+}
+
+/*
+ * Pick an integer divider for the requested rate.  First find the best
+ * divider for the current parent rate (rational approximation with the
+ * numerator pinned to 1); a mul of 0 means the request is below the
+ * reachable range, so clamp to the maximum divider.  If the clock may
+ * re-rate its parent (CLK_SET_RATE_PARENT), additionally scan all
+ * dividers whose target PLL rate (rate * div) falls inside the PLL's
+ * 80..300 MHz range and pick the one minimizing the output error,
+ * updating *parent_rate accordingly.
+ *
+ * Side effect: the chosen divider is cached in hwd->div and committed
+ * later by cdce706_divider_set_rate().
+ *
+ * NOTE(review): a rate of 0 would divide by zero in the loop bounds, and
+ * if no divider beats best_diff, best_div stays 0 and the final
+ * "*parent_rate / div" divides by zero -- confirm callers never hit this.
+ */
+static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long *parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       struct cdce706_dev_data *cdce = hwd->dev_data;
+       unsigned long mul, div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, rate: %lu, parent_rate: %lu\n",
+               __func__, rate, *parent_rate);
+
+       rational_best_approximation(rate, *parent_rate,
+                                   1, CDCE706_DIVIDER_DIVIDER_MAX,
+                                   &mul, &div);
+       if (!mul)
+               div = CDCE706_DIVIDER_DIVIDER_MAX;
+
+       if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+               unsigned long best_diff = rate;
+               unsigned long best_div = 0;
+               struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
+               unsigned long gp_rate = gp_clk ? clk_get_rate(gp_clk) : 0;
+
+               for (div = CDCE706_PLL_FREQ_MIN / rate; best_diff &&
+                    div <= CDCE706_PLL_FREQ_MAX / rate; ++div) {
+                       unsigned long n, m;
+                       unsigned long diff;
+                       unsigned long div_rate;
+                       u64 div_rate64;
+
+                       /* integer start point may undershoot; re-check */
+                       if (rate * div < CDCE706_PLL_FREQ_MIN)
+                               continue;
+
+                       /* best PLL N/M for this candidate parent rate */
+                       rational_best_approximation(rate * div, gp_rate,
+                                                   CDCE706_PLL_N_MAX,
+                                                   CDCE706_PLL_M_MAX,
+                                                   &n, &m);
+                       div_rate64 = (u64)gp_rate * n;
+                       do_div(div_rate64, m);
+                       do_div(div_rate64, div);
+                       div_rate = div_rate64;
+                       diff = max(div_rate, rate) - min(div_rate, rate);
+
+                       if (diff < best_diff) {
+                               best_diff = diff;
+                               best_div = div;
+                               dev_dbg(&hwd->dev_data->client->dev,
+                                       "%s, %lu * %lu / %lu / %lu = %lu\n",
+                                       __func__, gp_rate, n, m, div, div_rate);
+                       }
+               }
+
+               div = best_div;
+
+               dev_dbg(&hwd->dev_data->client->dev,
+                       "%s, altering parent rate: %lu -> %lu\n",
+                       __func__, *parent_rate, rate * div);
+               *parent_rate = rate * div;
+       }
+       hwd->div = div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %lu\n",
+               __func__, hwd->idx, div);
+
+       return *parent_rate / div;
+}
+
+/*
+ * Commit the divider value cached by cdce706_divider_round_rate() to the
+ * 7-bit divide field; the rate/parent_rate arguments are not consulted.
+ */
+static int cdce706_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %u\n",
+               __func__, hwd->idx, hwd->div);
+
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_DIVIDER(hwd->idx),
+                                 CDCE706_DIVIDER_DIVIDER_MASK,
+                                 hwd->div);
+}
+
+static const struct clk_ops cdce706_divider_ops = {
+       .set_parent = cdce706_divider_set_parent,
+       .get_parent = cdce706_divider_get_parent,
+       .recalc_rate = cdce706_divider_recalc_rate,
+       .round_rate = cdce706_divider_round_rate,
+       .set_rate = cdce706_divider_set_rate,
+};
+
+/* Enable the output stage via its ENABLE bit */
+static int cdce706_clkout_prepare(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
+                                 CDCE706_CLKOUT_ENABLE_MASK,
+                                 CDCE706_CLKOUT_ENABLE_MASK);
+}
+
+static void cdce706_clkout_unprepare(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
+                          CDCE706_CLKOUT_ENABLE_MASK, 0);
+}
+
+/*
+ * NOTE(review): this mask/value pair looks suspicious.  Parent (divider)
+ * selection lives in CDCE706_CLKOUT_DIVIDER_MASK (0x7), but the update
+ * masks with CDCE706_CLKOUT_ENABLE_MASK (0x8); since regmap_update_bits
+ * applies (val & mask) and index is 0..5, this clears the ENABLE bit and
+ * never writes the divider-select field.  Presumably the intended mask
+ * is CDCE706_CLKOUT_DIVIDER_MASK -- confirm against the datasheet.
+ */
+static int cdce706_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       if (hwd->parent == index)
+               return 0;
+       hwd->parent = index;
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_CLKOUT(hwd->idx),
+                                 CDCE706_CLKOUT_ENABLE_MASK, index);
+}
+
+static u8 cdce706_clkout_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
+/* Output stages pass their selected divider's rate through 1:1 */
+static unsigned long cdce706_clkout_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       return parent_rate;
+}
+
+/* Delegate rate requests entirely to the parent divider */
+static long cdce706_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+                                     unsigned long *parent_rate)
+{
+       *parent_rate = rate;
+       return rate;
+}
+
+static int cdce706_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long parent_rate)
+{
+       return 0;
+}
+
+static const struct clk_ops cdce706_clkout_ops = {
+       .prepare = cdce706_clkout_prepare,
+       .unprepare = cdce706_clkout_unprepare,
+       .set_parent = cdce706_clkout_set_parent,
+       .get_parent = cdce706_clkout_get_parent,
+       .recalc_rate = cdce706_clkout_recalc_rate,
+       .round_rate = cdce706_clkout_round_rate,
+       .set_rate = cdce706_clkout_set_rate,
+};
+
+/*
+ * Register num_hw clocks of one type.  The caller's clk_init_data is
+ * reused for every clock with only .name rewritten, so it must stay
+ * valid for the duration of the loop (devm_clk_register copies it).
+ * Stops and returns the error of the first failed registration;
+ * previously registered clocks are left to devm cleanup.
+ */
+static int cdce706_register_hw(struct cdce706_dev_data *cdce,
+                              struct cdce706_hw_data *hw, unsigned num_hw,
+                              const char * const *clk_names,
+                              struct clk_init_data *init)
+{
+       unsigned i;
+
+       for (i = 0; i < num_hw; ++i, ++hw) {
+               init->name = clk_names[i];
+               hw->dev_data = cdce;
+               hw->idx = i;
+               hw->hw.init = init;
+               hw->clk = devm_clk_register(&cdce->client->dev,
+                                           &hw->hw);
+               if (IS_ERR(hw->clk)) {
+                       dev_err(&cdce->client->dev, "Failed to register %s\n",
+                               clk_names[i]);
+                       return PTR_ERR(hw->clk);
+               }
+       }
+       return 0;
+}
+
+/*
+ * Register the "clk_in" clock.  Resolves the two possible external
+ * sources ("clk_in0"/"clk_in1") via the clock framework, falling back to
+ * the literal names when no clock is bound; then reads the hardware's
+ * source-selection registers to seed the cached parent index before
+ * registering the clkin clk_hw.
+ */
+static int cdce706_register_clkin(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_clkin_ops,
+               .parent_names = cdce->clkin_name,
+               .num_parents = ARRAY_SIZE(cdce->clkin_name),
+       };
+       unsigned i;
+       int ret;
+       unsigned clock, source;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->clkin_name); ++i) {
+               struct clk *parent = devm_clk_get(&cdce->client->dev,
+                                                 cdce706_source_name[i]);
+
+               if (IS_ERR(parent)) {
+                       cdce->clkin_name[i] = cdce706_source_name[i];
+               } else {
+                       cdce->clkin_name[i] = __clk_get_name(parent);
+                       cdce->clkin_clk[i] = parent;
+               }
+       }
+
+       /* parent index is only meaningful for the LVCMOS input mode */
+       ret = cdce706_reg_read(cdce, CDCE706_CLKIN_SOURCE, &source);
+       if (ret < 0)
+               return ret;
+       if ((source & CDCE706_CLKIN_SOURCE_MASK) ==
+           CDCE706_CLKIN_SOURCE_LVCMOS) {
+               ret = cdce706_reg_read(cdce, CDCE706_CLKIN_CLOCK, &clock);
+               if (ret < 0)
+                       return ret;
+               cdce->clkin[0].parent = !!(clock & CDCE706_CLKIN_CLOCK_MASK);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->clkin,
+                                 ARRAY_SIZE(cdce->clkin),
+                                 cdce706_clkin_name, &init);
+       return ret;
+}
+
+/*
+ * Register the three PLL clocks.  For each PLL the current M (div) and
+ * N (mul) values are reassembled from the low bytes plus the shared
+ * high-bits byte, and the mux bit is cached, so recalc_rate can report
+ * the hardware's boot-time configuration.
+ */
+static int cdce706_register_plls(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_pll_ops,
+               .parent_names = cdce706_clkin_name,
+               .num_parents = ARRAY_SIZE(cdce706_clkin_name),
+       };
+       unsigned i;
+       int ret;
+       unsigned mux;
+
+       ret = cdce706_reg_read(cdce, CDCE706_PLL_MUX, &mux);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->pll); ++i) {
+               unsigned m, n, v;
+
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_M_LOW(i), &m);
+               if (ret < 0)
+                       return ret;
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n);
+               if (ret < 0)
+                       return ret;
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_HI(i), &v);
+               if (ret < 0)
+                       return ret;
+               cdce->pll[i].div = m | ((v & CDCE706_PLL_HI_M_MASK) << 8);
+               cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) <<
+                                       (8 - CDCE706_PLL_HI_N_SHIFT));
+               cdce->pll[i].mux = mux & CDCE706_PLL_MUX_MASK(i);
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, div: %u, mul: %u, mux: %d\n", __func__, i,
+                       cdce->pll[i].div, cdce->pll[i].mul, cdce->pll[i].mux);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->pll,
+                                 ARRAY_SIZE(cdce->pll),
+                                 cdce706_pll_name, &init);
+       return ret;
+}
+
+/*
+ * Register the six output dividers (p0..p5).  The current PLL selection
+ * and divide value are read from hardware to seed the cached state.
+ * CLK_SET_RATE_PARENT lets a divider re-rate its PLL (see
+ * cdce706_divider_round_rate).
+ */
+static int cdce706_register_dividers(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_divider_ops,
+               .parent_names = cdce706_divider_parent_name,
+               .num_parents = ARRAY_SIZE(cdce706_divider_parent_name),
+               .flags = CLK_SET_RATE_PARENT,
+       };
+       unsigned i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->divider); ++i) {
+               unsigned val;
+
+               ret = cdce706_reg_read(cdce, CDCE706_DIVIDER_PLL(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->divider[i].parent =
+                       (val & CDCE706_DIVIDER_PLL_MASK(i)) >>
+                       CDCE706_DIVIDER_PLL_SHIFT(i);
+
+               ret = cdce706_reg_read(cdce, CDCE706_DIVIDER(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->divider[i].div = val & CDCE706_DIVIDER_DIVIDER_MASK;
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, parent: %u, div: %u\n", __func__, i,
+                       cdce->divider[i].parent, cdce->divider[i].div);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->divider,
+                                 ARRAY_SIZE(cdce->divider),
+                                 cdce706_divider_name, &init);
+       return ret;
+}
+
+/*
+ * Register the six output-stage clocks (clk_out0..5) and expose them in
+ * cdce->clks[] for the onecell OF provider.  The clks[] copy runs even
+ * when registration failed, but probe aborts on a non-zero return before
+ * the provider is added, so the (possibly error-pointer) entries are
+ * never handed out.
+ */
+static int cdce706_register_clkouts(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_clkout_ops,
+               .parent_names = cdce706_divider_name,
+               .num_parents = ARRAY_SIZE(cdce706_divider_name),
+               .flags = CLK_SET_RATE_PARENT,
+       };
+       unsigned i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) {
+               unsigned val;
+
+               /* seed the cached parent from the hardware divider select */
+               ret = cdce706_reg_read(cdce, CDCE706_CLKOUT(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->clkout[i].parent = val & CDCE706_CLKOUT_DIVIDER_MASK;
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, parent: %u\n", __func__, i,
+                       cdce->clkout[i].parent);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->clkout,
+                                 ARRAY_SIZE(cdce->clkout),
+                                 cdce706_clkout_name, &init);
+       for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i)
+               cdce->clks[i] = cdce->clkout[i].clk;
+
+       return ret;
+}
+
+/*
+ * Probe: allocate device state (devm, so no explicit teardown), set up
+ * regmap over SMBus byte access, register the whole clock tree in
+ * dependency order (clkin -> PLLs -> dividers -> outputs), then expose
+ * the six outputs through a onecell OF clock provider.
+ */
+static int cdce706_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+       struct cdce706_dev_data *cdce;
+       int ret;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+               return -EIO;
+
+       cdce = devm_kzalloc(&client->dev, sizeof(*cdce), GFP_KERNEL);
+       if (!cdce)
+               return -ENOMEM;
+
+       cdce->client = client;
+       cdce->regmap = devm_regmap_init_i2c(client, &cdce706_regmap_config);
+       if (IS_ERR(cdce->regmap)) {
+               dev_err(&client->dev, "Failed to initialize regmap\n");
+               return -EINVAL;
+       }
+
+       i2c_set_clientdata(client, cdce);
+
+       ret = cdce706_register_clkin(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_plls(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_dividers(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_clkouts(cdce);
+       if (ret < 0)
+               return ret;
+       cdce->onecell.clks = cdce->clks;
+       cdce->onecell.clk_num = ARRAY_SIZE(cdce->clks);
+       ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
+                                 &cdce->onecell);
+
+       return ret;
+}
+
+/*
+ * NOTE(review): the OF clock provider added in probe is never removed
+ * here (no of_clk_del_provider); confirm whether unbind can leave a
+ * stale provider behind.  The devm-registered clocks are cleaned up
+ * automatically.
+ */
+static int cdce706_remove(struct i2c_client *client)
+{
+       return 0;
+}
+
+
+/* Match by DT compatible when CONFIG_OF, else by plain I2C device id */
+#ifdef CONFIG_OF
+static const struct of_device_id cdce706_dt_match[] = {
+       { .compatible = "ti,cdce706" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, cdce706_dt_match);
+#endif
+
+static const struct i2c_device_id cdce706_id[] = {
+       { "cdce706", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, cdce706_id);
+
+static struct i2c_driver cdce706_i2c_driver = {
+       .driver = {
+               .name   = "cdce706",
+               .of_match_table = of_match_ptr(cdce706_dt_match),
+       },
+       .probe          = cdce706_probe,
+       .remove         = cdce706_remove,
+       .id_table       = cdce706_id,
+};
+module_i2c_driver(cdce706_i2c_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("TI CDCE 706 clock synthesizer driver")
+MODULE_LICENSE("GPL");
index 4386697236a78dc23aea66d0c4792873d558f71e..956b7e54fa1c5f4f3583ac642c3f3f2ae255fee2 100644 (file)
@@ -27,7 +27,7 @@ static u8 clk_composite_get_parent(struct clk_hw *hw)
        const struct clk_ops *mux_ops = composite->mux_ops;
        struct clk_hw *mux_hw = composite->mux_hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->get_parent(mux_hw);
 }
@@ -38,7 +38,7 @@ static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
        const struct clk_ops *mux_ops = composite->mux_ops;
        struct clk_hw *mux_hw = composite->mux_hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->set_parent(mux_hw, index);
 }
@@ -50,12 +50,14 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->recalc_rate(rate_hw, parent_rate);
 }
 
 static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_p)
 {
@@ -72,8 +74,10 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
        int i;
 
        if (rate_hw && rate_ops && rate_ops->determine_rate) {
-               rate_hw->clk = hw->clk;
-               return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
+               __clk_hw_set_clk(rate_hw, hw);
+               return rate_ops->determine_rate(rate_hw, rate, min_rate,
+                                               max_rate,
+                                               best_parent_rate,
                                                best_parent_p);
        } else if (rate_hw && rate_ops && rate_ops->round_rate &&
                   mux_hw && mux_ops && mux_ops->set_parent) {
@@ -116,8 +120,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 
                return best_rate;
        } else if (mux_hw && mux_ops && mux_ops->determine_rate) {
-               mux_hw->clk = hw->clk;
-               return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
+               __clk_hw_set_clk(mux_hw, hw);
+               return mux_ops->determine_rate(mux_hw, rate, min_rate,
+                                              max_rate, best_parent_rate,
                                               best_parent_p);
        } else {
                pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
@@ -132,7 +137,7 @@ static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->round_rate(rate_hw, rate, prate);
 }
@@ -144,7 +149,7 @@ static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->set_rate(rate_hw, rate, parent_rate);
 }
@@ -155,7 +160,7 @@ static int clk_composite_is_enabled(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->is_enabled(gate_hw);
 }
@@ -166,7 +171,7 @@ static int clk_composite_enable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->enable(gate_hw);
 }
@@ -177,7 +182,7 @@ static void clk_composite_disable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        gate_ops->disable(gate_hw);
 }
index c0a842b335c520c6c28f08308a1b62a743038dd3..db7f8bce7467a2abfd37f6ccb8e22c784a6bce28 100644 (file)
@@ -30,7 +30,7 @@
 
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
-#define div_mask(d)    ((1 << ((d)->width)) - 1)
+#define div_mask(width)        ((1 << (width)) - 1)
 
 static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
 {
@@ -54,15 +54,16 @@ static unsigned int _get_table_mindiv(const struct clk_div_table *table)
        return mindiv;
 }
 
-static unsigned int _get_maxdiv(struct clk_divider *divider)
+static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
+                               unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
-               return div_mask(divider);
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
-               return 1 << div_mask(divider);
-       if (divider->table)
-               return _get_table_maxdiv(divider->table);
-       return div_mask(divider) + 1;
+       if (flags & CLK_DIVIDER_ONE_BASED)
+               return div_mask(width);
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
+               return 1 << div_mask(width);
+       if (table)
+               return _get_table_maxdiv(table);
+       return div_mask(width) + 1;
 }
 
 static unsigned int _get_table_div(const struct clk_div_table *table,
@@ -76,14 +77,15 @@ static unsigned int _get_table_div(const struct clk_div_table *table,
        return 0;
 }
 
-static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+static unsigned int _get_div(const struct clk_div_table *table,
+                            unsigned int val, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+       if (flags & CLK_DIVIDER_ONE_BASED)
                return val;
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return 1 << val;
-       if (divider->table)
-               return _get_table_div(divider->table, val);
+       if (table)
+               return _get_table_div(table, val);
        return val + 1;
 }
 
@@ -98,29 +100,28 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
        return 0;
 }
 
-static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
+static unsigned int _get_val(const struct clk_div_table *table,
+                            unsigned int div, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+       if (flags & CLK_DIVIDER_ONE_BASED)
                return div;
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return __ffs(div);
-       if (divider->table)
-               return  _get_table_val(divider->table, div);
+       if (table)
+               return  _get_table_val(table, div);
        return div - 1;
 }
 
-static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
-               unsigned long parent_rate)
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+                                 unsigned int val,
+                                 const struct clk_div_table *table,
+                                 unsigned long flags)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
-       unsigned int div, val;
+       unsigned int div;
 
-       val = clk_readl(divider->reg) >> divider->shift;
-       val &= div_mask(divider);
-
-       div = _get_div(divider, val);
+       div = _get_div(table, val, flags);
        if (!div) {
-               WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+               WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
                        "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
                        __clk_get_name(hw->clk));
                return parent_rate;
@@ -128,6 +129,20 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
 
        return DIV_ROUND_UP(parent_rate, div);
 }
+EXPORT_SYMBOL_GPL(divider_recalc_rate);
+
+static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       unsigned int val;
+
+       val = clk_readl(divider->reg) >> divider->shift;
+       val &= div_mask(divider->width);
+
+       return divider_recalc_rate(hw, parent_rate, val, divider->table,
+                                  divider->flags);
+}
 
 /*
  * The reverse of DIV_ROUND_UP: The maximum number which
@@ -146,12 +161,13 @@ static bool _is_valid_table_div(const struct clk_div_table *table,
        return false;
 }
 
-static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
+                         unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return is_power_of_2(div);
-       if (divider->table)
-               return _is_valid_table_div(divider->table, div);
+       if (table)
+               return _is_valid_table_div(table, div);
        return true;
 }
 
@@ -191,71 +207,76 @@ static int _round_down_table(const struct clk_div_table *table, int div)
        return down;
 }
 
-static int _div_round_up(struct clk_divider *divider,
-               unsigned long parent_rate, unsigned long rate)
+static int _div_round_up(const struct clk_div_table *table,
+                        unsigned long parent_rate, unsigned long rate,
+                        unsigned long flags)
 {
        int div = DIV_ROUND_UP(parent_rate, rate);
 
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                div = __roundup_pow_of_two(div);
-       if (divider->table)
-               div = _round_up_table(divider->table, div);
+       if (table)
+               div = _round_up_table(table, div);
 
        return div;
 }
 
-static int _div_round_closest(struct clk_divider *divider,
-               unsigned long parent_rate, unsigned long rate)
+static int _div_round_closest(const struct clk_div_table *table,
+                             unsigned long parent_rate, unsigned long rate,
+                             unsigned long flags)
 {
        int up, down, div;
 
        up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
 
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) {
+       if (flags & CLK_DIVIDER_POWER_OF_TWO) {
                up = __roundup_pow_of_two(div);
                down = __rounddown_pow_of_two(div);
-       } else if (divider->table) {
-               up = _round_up_table(divider->table, div);
-               down = _round_down_table(divider->table, div);
+       } else if (table) {
+               up = _round_up_table(table, div);
+               down = _round_down_table(table, div);
        }
 
        return (up - div) <= (div - down) ? up : down;
 }
 
-static int _div_round(struct clk_divider *divider, unsigned long parent_rate,
-               unsigned long rate)
+static int _div_round(const struct clk_div_table *table,
+                     unsigned long parent_rate, unsigned long rate,
+                     unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
-               return _div_round_closest(divider, parent_rate, rate);
+       if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+               return _div_round_closest(table, parent_rate, rate, flags);
 
-       return _div_round_up(divider, parent_rate, rate);
+       return _div_round_up(table, parent_rate, rate, flags);
 }
 
-static bool _is_best_div(struct clk_divider *divider,
-               unsigned long rate, unsigned long now, unsigned long best)
+static bool _is_best_div(unsigned long rate, unsigned long now,
+                        unsigned long best, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
+       if (flags & CLK_DIVIDER_ROUND_CLOSEST)
                return abs(rate - now) < abs(rate - best);
 
        return now <= rate && now > best;
 }
 
-static int _next_div(struct clk_divider *divider, int div)
+static int _next_div(const struct clk_div_table *table, int div,
+                    unsigned long flags)
 {
        div++;
 
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return __roundup_pow_of_two(div);
-       if (divider->table)
-               return _round_up_table(divider->table, div);
+       if (table)
+               return _round_up_table(table, div);
 
        return div;
 }
 
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
-               unsigned long *best_parent_rate)
+                              unsigned long *best_parent_rate,
+                              const struct clk_div_table *table, u8 width,
+                              unsigned long flags)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
        int i, bestdiv = 0;
        unsigned long parent_rate, best = 0, now, maxdiv;
        unsigned long parent_rate_saved = *best_parent_rate;
@@ -263,19 +284,11 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        if (!rate)
                rate = 1;
 
-       /* if read only, just return current value */
-       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
-               bestdiv = readl(divider->reg) >> divider->shift;
-               bestdiv &= div_mask(divider);
-               bestdiv = _get_div(divider, bestdiv);
-               return bestdiv;
-       }
-
-       maxdiv = _get_maxdiv(divider);
+       maxdiv = _get_maxdiv(table, width, flags);
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
                parent_rate = *best_parent_rate;
-               bestdiv = _div_round(divider, parent_rate, rate);
+               bestdiv = _div_round(table, parent_rate, rate, flags);
                bestdiv = bestdiv == 0 ? 1 : bestdiv;
                bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
                return bestdiv;
@@ -287,8 +300,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
         */
        maxdiv = min(ULONG_MAX / rate, maxdiv);
 
-       for (i = 1; i <= maxdiv; i = _next_div(divider, i)) {
-               if (!_is_valid_div(divider, i))
+       for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) {
+               if (!_is_valid_div(table, i, flags))
                        continue;
                if (rate * i == parent_rate_saved) {
                        /*
@@ -302,7 +315,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
                parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
                                MULT_ROUND_UP(rate, i));
                now = DIV_ROUND_UP(parent_rate, i);
-               if (_is_best_div(divider, rate, now, best)) {
+               if (_is_best_div(rate, now, best, flags)) {
                        bestdiv = i;
                        best = now;
                        *best_parent_rate = parent_rate;
@@ -310,48 +323,79 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        }
 
        if (!bestdiv) {
-               bestdiv = _get_maxdiv(divider);
+               bestdiv = _get_maxdiv(table, width, flags);
                *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
        }
 
        return bestdiv;
 }
 
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long *prate)
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long *prate, const struct clk_div_table *table,
+                       u8 width, unsigned long flags)
 {
        int div;
-       div = clk_divider_bestdiv(hw, rate, prate);
+
+       div = clk_divider_bestdiv(hw, rate, prate, table, width, flags);
 
        return DIV_ROUND_UP(*prate, div);
 }
+EXPORT_SYMBOL_GPL(divider_round_rate);
 
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long parent_rate)
+static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long *prate)
 {
        struct clk_divider *divider = to_clk_divider(hw);
+       int bestdiv;
+
+       /* if read only, just return current value */
+       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+               bestdiv = readl(divider->reg) >> divider->shift;
+               bestdiv &= div_mask(divider->width);
+               bestdiv = _get_div(divider->table, bestdiv, divider->flags);
+               return bestdiv;
+       }
+
+       return divider_round_rate(hw, rate, prate, divider->table,
+                                 divider->width, divider->flags);
+}
+
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+                   const struct clk_div_table *table, u8 width,
+                   unsigned long flags)
+{
        unsigned int div, value;
-       unsigned long flags = 0;
-       u32 val;
 
        div = DIV_ROUND_UP(parent_rate, rate);
 
-       if (!_is_valid_div(divider, div))
+       if (!_is_valid_div(table, div, flags))
                return -EINVAL;
 
-       value = _get_val(divider, div);
+       value = _get_val(table, div, flags);
+
+       return min_t(unsigned int, value, div_mask(width));
+}
+EXPORT_SYMBOL_GPL(divider_get_val);
+
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       unsigned int value;
+       unsigned long flags = 0;
+       u32 val;
 
-       if (value > div_mask(divider))
-               value = div_mask(divider);
+       value = divider_get_val(rate, parent_rate, divider->table,
+                               divider->width, divider->flags);
 
        if (divider->lock)
                spin_lock_irqsave(divider->lock, flags);
 
        if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
-               val = div_mask(divider) << (divider->shift + 16);
+               val = div_mask(divider->width) << (divider->shift + 16);
        } else {
                val = clk_readl(divider->reg);
-               val &= ~(div_mask(divider) << divider->shift);
+               val &= ~(div_mask(divider->width) << divider->shift);
        }
        val |= value << divider->shift;
        clk_writel(val, divider->reg);
@@ -463,3 +507,19 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
                        width, clk_divider_flags, table, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_divider_table);
+
+void clk_unregister_divider(struct clk *clk)
+{
+       struct clk_divider *div;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       div = to_clk_divider(hw);
+
+       clk_unregister(clk);
+       kfree(div);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_divider);
index 51fd87fb7ba691e8a52e40ddc1ee4524ecd3e781..3f0e4200cb5d4ca4a680c78479ac86ed116766a5 100644 (file)
@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
        struct clk_init_data init;
 
        if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
-               if (bit_idx > 16) {
+               if (bit_idx > 15) {
                        pr_err("gate bit exceeds LOWORD field\n");
                        return ERR_PTR(-EINVAL);
                }
@@ -162,3 +162,19 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
        return clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_gate);
+
+void clk_unregister_gate(struct clk *clk)
+{
+       struct clk_gate *gate;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       gate = to_clk_gate(hw);
+
+       clk_unregister(clk);
+       kfree(gate);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_gate);
index 6e1ecf94bf58daa279cb47e42065da9a9db3c581..69a094c3783d8eb2a2c0d3624f3a641f97a5d484 100644 (file)
@@ -177,3 +177,19 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
                                      NULL, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_mux);
+
+void clk_unregister_mux(struct clk *clk)
+{
+       struct clk_mux *mux;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       mux = to_clk_mux(hw);
+
+       clk_unregister(clk);
+       kfree(mux);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_mux);
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
deleted file mode 100644 (file)
index 0a47d6f..0000000
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * clock driver for Freescale PowerPC corenet SoCs.
- */
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-
-struct cmux_clk {
-       struct clk_hw hw;
-       void __iomem *reg;
-       u32 flags;
-};
-
-#define PLL_KILL                       BIT(31)
-#define        CLKSEL_SHIFT            27
-#define CLKSEL_ADJUST          BIT(0)
-#define to_cmux_clk(p)         container_of(p, struct cmux_clk, hw)
-
-static unsigned int clocks_per_pll;
-
-static int cmux_set_parent(struct clk_hw *hw, u8 idx)
-{
-       struct cmux_clk *clk = to_cmux_clk(hw);
-       u32 clksel;
-
-       clksel = ((idx / clocks_per_pll) << 2) + idx % clocks_per_pll;
-       if (clk->flags & CLKSEL_ADJUST)
-               clksel += 8;
-       clksel = (clksel & 0xf) << CLKSEL_SHIFT;
-       iowrite32be(clksel, clk->reg);
-
-       return 0;
-}
-
-static u8 cmux_get_parent(struct clk_hw *hw)
-{
-       struct cmux_clk *clk = to_cmux_clk(hw);
-       u32 clksel;
-
-       clksel = ioread32be(clk->reg);
-       clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
-       if (clk->flags & CLKSEL_ADJUST)
-               clksel -= 8;
-       clksel = (clksel >> 2) * clocks_per_pll + clksel % 4;
-
-       return clksel;
-}
-
-const struct clk_ops cmux_ops = {
-       .get_parent = cmux_get_parent,
-       .set_parent = cmux_set_parent,
-};
-
-static void __init core_mux_init(struct device_node *np)
-{
-       struct clk *clk;
-       struct clk_init_data init;
-       struct cmux_clk *cmux_clk;
-       struct device_node *node;
-       int rc, count, i;
-       u32     offset;
-       const char *clk_name;
-       const char **parent_names;
-
-       rc = of_property_read_u32(np, "reg", &offset);
-       if (rc) {
-               pr_err("%s: could not get reg property\n", np->name);
-               return;
-       }
-
-       /* get the input clock source count */
-       count = of_property_count_strings(np, "clock-names");
-       if (count < 0) {
-               pr_err("%s: get clock count error\n", np->name);
-               return;
-       }
-       parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL);
-       if (!parent_names) {
-               pr_err("%s: could not allocate parent_names\n", __func__);
-               return;
-       }
-
-       for (i = 0; i < count; i++)
-               parent_names[i] = of_clk_get_parent_name(np, i);
-
-       cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL);
-       if (!cmux_clk) {
-               pr_err("%s: could not allocate cmux_clk\n", __func__);
-               goto err_name;
-       }
-       cmux_clk->reg = of_iomap(np, 0);
-       if (!cmux_clk->reg) {
-               pr_err("%s: could not map register\n", __func__);
-               goto err_clk;
-       }
-
-       node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
-       if (node && (offset >= 0x80))
-               cmux_clk->flags = CLKSEL_ADJUST;
-
-       rc = of_property_read_string_index(np, "clock-output-names",
-                       0, &clk_name);
-       if (rc) {
-               pr_err("%s: read clock names error\n", np->name);
-               goto err_clk;
-       }
-
-       init.name = clk_name;
-       init.ops = &cmux_ops;
-       init.parent_names = parent_names;
-       init.num_parents = count;
-       init.flags = 0;
-       cmux_clk->hw.init = &init;
-
-       clk = clk_register(NULL, &cmux_clk->hw);
-       if (IS_ERR(clk)) {
-               pr_err("%s: could not register clock\n", clk_name);
-               goto err_clk;
-       }
-
-       rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
-       if (rc) {
-               pr_err("Could not register clock provider for node:%s\n",
-                        np->name);
-               goto err_clk;
-       }
-       goto err_name;
-
-err_clk:
-       kfree(cmux_clk);
-err_name:
-       /* free *_names because they are reallocated when registered */
-       kfree(parent_names);
-}
-
-static void __init core_pll_init(struct device_node *np)
-{
-       u32 mult;
-       int i, rc, count;
-       const char *clk_name, *parent_name;
-       struct clk_onecell_data *onecell_data;
-       struct clk      **subclks;
-       void __iomem *base;
-
-       base = of_iomap(np, 0);
-       if (!base) {
-               pr_err("clk-ppc: iomap error\n");
-               return;
-       }
-
-       /* get the multiple of PLL */
-       mult = ioread32be(base);
-
-       /* check if this PLL is disabled */
-       if (mult & PLL_KILL) {
-               pr_debug("PLL:%s is disabled\n", np->name);
-               goto err_map;
-       }
-       mult = (mult >> 1) & 0x3f;
-
-       parent_name = of_clk_get_parent_name(np, 0);
-       if (!parent_name) {
-               pr_err("PLL: %s must have a parent\n", np->name);
-               goto err_map;
-       }
-
-       count = of_property_count_strings(np, "clock-output-names");
-       if (count < 0 || count > 4) {
-               pr_err("%s: clock is not supported\n", np->name);
-               goto err_map;
-       }
-
-       /* output clock number per PLL */
-       clocks_per_pll = count;
-
-       subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL);
-       if (!subclks) {
-               pr_err("%s: could not allocate subclks\n", __func__);
-               goto err_map;
-       }
-
-       onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
-       if (!onecell_data) {
-               pr_err("%s: could not allocate onecell_data\n", __func__);
-               goto err_clks;
-       }
-
-       for (i = 0; i < count; i++) {
-               rc = of_property_read_string_index(np, "clock-output-names",
-                               i, &clk_name);
-               if (rc) {
-                       pr_err("%s: could not get clock names\n", np->name);
-                       goto err_cell;
-               }
-
-               /*
-                * when count == 4, there are 4 output clocks:
-                * /1, /2, /3, /4 respectively
-                * when count < 4, there are at least 2 output clocks:
-                * /1, /2, (/4, if count == 3) respectively.
-                */
-               if (count == 4)
-                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
-                                       parent_name, 0, mult, 1 + i);
-               else
-
-                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
-                                       parent_name, 0, mult, 1 << i);
-
-               if (IS_ERR(subclks[i])) {
-                       pr_err("%s: could not register clock\n", clk_name);
-                       goto err_cell;
-               }
-       }
-
-       onecell_data->clks = subclks;
-       onecell_data->clk_num = count;
-
-       rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
-       if (rc) {
-               pr_err("Could not register clk provider for node:%s\n",
-                        np->name);
-               goto err_cell;
-       }
-
-       iounmap(base);
-       return;
-err_cell:
-       kfree(onecell_data);
-err_clks:
-       kfree(subclks);
-err_map:
-       iounmap(base);
-}
-
-static void __init sysclk_init(struct device_node *node)
-{
-       struct clk *clk;
-       const char *clk_name = node->name;
-       struct device_node *np = of_get_parent(node);
-       u32 rate;
-
-       if (!np) {
-               pr_err("ppc-clk: could not get parent node\n");
-               return;
-       }
-
-       if (of_property_read_u32(np, "clock-frequency", &rate)) {
-               of_node_put(node);
-               return;
-       }
-
-       of_property_read_string(np, "clock-output-names", &clk_name);
-
-       clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
-       if (!IS_ERR(clk))
-               of_clk_add_provider(np, of_clk_src_simple_get, clk);
-}
-
-static const struct of_device_id clk_match[] __initconst = {
-       { .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, },
-       { .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, },
-       { .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, },
-       { .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, },
-       { .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, },
-       { .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, },
-       {}
-};
-
-static int __init ppc_corenet_clk_probe(struct platform_device *pdev)
-{
-       of_clk_init(clk_match);
-
-       return 0;
-}
-
-static const struct of_device_id ppc_clk_ids[] __initconst = {
-       { .compatible = "fsl,qoriq-clockgen-1.0", },
-       { .compatible = "fsl,qoriq-clockgen-2.0", },
-       {}
-};
-
-static struct platform_driver ppc_corenet_clk_driver = {
-       .driver = {
-               .name = "ppc_corenet_clock",
-               .of_match_table = ppc_clk_ids,
-       },
-       .probe = ppc_corenet_clk_probe,
-};
-
-static int __init ppc_corenet_clk_init(void)
-{
-       return platform_driver_register(&ppc_corenet_clk_driver);
-}
-subsys_initcall(ppc_corenet_clk_init);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
new file mode 100644 (file)
index 0000000..cda90a9
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * clock driver for Freescale QorIQ SoCs.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+struct cmux_clk {
+       struct clk_hw hw;
+       void __iomem *reg;
+       unsigned int clk_per_pll;
+       u32 flags;
+};
+
+#define PLL_KILL                       BIT(31)
+#define        CLKSEL_SHIFT            27
+#define CLKSEL_ADJUST          BIT(0)
+#define to_cmux_clk(p)         container_of(p, struct cmux_clk, hw)
+
+static int cmux_set_parent(struct clk_hw *hw, u8 idx)
+{
+       struct cmux_clk *clk = to_cmux_clk(hw);
+       u32 clksel;
+
+       clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll;
+       if (clk->flags & CLKSEL_ADJUST)
+               clksel += 8;
+       clksel = (clksel & 0xf) << CLKSEL_SHIFT;
+       iowrite32be(clksel, clk->reg);
+
+       return 0;
+}
+
+static u8 cmux_get_parent(struct clk_hw *hw)
+{
+       struct cmux_clk *clk = to_cmux_clk(hw);
+       u32 clksel;
+
+       clksel = ioread32be(clk->reg);
+       clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
+       if (clk->flags & CLKSEL_ADJUST)
+               clksel -= 8;
+       clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4;
+
+       return clksel;
+}
+
+static const struct clk_ops cmux_ops = {
+       .get_parent = cmux_get_parent,
+       .set_parent = cmux_set_parent,
+};
+
+static void __init core_mux_init(struct device_node *np)
+{
+       struct clk *clk;
+       struct clk_init_data init;
+       struct cmux_clk *cmux_clk;
+       struct device_node *node;
+       int rc, count, i;
+       u32     offset;
+       const char *clk_name;
+       const char **parent_names;
+       struct of_phandle_args clkspec;
+
+       rc = of_property_read_u32(np, "reg", &offset);
+       if (rc) {
+               pr_err("%s: could not get reg property\n", np->name);
+               return;
+       }
+
+       /* get the input clock source count */
+       count = of_property_count_strings(np, "clock-names");
+       if (count < 0) {
+               pr_err("%s: get clock count error\n", np->name);
+               return;
+       }
+       parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
+       if (!parent_names)
+               return;
+
+       for (i = 0; i < count; i++)
+               parent_names[i] = of_clk_get_parent_name(np, i);
+
+       cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL);
+       if (!cmux_clk)
+               goto err_name;
+
+       cmux_clk->reg = of_iomap(np, 0);
+       if (!cmux_clk->reg) {
+               pr_err("%s: could not map register\n", __func__);
+               goto err_clk;
+       }
+
+       rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
+                                       &clkspec);
+       if (rc) {
+               pr_err("%s: parse clock node error\n", __func__);
+               goto err_clk;
+       }
+
+       cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np,
+                       "clock-output-names");
+       of_node_put(clkspec.np);
+
+       node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
+       if (node && (offset >= 0x80))
+               cmux_clk->flags = CLKSEL_ADJUST;
+
+       rc = of_property_read_string_index(np, "clock-output-names",
+                                          0, &clk_name);
+       if (rc) {
+               pr_err("%s: read clock names error\n", np->name);
+               goto err_clk;
+       }
+
+       init.name = clk_name;
+       init.ops = &cmux_ops;
+       init.parent_names = parent_names;
+       init.num_parents = count;
+       init.flags = 0;
+       cmux_clk->hw.init = &init;
+
+       clk = clk_register(NULL, &cmux_clk->hw);
+       if (IS_ERR(clk)) {
+               pr_err("%s: could not register clock\n", clk_name);
+               goto err_clk;
+       }
+
+       rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+       if (rc) {
+               pr_err("Could not register clock provider for node:%s\n",
+                      np->name);
+               goto err_clk;
+       }
+       goto err_name;
+
+err_clk:
+       kfree(cmux_clk);
+err_name:
+       /* free *_names because they are reallocated when registered */
+       kfree(parent_names);
+}
+
+static void __init core_pll_init(struct device_node *np)
+{
+       u32 mult;
+       int i, rc, count;
+       const char *clk_name, *parent_name;
+       struct clk_onecell_data *onecell_data;
+       struct clk      **subclks;
+       void __iomem *base;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("iomap error\n");
+               return;
+       }
+
+       /* get the multiple of PLL */
+       mult = ioread32be(base);
+
+       /* check if this PLL is disabled */
+       if (mult & PLL_KILL) {
+               pr_debug("PLL:%s is disabled\n", np->name);
+               goto err_map;
+       }
+       mult = (mult >> 1) & 0x3f;
+
+       parent_name = of_clk_get_parent_name(np, 0);
+       if (!parent_name) {
+               pr_err("PLL: %s must have a parent\n", np->name);
+               goto err_map;
+       }
+
+       count = of_property_count_strings(np, "clock-output-names");
+       if (count < 0 || count > 4) {
+               pr_err("%s: clock is not supported\n", np->name);
+               goto err_map;
+       }
+
+       subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+       if (!subclks)
+               goto err_map;
+
+       onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
+       if (!onecell_data)
+               goto err_clks;
+
+       for (i = 0; i < count; i++) {
+               rc = of_property_read_string_index(np, "clock-output-names",
+                                                  i, &clk_name);
+               if (rc) {
+                       pr_err("%s: could not get clock names\n", np->name);
+                       goto err_cell;
+               }
+
+               /*
+                * when count == 4, there are 4 output clocks:
+                * /1, /2, /3, /4 respectively
+                * when count < 4, there are at least 2 output clocks:
+                * /1, /2, (/4, if count == 3) respectively.
+                */
+               if (count == 4)
+                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                       parent_name, 0, mult, 1 + i);
+               else
+
+                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                       parent_name, 0, mult, 1 << i);
+
+               if (IS_ERR(subclks[i])) {
+                       pr_err("%s: could not register clock\n", clk_name);
+                       goto err_cell;
+               }
+       }
+
+       onecell_data->clks = subclks;
+       onecell_data->clk_num = count;
+
+       rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
+       if (rc) {
+               pr_err("Could not register clk provider for node:%s\n",
+                      np->name);
+               goto err_cell;
+       }
+
+       iounmap(base);
+       return;
+err_cell:
+       kfree(onecell_data);
+err_clks:
+       kfree(subclks);
+err_map:
+       iounmap(base);
+}
+
+static void __init sysclk_init(struct device_node *node)
+{
+       struct clk *clk;
+       const char *clk_name = node->name;
+       struct device_node *np = of_get_parent(node);
+       u32 rate;
+
+       if (!np) {
+               pr_err("could not get parent node\n");
+               return;
+       }
+
+       if (of_property_read_u32(np, "clock-frequency", &rate)) {
+               of_node_put(node);
+               return;
+       }
+
+       of_property_read_string(np, "clock-output-names", &clk_name);
+
+       clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
+       if (!IS_ERR(clk))
+               of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static void __init pltfrm_pll_init(struct device_node *np)
+{
+       void __iomem *base;
+       uint32_t mult;
+       const char *parent_name, *clk_name;
+       int i, _errno;
+       struct clk_onecell_data *cod;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
+               return;
+       }
+
+       /* Get the multiple of PLL */
+       mult = ioread32be(base);
+
+       iounmap(base);
+
+       /* Check if this PLL is disabled */
+       if (mult & PLL_KILL) {
+               pr_debug("%s(): %s: Disabled\n", __func__, np->name);
+               return;
+       }
+       mult = (mult & GENMASK(6, 1)) >> 1;
+
+       parent_name = of_clk_get_parent_name(np, 0);
+       if (!parent_name) {
+               pr_err("%s(): %s: of_clk_get_parent_name() failed\n",
+                      __func__, np->name);
+               return;
+       }
+
+       i = of_property_count_strings(np, "clock-output-names");
+       if (i < 0) {
+               pr_err("%s(): %s: of_property_count_strings(clock-output-names) = %d\n",
+                      __func__, np->name, i);
+               return;
+       }
+
+       cod = kmalloc(sizeof(*cod) + i * sizeof(struct clk *), GFP_KERNEL);
+       if (!cod)
+               return;
+       cod->clks = (struct clk **)(cod + 1);
+       cod->clk_num = i;
+
+       for (i = 0; i < cod->clk_num; i++) {
+               _errno = of_property_read_string_index(np, "clock-output-names",
+                                                      i, &clk_name);
+               if (_errno < 0) {
+                       pr_err("%s(): %s: of_property_read_string_index(clock-output-names) = %d\n",
+                              __func__, np->name, _errno);
+                       goto return_clk_unregister;
+               }
+
+               cod->clks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                              parent_name, 0, mult, 1 + i);
+               if (IS_ERR(cod->clks[i])) {
+                       pr_err("%s(): %s: clk_register_fixed_factor(%s) = %ld\n",
+                              __func__, np->name,
+                              clk_name, PTR_ERR(cod->clks[i]));
+                       goto return_clk_unregister;
+               }
+       }
+
+       _errno = of_clk_add_provider(np, of_clk_src_onecell_get, cod);
+       if (_errno < 0) {
+               pr_err("%s(): %s: of_clk_add_provider() = %d\n",
+                      __func__, np->name, _errno);
+               goto return_clk_unregister;
+       }
+
+       return;
+
+return_clk_unregister:
+       while (--i >= 0)
+               clk_unregister(cod->clks[i]);
+       kfree(cod);
+}
+
+CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
index 642cf37124d3780024d1739bf9298aa788c336e2..eb0152961d3c60652af108246e9d10be9d13a371 100644 (file)
@@ -9,7 +9,7 @@
  * Standard functionality for the common clock API.  See Documentation/clk.txt
  */
 
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
 #include <linux/clk/clk-conf.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -37,6 +37,55 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+static long clk_core_get_accuracy(struct clk_core *clk);
+static unsigned long clk_core_get_rate(struct clk_core *clk);
+static int clk_core_get_phase(struct clk_core *clk);
+static bool clk_core_is_prepared(struct clk_core *clk);
+static bool clk_core_is_enabled(struct clk_core *clk);
+static struct clk_core *clk_core_lookup(const char *name);
+
+/***    private data structures    ***/
+
+struct clk_core {
+       const char              *name;
+       const struct clk_ops    *ops;
+       struct clk_hw           *hw;
+       struct module           *owner;
+       struct clk_core         *parent;
+       const char              **parent_names;
+       struct clk_core         **parents;
+       u8                      num_parents;
+       u8                      new_parent_index;
+       unsigned long           rate;
+       unsigned long           req_rate;
+       unsigned long           new_rate;
+       struct clk_core         *new_parent;
+       struct clk_core         *new_child;
+       unsigned long           flags;
+       unsigned int            enable_count;
+       unsigned int            prepare_count;
+       unsigned long           accuracy;
+       int                     phase;
+       struct hlist_head       children;
+       struct hlist_node       child_node;
+       struct hlist_node       debug_node;
+       struct hlist_head       clks;
+       unsigned int            notifier_count;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry           *dentry;
+#endif
+       struct kref             ref;
+};
+
+struct clk {
+       struct clk_core *core;
+       const char *dev_id;
+       const char *con_id;
+       unsigned long min_rate;
+       unsigned long max_rate;
+       struct hlist_node child_node;
+};
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
@@ -114,7 +163,8 @@ static struct hlist_head *orphan_list[] = {
        NULL,
 };
 
-static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
+static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+                                int level)
 {
        if (!c)
                return;
@@ -122,14 +172,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
        seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
                   level * 3 + 1, "",
                   30 - level * 3, c->name,
-                  c->enable_count, c->prepare_count, clk_get_rate(c),
-                  clk_get_accuracy(c), clk_get_phase(c));
+                  c->enable_count, c->prepare_count, clk_core_get_rate(c),
+                  clk_core_get_accuracy(c), clk_core_get_phase(c));
 }
 
-static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
+static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
                                     int level)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        if (!c)
                return;
@@ -142,7 +192,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
 
 static int clk_summary_show(struct seq_file *s, void *data)
 {
-       struct clk *c;
+       struct clk_core *c;
        struct hlist_head **lists = (struct hlist_head **)s->private;
 
        seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
@@ -172,7 +222,7 @@ static const struct file_operations clk_summary_fops = {
        .release        = single_release,
 };
 
-static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
+static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
 {
        if (!c)
                return;
@@ -180,14 +230,14 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
        seq_printf(s, "\"%s\": { ", c->name);
        seq_printf(s, "\"enable_count\": %d,", c->enable_count);
        seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
-       seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
-       seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
-       seq_printf(s, "\"phase\": %d", clk_get_phase(c));
+       seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
+       seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
+       seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
 }
 
-static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
+static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        if (!c)
                return;
@@ -204,7 +254,7 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 
 static int clk_dump(struct seq_file *s, void *data)
 {
-       struct clk *c;
+       struct clk_core *c;
        bool first_node = true;
        struct hlist_head **lists = (struct hlist_head **)s->private;
 
@@ -240,7 +290,7 @@ static const struct file_operations clk_dump_fops = {
        .release        = single_release,
 };
 
-static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
+static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
 {
        struct dentry *d;
        int ret = -ENOMEM;
@@ -315,7 +365,7 @@ out:
  * initialized.  Otherwise it bails out early since the debugfs clk tree
  * will be created lazily by clk_debug_init as part of a late_initcall.
  */
-static int clk_debug_register(struct clk *clk)
+static int clk_debug_register(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -340,16 +390,12 @@ unlock:
  * debugfs clk tree if clk->dentry points to debugfs created by
  * clk_debug_register in __clk_init.
  */
-static void clk_debug_unregister(struct clk *clk)
+static void clk_debug_unregister(struct clk_core *clk)
 {
        mutex_lock(&clk_debug_lock);
-       if (!clk->dentry)
-               goto out;
-
        hlist_del_init(&clk->debug_node);
        debugfs_remove_recursive(clk->dentry);
        clk->dentry = NULL;
-out:
        mutex_unlock(&clk_debug_lock);
 }
 
@@ -358,8 +404,9 @@ struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
 {
        struct dentry *d = NULL;
 
-       if (hw->clk->dentry)
-               d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops);
+       if (hw->core->dentry)
+               d = debugfs_create_file(name, mode, hw->core->dentry, data,
+                                       fops);
 
        return d;
 }
@@ -379,7 +426,7 @@ EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
  */
 static int __init clk_debug_init(void)
 {
-       struct clk *clk;
+       struct clk_core *clk;
        struct dentry *d;
 
        rootdir = debugfs_create_dir("clk", NULL);
@@ -418,22 +465,20 @@ static int __init clk_debug_init(void)
 }
 late_initcall(clk_debug_init);
 #else
-static inline int clk_debug_register(struct clk *clk) { return 0; }
-static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+static inline int clk_debug_register(struct clk_core *clk) { return 0; }
+static inline void clk_debug_reparent(struct clk_core *clk,
+                                     struct clk_core *new_parent)
 {
 }
-static inline void clk_debug_unregister(struct clk *clk)
+static inline void clk_debug_unregister(struct clk_core *clk)
 {
 }
 #endif
 
 /* caller must hold prepare_lock */
-static void clk_unprepare_unused_subtree(struct clk *clk)
+static void clk_unprepare_unused_subtree(struct clk_core *clk)
 {
-       struct clk *child;
-
-       if (!clk)
-               return;
+       struct clk_core *child;
 
        hlist_for_each_entry(child, &clk->children, child_node)
                clk_unprepare_unused_subtree(child);
@@ -444,7 +489,7 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
        if (clk->flags & CLK_IGNORE_UNUSED)
                return;
 
-       if (__clk_is_prepared(clk)) {
+       if (clk_core_is_prepared(clk)) {
                if (clk->ops->unprepare_unused)
                        clk->ops->unprepare_unused(clk->hw);
                else if (clk->ops->unprepare)
@@ -453,14 +498,11 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
 }
 
 /* caller must hold prepare_lock */
-static void clk_disable_unused_subtree(struct clk *clk)
+static void clk_disable_unused_subtree(struct clk_core *clk)
 {
-       struct clk *child;
+       struct clk_core *child;
        unsigned long flags;
 
-       if (!clk)
-               goto out;
-
        hlist_for_each_entry(child, &clk->children, child_node)
                clk_disable_unused_subtree(child);
 
@@ -477,7 +519,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
         * sequence.  call .disable_unused if available, otherwise fall
         * back to .disable
         */
-       if (__clk_is_enabled(clk)) {
+       if (clk_core_is_enabled(clk)) {
                if (clk->ops->disable_unused)
                        clk->ops->disable_unused(clk->hw);
                else if (clk->ops->disable)
@@ -486,9 +528,6 @@ static void clk_disable_unused_subtree(struct clk *clk)
 
 unlock_out:
        clk_enable_unlock(flags);
-
-out:
-       return;
 }
 
 static bool clk_ignore_unused;
@@ -501,7 +540,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 
 static int clk_disable_unused(void)
 {
-       struct clk *clk;
+       struct clk_core *clk;
 
        if (clk_ignore_unused) {
                pr_warn("clk: Not disabling unused clocks\n");
@@ -532,48 +571,65 @@ late_initcall_sync(clk_disable_unused);
 
 const char *__clk_get_name(struct clk *clk)
 {
-       return !clk ? NULL : clk->name;
+       return !clk ? NULL : clk->core->name;
 }
 EXPORT_SYMBOL_GPL(__clk_get_name);
 
 struct clk_hw *__clk_get_hw(struct clk *clk)
 {
-       return !clk ? NULL : clk->hw;
+       return !clk ? NULL : clk->core->hw;
 }
 EXPORT_SYMBOL_GPL(__clk_get_hw);
 
 u8 __clk_get_num_parents(struct clk *clk)
 {
-       return !clk ? 0 : clk->num_parents;
+       return !clk ? 0 : clk->core->num_parents;
 }
 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
 
 struct clk *__clk_get_parent(struct clk *clk)
 {
-       return !clk ? NULL : clk->parent;
+       if (!clk)
+               return NULL;
+
+       /* TODO: Create a per-user clk and change callers to call clk_put */
+       return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
 }
 EXPORT_SYMBOL_GPL(__clk_get_parent);
 
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
+                                                        u8 index)
 {
        if (!clk || index >= clk->num_parents)
                return NULL;
        else if (!clk->parents)
-               return __clk_lookup(clk->parent_names[index]);
+               return clk_core_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                return clk->parents[index] =
-                       __clk_lookup(clk->parent_names[index]);
+                       clk_core_lookup(clk->parent_names[index]);
        else
                return clk->parents[index];
 }
+
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+       struct clk_core *parent;
+
+       if (!clk)
+               return NULL;
+
+       parent = clk_core_get_parent_by_index(clk->core, index);
+
+       return !parent ? NULL : parent->hw->clk;
+}
 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
 
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
-       return !clk ? 0 : clk->enable_count;
+       return !clk ? 0 : clk->core->enable_count;
 }
 
-unsigned long __clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
 {
        unsigned long ret;
 
@@ -593,9 +649,17 @@ unsigned long __clk_get_rate(struct clk *clk)
 out:
        return ret;
 }
+
+unsigned long __clk_get_rate(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_rate_nolock(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_get_rate);
 
-static unsigned long __clk_get_accuracy(struct clk *clk)
+static unsigned long __clk_get_accuracy(struct clk_core *clk)
 {
        if (!clk)
                return 0;
@@ -605,11 +669,11 @@ static unsigned long __clk_get_accuracy(struct clk *clk)
 
 unsigned long __clk_get_flags(struct clk *clk)
 {
-       return !clk ? 0 : clk->flags;
+       return !clk ? 0 : clk->core->flags;
 }
 EXPORT_SYMBOL_GPL(__clk_get_flags);
 
-bool __clk_is_prepared(struct clk *clk)
+static bool clk_core_is_prepared(struct clk_core *clk)
 {
        int ret;
 
@@ -630,7 +694,15 @@ out:
        return !!ret;
 }
 
-bool __clk_is_enabled(struct clk *clk)
+bool __clk_is_prepared(struct clk *clk)
+{
+       if (!clk)
+               return false;
+
+       return clk_core_is_prepared(clk->core);
+}
+
+static bool clk_core_is_enabled(struct clk_core *clk)
 {
        int ret;
 
@@ -650,12 +722,21 @@ bool __clk_is_enabled(struct clk *clk)
 out:
        return !!ret;
 }
+
+bool __clk_is_enabled(struct clk *clk)
+{
+       if (!clk)
+               return false;
+
+       return clk_core_is_enabled(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_is_enabled);
 
-static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
+static struct clk_core *__clk_lookup_subtree(const char *name,
+                                            struct clk_core *clk)
 {
-       struct clk *child;
-       struct clk *ret;
+       struct clk_core *child;
+       struct clk_core *ret;
 
        if (!strcmp(clk->name, name))
                return clk;
@@ -669,10 +750,10 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
        return NULL;
 }
 
-struct clk *__clk_lookup(const char *name)
+static struct clk_core *clk_core_lookup(const char *name)
 {
-       struct clk *root_clk;
-       struct clk *ret;
+       struct clk_core *root_clk;
+       struct clk_core *ret;
 
        if (!name)
                return NULL;
@@ -694,42 +775,53 @@ struct clk *__clk_lookup(const char *name)
        return NULL;
 }
 
-/*
- * Helper for finding best parent to provide a given frequency. This can be used
- * directly as a determine_rate callback (e.g. for a mux), or from a more
- * complex clock that may combine a mux with other operations.
- */
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
-                             unsigned long *best_parent_rate,
-                             struct clk_hw **best_parent_p)
+static bool mux_is_better_rate(unsigned long rate, unsigned long now,
+                          unsigned long best, unsigned long flags)
 {
-       struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+       if (flags & CLK_MUX_ROUND_CLOSEST)
+               return abs(now - rate) < abs(best - rate);
+
+       return now <= rate && now > best;
+}
+
+static long
+clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
+                            unsigned long min_rate,
+                            unsigned long max_rate,
+                            unsigned long *best_parent_rate,
+                            struct clk_hw **best_parent_p,
+                            unsigned long flags)
+{
+       struct clk_core *core = hw->core, *parent, *best_parent = NULL;
        int i, num_parents;
        unsigned long parent_rate, best = 0;
 
        /* if NO_REPARENT flag set, pass through to current parent */
-       if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
-               parent = clk->parent;
-               if (clk->flags & CLK_SET_RATE_PARENT)
-                       best = __clk_round_rate(parent, rate);
+       if (core->flags & CLK_SET_RATE_NO_REPARENT) {
+               parent = core->parent;
+               if (core->flags & CLK_SET_RATE_PARENT)
+                       best = __clk_determine_rate(parent ? parent->hw : NULL,
+                                                   rate, min_rate, max_rate);
                else if (parent)
-                       best = __clk_get_rate(parent);
+                       best = clk_core_get_rate_nolock(parent);
                else
-                       best = __clk_get_rate(clk);
+                       best = clk_core_get_rate_nolock(core);
                goto out;
        }
 
        /* find the parent that can provide the fastest rate <= rate */
-       num_parents = clk->num_parents;
+       num_parents = core->num_parents;
        for (i = 0; i < num_parents; i++) {
-               parent = clk_get_parent_by_index(clk, i);
+               parent = clk_core_get_parent_by_index(core, i);
                if (!parent)
                        continue;
-               if (clk->flags & CLK_SET_RATE_PARENT)
-                       parent_rate = __clk_round_rate(parent, rate);
+               if (core->flags & CLK_SET_RATE_PARENT)
+                       parent_rate = __clk_determine_rate(parent->hw, rate,
+                                                          min_rate,
+                                                          max_rate);
                else
-                       parent_rate = __clk_get_rate(parent);
-               if (parent_rate <= rate && parent_rate > best) {
+                       parent_rate = clk_core_get_rate_nolock(parent);
+               if (mux_is_better_rate(rate, parent_rate, best, flags)) {
                        best_parent = parent;
                        best = parent_rate;
                }
@@ -742,11 +834,63 @@ out:
 
        return best;
 }
+
+struct clk *__clk_lookup(const char *name)
+{
+       struct clk_core *core = clk_core_lookup(name);
+
+       return !core ? NULL : core->hw->clk;
+}
+
+static void clk_core_get_boundaries(struct clk_core *clk,
+                                   unsigned long *min_rate,
+                                   unsigned long *max_rate)
+{
+       struct clk *clk_user;
+
+       *min_rate = 0;
+       *max_rate = ULONG_MAX;
+
+       hlist_for_each_entry(clk_user, &clk->clks, child_node)
+               *min_rate = max(*min_rate, clk_user->min_rate);
+
+       hlist_for_each_entry(clk_user, &clk->clks, child_node)
+               *max_rate = min(*max_rate, clk_user->max_rate);
+}
+
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p)
+{
+       return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+                                           best_parent_rate,
+                                           best_parent_p, 0);
+}
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
 
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p)
+{
+       return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+                                           best_parent_rate,
+                                           best_parent_p,
+                                           CLK_MUX_ROUND_CLOSEST);
+}
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
+
 /***        clk api        ***/
 
-void __clk_unprepare(struct clk *clk)
+static void clk_core_unprepare(struct clk_core *clk)
 {
        if (!clk)
                return;
@@ -762,7 +906,7 @@ void __clk_unprepare(struct clk *clk)
        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);
 
-       __clk_unprepare(clk->parent);
+       clk_core_unprepare(clk->parent);
 }
 
 /**
@@ -782,12 +926,12 @@ void clk_unprepare(struct clk *clk)
                return;
 
        clk_prepare_lock();
-       __clk_unprepare(clk);
+       clk_core_unprepare(clk->core);
        clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
-int __clk_prepare(struct clk *clk)
+static int clk_core_prepare(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -795,14 +939,14 @@ int __clk_prepare(struct clk *clk)
                return 0;
 
        if (clk->prepare_count == 0) {
-               ret = __clk_prepare(clk->parent);
+               ret = clk_core_prepare(clk->parent);
                if (ret)
                        return ret;
 
                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        if (ret) {
-                               __clk_unprepare(clk->parent);
+                               clk_core_unprepare(clk->parent);
                                return ret;
                        }
                }
@@ -829,15 +973,18 @@ int clk_prepare(struct clk *clk)
 {
        int ret;
 
+       if (!clk)
+               return 0;
+
        clk_prepare_lock();
-       ret = __clk_prepare(clk);
+       ret = clk_core_prepare(clk->core);
        clk_prepare_unlock();
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
-static void __clk_disable(struct clk *clk)
+static void clk_core_disable(struct clk_core *clk)
 {
        if (!clk)
                return;
@@ -851,7 +998,15 @@ static void __clk_disable(struct clk *clk)
        if (clk->ops->disable)
                clk->ops->disable(clk->hw);
 
-       __clk_disable(clk->parent);
+       clk_core_disable(clk->parent);
+}
+
+static void __clk_disable(struct clk *clk)
+{
+       if (!clk)
+               return;
+
+       clk_core_disable(clk->core);
 }
 
 /**
@@ -879,7 +1034,7 @@ void clk_disable(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
-static int __clk_enable(struct clk *clk)
+static int clk_core_enable(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -890,7 +1045,7 @@ static int __clk_enable(struct clk *clk)
                return -ESHUTDOWN;
 
        if (clk->enable_count == 0) {
-               ret = __clk_enable(clk->parent);
+               ret = clk_core_enable(clk->parent);
 
                if (ret)
                        return ret;
@@ -898,7 +1053,7 @@ static int __clk_enable(struct clk *clk)
                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        if (ret) {
-                               __clk_disable(clk->parent);
+                               clk_core_disable(clk->parent);
                                return ret;
                        }
                }
@@ -908,6 +1063,14 @@ static int __clk_enable(struct clk *clk)
        return 0;
 }
 
+static int __clk_enable(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_enable(clk->core);
+}
+
 /**
  * clk_enable - ungate a clock
  * @clk: the clk being ungated
@@ -934,17 +1097,13 @@ int clk_enable(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_enable);
 
-/**
- * __clk_round_rate - round the given rate for a clk
- * @clk: round the rate of this clock
- * @rate: the rate which is to be rounded
- *
- * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
- */
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
+                                               unsigned long rate,
+                                               unsigned long min_rate,
+                                               unsigned long max_rate)
 {
        unsigned long parent_rate = 0;
-       struct clk *parent;
+       struct clk_core *parent;
        struct clk_hw *parent_hw;
 
        if (!clk)
@@ -956,15 +1115,59 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 
        if (clk->ops->determine_rate) {
                parent_hw = parent ? parent->hw : NULL;
-               return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
-                                               &parent_hw);
+               return clk->ops->determine_rate(clk->hw, rate,
+                                               min_rate, max_rate,
+                                               &parent_rate, &parent_hw);
        } else if (clk->ops->round_rate)
                return clk->ops->round_rate(clk->hw, rate, &parent_rate);
        else if (clk->flags & CLK_SET_RATE_PARENT)
-               return __clk_round_rate(clk->parent, rate);
+               return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
+                                                 max_rate);
        else
                return clk->rate;
 }
+
+/**
+ * __clk_determine_rate - get the closest rate actually supported by a clock
+ * @hw: determine the rate of this clock
+ * @rate: target rate
+ * @min_rate: returned rate must be greater than this rate
+ * @max_rate: returned rate must be less than this rate
+ *
+ * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate and
+ * .determine_rate.
+ */
+unsigned long __clk_determine_rate(struct clk_hw *hw,
+                                  unsigned long rate,
+                                  unsigned long min_rate,
+                                  unsigned long max_rate)
+{
+       if (!hw)
+               return 0;
+
+       return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(__clk_determine_rate);
+
+/**
+ * __clk_round_rate - round the given rate for a clk
+ * @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
+ *
+ * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
+ */
+unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+{
+       unsigned long min_rate;
+       unsigned long max_rate;
+
+       if (!clk)
+               return 0;
+
+       clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
+
+       return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
+}
 EXPORT_SYMBOL_GPL(__clk_round_rate);
 
 /**
@@ -980,6 +1183,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
        unsigned long ret;
 
+       if (!clk)
+               return 0;
+
        clk_prepare_lock();
        ret = __clk_round_rate(clk, rate);
        clk_prepare_unlock();
@@ -1002,22 +1208,21 @@ EXPORT_SYMBOL_GPL(clk_round_rate);
  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
  * a driver returns that.
  */
-static int __clk_notify(struct clk *clk, unsigned long msg,
+static int __clk_notify(struct clk_core *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
 {
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;
 
-       cnd.clk = clk;
        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;
 
        list_for_each_entry(cn, &clk_notifier_list, node) {
-               if (cn->clk == clk) {
+               if (cn->clk->core == clk) {
+                       cnd.clk = cn->clk;
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
-                       break;
                }
        }
 
@@ -1035,10 +1240,10 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_accuracies(struct clk *clk)
+static void __clk_recalc_accuracies(struct clk_core *clk)
 {
        unsigned long parent_accuracy = 0;
-       struct clk *child;
+       struct clk_core *child;
 
        if (clk->parent)
                parent_accuracy = clk->parent->accuracy;
@@ -1053,6 +1258,20 @@ static void __clk_recalc_accuracies(struct clk *clk)
                __clk_recalc_accuracies(child);
 }
 
+static long clk_core_get_accuracy(struct clk_core *clk)
+{
+       unsigned long accuracy;
+
+       clk_prepare_lock();
+       if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
+               __clk_recalc_accuracies(clk);
+
+       accuracy = __clk_get_accuracy(clk);
+       clk_prepare_unlock();
+
+       return accuracy;
+}
+
 /**
  * clk_get_accuracy - return the accuracy of clk
  * @clk: the clk whose accuracy is being returned
@@ -1064,20 +1283,15 @@ static void __clk_recalc_accuracies(struct clk *clk)
  */
 long clk_get_accuracy(struct clk *clk)
 {
-       unsigned long accuracy;
-
-       clk_prepare_lock();
-       if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
-               __clk_recalc_accuracies(clk);
-
-       accuracy = __clk_get_accuracy(clk);
-       clk_prepare_unlock();
+       if (!clk)
+               return 0;
 
-       return accuracy;
+       return clk_core_get_accuracy(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_get_accuracy);
 
-static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
+static unsigned long clk_recalc(struct clk_core *clk,
+                               unsigned long parent_rate)
 {
        if (clk->ops->recalc_rate)
                return clk->ops->recalc_rate(clk->hw, parent_rate);
@@ -1098,11 +1312,11 @@ static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
 {
        unsigned long old_rate;
        unsigned long parent_rate = 0;
-       struct clk *child;
+       struct clk_core *child;
 
        old_rate = clk->rate;
 
@@ -1122,15 +1336,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
                __clk_recalc_rates(child, msg);
 }
 
-/**
- * clk_get_rate - return the rate of clk
- * @clk: the clk whose rate is being returned
- *
- * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
- * is set, which means a recalc_rate will be issued.
- * If clk is NULL then returns 0.
- */
-unsigned long clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate(struct clk_core *clk)
 {
        unsigned long rate;
 
@@ -1139,14 +1345,32 @@ unsigned long clk_get_rate(struct clk *clk)
        if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(clk, 0);
 
-       rate = __clk_get_rate(clk);
+       rate = clk_core_get_rate_nolock(clk);
        clk_prepare_unlock();
 
        return rate;
 }
+EXPORT_SYMBOL_GPL(clk_core_get_rate);
+
+/**
+ * clk_get_rate - return the rate of clk
+ * @clk: the clk whose rate is being returned
+ *
+ * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
+ * is set, which means a recalc_rate will be issued.
+ * If clk is NULL then returns 0.
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_rate(clk->core);
+}
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
-static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+static int clk_fetch_parent_index(struct clk_core *clk,
+                                 struct clk_core *parent)
 {
        int i;
 
@@ -1160,7 +1384,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
        /*
         * find index of new parent clock using cached parent ptrs,
         * or if not yet cached, use string name comparison and cache
-        * them now to avoid future calls to __clk_lookup.
+        * them now to avoid future calls to clk_core_lookup.
         */
        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents[i] == parent)
@@ -1170,7 +1394,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
                        continue;
 
                if (!strcmp(clk->parent_names[i], parent->name)) {
-                       clk->parents[i] = __clk_lookup(parent->name);
+                       clk->parents[i] = clk_core_lookup(parent->name);
                        return i;
                }
        }
@@ -1178,7 +1402,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
        return -EINVAL;
 }
 
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
 {
        hlist_del(&clk->child_node);
 
@@ -1195,10 +1419,11 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent)
        clk->parent = new_parent;
 }
 
-static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
+static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
+                                          struct clk_core *parent)
 {
        unsigned long flags;
-       struct clk *old_parent = clk->parent;
+       struct clk_core *old_parent = clk->parent;
 
        /*
         * Migrate prepare state between parents and prevent race with
@@ -1218,9 +1443,9 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
         * See also: Comment for clk_set_parent() below.
         */
        if (clk->prepare_count) {
-               __clk_prepare(parent);
-               clk_enable(parent);
-               clk_enable(clk);
+               clk_core_prepare(parent);
+               clk_core_enable(parent);
+               clk_core_enable(clk);
        }
 
        /* update the clk tree topology */
@@ -1231,25 +1456,27 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
        return old_parent;
 }
 
-static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
-               struct clk *old_parent)
+static void __clk_set_parent_after(struct clk_core *core,
+                                  struct clk_core *parent,
+                                  struct clk_core *old_parent)
 {
        /*
         * Finish the migration of prepare state and undo the changes done
         * for preventing a race with clk_enable().
         */
-       if (clk->prepare_count) {
-               clk_disable(clk);
-               clk_disable(old_parent);
-               __clk_unprepare(old_parent);
+       if (core->prepare_count) {
+               clk_core_disable(core);
+               clk_core_disable(old_parent);
+               clk_core_unprepare(old_parent);
        }
 }
 
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
+                           u8 p_index)
 {
        unsigned long flags;
        int ret = 0;
-       struct clk *old_parent;
+       struct clk_core *old_parent;
 
        old_parent = __clk_set_parent_before(clk, parent);
 
@@ -1263,9 +1490,9 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
                clk_enable_unlock(flags);
 
                if (clk->prepare_count) {
-                       clk_disable(clk);
-                       clk_disable(parent);
-                       __clk_unprepare(parent);
+                       clk_core_disable(clk);
+                       clk_core_disable(parent);
+                       clk_core_unprepare(parent);
                }
                return ret;
        }
@@ -1291,9 +1518,10 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
  *
  * Caller must hold prepare_lock.
  */
-static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
+static int __clk_speculate_rates(struct clk_core *clk,
+                                unsigned long parent_rate)
 {
-       struct clk *child;
+       struct clk_core *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;
 
@@ -1319,10 +1547,10 @@ out:
        return ret;
 }
 
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
-                            struct clk *new_parent, u8 p_index)
+static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
+                            struct clk_core *new_parent, u8 p_index)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        clk->new_rate = new_rate;
        clk->new_parent = new_parent;
@@ -1342,13 +1570,16 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
  * calculate the new rates returning the topmost clock that has to be
  * changed.
  */
-static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
+static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
+                                          unsigned long rate)
 {
-       struct clk *top = clk;
-       struct clk *old_parent, *parent;
+       struct clk_core *top = clk;
+       struct clk_core *old_parent, *parent;
        struct clk_hw *parent_hw;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;
+       unsigned long min_rate;
+       unsigned long max_rate;
        int p_index = 0;
 
        /* sanity */
@@ -1360,16 +1591,22 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
        if (parent)
                best_parent_rate = parent->rate;
 
+       clk_core_get_boundaries(clk, &min_rate, &max_rate);
+
        /* find the closest rate and parent clk/rate */
        if (clk->ops->determine_rate) {
                parent_hw = parent ? parent->hw : NULL;
                new_rate = clk->ops->determine_rate(clk->hw, rate,
+                                                   min_rate,
+                                                   max_rate,
                                                    &best_parent_rate,
                                                    &parent_hw);
-               parent = parent_hw ? parent_hw->clk : NULL;
+               parent = parent_hw ? parent_hw->core : NULL;
        } else if (clk->ops->round_rate) {
                new_rate = clk->ops->round_rate(clk->hw, rate,
                                                &best_parent_rate);
+               if (new_rate < min_rate || new_rate > max_rate)
+                       return NULL;
        } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
                /* pass-through clock without adjustable parent */
                clk->new_rate = clk->rate;
@@ -1390,7 +1627,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
        }
 
        /* try finding the new parent index */
-       if (parent) {
+       if (parent && clk->num_parents > 1) {
                p_index = clk_fetch_parent_index(clk, parent);
                if (p_index < 0) {
                        pr_debug("%s: clk %s can not be parent of clk %s\n",
@@ -1414,9 +1651,10 @@ out:
  * so that in case of an error we can walk down the whole tree again and
  * abort the change.
  */
-static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
+static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
+                                                 unsigned long event)
 {
-       struct clk *child, *tmp_clk, *fail_clk = NULL;
+       struct clk_core *child, *tmp_clk, *fail_clk = NULL;
        int ret = NOTIFY_DONE;
 
        if (clk->rate == clk->new_rate)
@@ -1451,14 +1689,14 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
  * walk down a subtree and set the new rates notifying the rate
  * change on the way
  */
-static void clk_change_rate(struct clk *clk)
+static void clk_change_rate(struct clk_core *clk)
 {
-       struct clk *child;
+       struct clk_core *child;
        struct hlist_node *tmp;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        bool skip_set_rate = false;
-       struct clk *old_parent;
+       struct clk_core *old_parent;
 
        old_rate = clk->rate;
 
@@ -1506,6 +1744,45 @@ static void clk_change_rate(struct clk *clk)
                clk_change_rate(clk->new_child);
 }
 
+static int clk_core_set_rate_nolock(struct clk_core *clk,
+                                   unsigned long req_rate)
+{
+       struct clk_core *top, *fail_clk;
+       unsigned long rate = req_rate;
+       int ret = 0;
+
+       if (!clk)
+               return 0;
+
+       /* bail early if nothing to do */
+       if (rate == clk_core_get_rate_nolock(clk))
+               return 0;
+
+       if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
+               return -EBUSY;
+
+       /* calculate new rates and get the topmost changed clock */
+       top = clk_calc_new_rates(clk, rate);
+       if (!top)
+               return -EINVAL;
+
+       /* notify that we are about to change rates */
+       fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
+       if (fail_clk) {
+               pr_debug("%s: failed to set %s rate\n", __func__,
+                               fail_clk->name);
+               clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+               return -EBUSY;
+       }
+
+       /* change the rates */
+       clk_change_rate(top);
+
+       clk->req_rate = req_rate;
+
+       return ret;
+}
+
 /**
  * clk_set_rate - specify a new rate for clk
  * @clk: the clk whose rate is being changed
@@ -1529,8 +1806,7 @@ static void clk_change_rate(struct clk *clk)
  */
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-       struct clk *top, *fail_clk;
-       int ret = 0;
+       int ret;
 
        if (!clk)
                return 0;
@@ -1538,41 +1814,81 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        /* prevent racing with updates to the clock topology */
        clk_prepare_lock();
 
-       /* bail early if nothing to do */
-       if (rate == clk_get_rate(clk))
-               goto out;
+       ret = clk_core_set_rate_nolock(clk->core, rate);
 
-       if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
-               ret = -EBUSY;
-               goto out;
-       }
+       clk_prepare_unlock();
 
-       /* calculate new rates and get the topmost changed clock */
-       top = clk_calc_new_rates(clk, rate);
-       if (!top) {
-               ret = -EINVAL;
-               goto out;
-       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
 
-       /* notify that we are about to change rates */
-       fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
-       if (fail_clk) {
-               pr_debug("%s: failed to set %s rate\n", __func__,
-                               fail_clk->name);
-               clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-               ret = -EBUSY;
-               goto out;
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+       int ret = 0;
+
+       if (!clk)
+               return 0;
+
+       if (min > max) {
+               pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
+                      __func__, clk->core->name, clk->dev_id, clk->con_id,
+                      min, max);
+               return -EINVAL;
        }
 
-       /* change the rates */
-       clk_change_rate(top);
+       clk_prepare_lock();
+
+       if (min != clk->min_rate || max != clk->max_rate) {
+               clk->min_rate = min;
+               clk->max_rate = max;
+               ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+       }
 
-out:
        clk_prepare_unlock();
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(clk_set_rate);
+EXPORT_SYMBOL_GPL(clk_set_rate_range);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+       if (!clk)
+               return 0;
+
+       return clk_set_rate_range(clk, rate, clk->max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_min_rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+       if (!clk)
+               return 0;
+
+       return clk_set_rate_range(clk, clk->min_rate, rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_max_rate);
 
 /**
  * clk_get_parent - return the parent of a clk
@@ -1599,11 +1915,11 @@ EXPORT_SYMBOL_GPL(clk_get_parent);
  *
  * For single-parent clocks without .get_parent, first check to see if the
  * .parents array exists, and if so use it to avoid an expensive tree
- * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
+ * traversal.  If .parents does not exist then walk the tree.
  */
-static struct clk *__clk_init_parent(struct clk *clk)
+static struct clk_core *__clk_init_parent(struct clk_core *clk)
 {
-       struct clk *ret = NULL;
+       struct clk_core *ret = NULL;
        u8 index;
 
        /* handle the trivial cases */
@@ -1613,7 +1929,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
-                       clk->parent = __clk_lookup(clk->parent_names[0]);
+                       clk->parent = clk_core_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }
@@ -1627,8 +1943,8 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
-        * unnecessary and expensive calls to __clk_lookup.  We don't set
-        * clk->parent here; that is done by the calling function
+        * unnecessary and expensive lookups.  We don't set clk->parent here;
+        * that is done by the calling function.
         */
 
        index = clk->ops->get_parent(clk->hw);
@@ -1638,13 +1954,14 @@ static struct clk *__clk_init_parent(struct clk *clk)
                        kcalloc(clk->num_parents, sizeof(struct clk *),
                                        GFP_KERNEL);
 
-       ret = clk_get_parent_by_index(clk, index);
+       ret = clk_core_get_parent_by_index(clk, index);
 
 out:
        return ret;
 }
 
-void __clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_core_reparent(struct clk_core *clk,
+                                 struct clk_core *new_parent)
 {
        clk_reparent(clk, new_parent);
        __clk_recalc_accuracies(clk);
@@ -1652,23 +1969,40 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
 }
 
 /**
- * clk_set_parent - switch the parent of a mux clk
- * @clk: the mux clk whose input we are switching
- * @parent: the new input to clk
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
  *
- * Re-parent clk to use parent as its new input source.  If clk is in
- * prepared state, the clk will get enabled for the duration of this call. If
- * that's not acceptable for a specific clk (Eg: the consumer can't handle
- * that, the reparenting is glitchy in hardware, etc), use the
- * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
- *
- * After successfully changing clk's parent clk_set_parent will update the
- * clk topology, sysfs topology and propagate rate recalculation via
- * __clk_recalc_rates.
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
  *
- * Returns 0 on success, -EERROR otherwise.
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
  */
-int clk_set_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+       struct clk_core *core, *parent_core;
+       unsigned int i;
+
+       /* NULL clocks should be nops, so return success if either is NULL. */
+       if (!clk || !parent)
+               return true;
+
+       core = clk->core;
+       parent_core = parent->core;
+
+       /* Optimize for the case where the parent is already the parent. */
+       if (core->parent == parent_core)
+               return true;
+
+       for (i = 0; i < core->num_parents; i++)
+               if (strcmp(core->parent_names[i], parent_core->name) == 0)
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(clk_has_parent);
+
+static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
 {
        int ret = 0;
        int p_index = 0;
@@ -1728,6 +2062,31 @@ out:
 
        return ret;
 }
+
+/**
+ * clk_set_parent - switch the parent of a mux clk
+ * @clk: the mux clk whose input we are switching
+ * @parent: the new input to clk
+ *
+ * Re-parent clk to use parent as its new input source.  If clk is in
+ * prepared state, the clk will get enabled for the duration of this call. If
+ * that's not acceptable for a specific clk (Eg: the consumer can't handle
+ * that, the reparenting is glitchy in hardware, etc), use the
+ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
+ *
+ * After successfully changing clk's parent clk_set_parent will update the
+ * clk topology, sysfs topology and propagate rate recalculation via
+ * __clk_recalc_rates.
+ *
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+}
 EXPORT_SYMBOL_GPL(clk_set_parent);
 
 /**
@@ -1764,13 +2123,13 @@ int clk_set_phase(struct clk *clk, int degrees)
 
        clk_prepare_lock();
 
-       if (!clk->ops->set_phase)
+       if (!clk->core->ops->set_phase)
                goto out_unlock;
 
-       ret = clk->ops->set_phase(clk->hw, degrees);
+       ret = clk->core->ops->set_phase(clk->core->hw, degrees);
 
        if (!ret)
-               clk->phase = degrees;
+               clk->core->phase = degrees;
 
 out_unlock:
        clk_prepare_unlock();
@@ -1778,15 +2137,9 @@ out_unlock:
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(clk_set_phase);
 
-/**
- * clk_get_phase - return the phase shift of a clock signal
- * @clk: clock signal source
- *
- * Returns the phase shift of a clock node in degrees, otherwise returns
- * -EERROR.
- */
-int clk_get_phase(struct clk *clk)
+static int clk_core_get_phase(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -1800,28 +2153,48 @@ int clk_get_phase(struct clk *clk)
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(clk_get_phase);
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * -EERROR.
+ */
+int clk_get_phase(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_phase(clk->core);
+}
 
 /**
  * __clk_init - initialize the data structures in a struct clk
  * @dev:       device initializing this clk, placeholder for now
  * @clk:       clk being initialized
  *
- * Initializes the lists in struct clk, queries the hardware for the
+ * Initializes the lists in struct clk_core, queries the hardware for the
  * parent and rate and sets them both.
  */
-int __clk_init(struct device *dev, struct clk *clk)
+static int __clk_init(struct device *dev, struct clk *clk_user)
 {
        int i, ret = 0;
-       struct clk *orphan;
+       struct clk_core *orphan;
        struct hlist_node *tmp2;
+       struct clk_core *clk;
+       unsigned long rate;
 
-       if (!clk)
+       if (!clk_user)
                return -EINVAL;
 
+       clk = clk_user->core;
+
        clk_prepare_lock();
 
        /* check to see if a clock with this name is already registered */
-       if (__clk_lookup(clk->name)) {
+       if (clk_core_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);
                ret = -EEXIST;
@@ -1873,7 +2246,7 @@ int __clk_init(struct device *dev, struct clk *clk)
                clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
                                        GFP_KERNEL);
                /*
-                * __clk_lookup returns NULL for parents that have not been
+                * clk_core_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
@@ -1881,7 +2254,7 @@ int __clk_init(struct device *dev, struct clk *clk)
                if (clk->parents)
                        for (i = 0; i < clk->num_parents; i++)
                                clk->parents[i] =
-                                       __clk_lookup(clk->parent_names[i]);
+                                       clk_core_lookup(clk->parent_names[i]);
        }
 
        clk->parent = __clk_init_parent(clk);
@@ -1936,12 +2309,13 @@ int __clk_init(struct device *dev, struct clk *clk)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
-               clk->rate = clk->ops->recalc_rate(clk->hw,
-                               __clk_get_rate(clk->parent));
+               rate = clk->ops->recalc_rate(clk->hw,
+                               clk_core_get_rate_nolock(clk->parent));
        else if (clk->parent)
-               clk->rate = clk->parent->rate;
+               rate = clk->parent->rate;
        else
-               clk->rate = 0;
+               rate = 0;
+       clk->rate = clk->req_rate = rate;
 
        /*
         * walk the list of orphan clocks and reparent any that are children of
@@ -1951,13 +2325,13 @@ int __clk_init(struct device *dev, struct clk *clk)
                if (orphan->num_parents && orphan->ops->get_parent) {
                        i = orphan->ops->get_parent(orphan->hw);
                        if (!strcmp(clk->name, orphan->parent_names[i]))
-                               __clk_reparent(orphan, clk);
+                               clk_core_reparent(orphan, clk);
                        continue;
                }
 
                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
-                               __clk_reparent(orphan, clk);
+                               clk_core_reparent(orphan, clk);
                                break;
                        }
         }
@@ -1983,47 +2357,39 @@ out:
        return ret;
 }
 
-/**
- * __clk_register - register a clock and return a cookie.
- *
- * Same as clk_register, except that the .clk field inside hw shall point to a
- * preallocated (generally statically allocated) struct clk. None of the fields
- * of the struct clk need to be initialized.
- *
- * The data pointed to by .init and .clk field shall NOT be marked as init
- * data.
- *
- * __clk_register is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized.  It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements its operations.  Returns 0
- * on success, otherwise an error code.
- */
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+                            const char *con_id)
 {
-       int ret;
        struct clk *clk;
 
-       clk = hw->clk;
-       clk->name = hw->init->name;
-       clk->ops = hw->init->ops;
-       clk->hw = hw;
-       clk->flags = hw->init->flags;
-       clk->parent_names = hw->init->parent_names;
-       clk->num_parents = hw->init->num_parents;
-       if (dev && dev->driver)
-               clk->owner = dev->driver->owner;
-       else
-               clk->owner = NULL;
+       /* This is to allow this function to be chained to others */
+       if (!hw || IS_ERR(hw))
+               return (struct clk *) hw;
 
-       ret = __clk_init(dev, clk);
-       if (ret)
-               return ERR_PTR(ret);
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk)
+               return ERR_PTR(-ENOMEM);
+
+       clk->core = hw->core;
+       clk->dev_id = dev_id;
+       clk->con_id = con_id;
+       clk->max_rate = ULONG_MAX;
+
+       clk_prepare_lock();
+       hlist_add_head(&clk->child_node, &hw->core->clks);
+       clk_prepare_unlock();
 
        return clk;
 }
-EXPORT_SYMBOL_GPL(__clk_register);
+
+void __clk_free_clk(struct clk *clk)
+{
+       clk_prepare_lock();
+       hlist_del(&clk->child_node);
+       clk_prepare_unlock();
+
+       kfree(clk);
+}
 
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
@@ -2039,7 +2405,7 @@ EXPORT_SYMBOL_GPL(__clk_register);
 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
        int i, ret;
-       struct clk *clk;
+       struct clk_core *clk;
 
        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk) {
@@ -2060,7 +2426,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->num_parents = hw->init->num_parents;
-       hw->clk = clk;
+       hw->core = clk;
 
        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
@@ -2084,9 +2450,21 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
                }
        }
 
-       ret = __clk_init(dev, clk);
+       INIT_HLIST_HEAD(&clk->clks);
+
+       hw->clk = __clk_create_clk(hw, NULL, NULL);
+       if (IS_ERR(hw->clk)) {
+               pr_err("%s: could not allocate per-user clk\n", __func__);
+               ret = PTR_ERR(hw->clk);
+               goto fail_parent_names_copy;
+       }
+
+       ret = __clk_init(dev, hw->clk);
        if (!ret)
-               return clk;
+               return hw->clk;
+
+       __clk_free_clk(hw->clk);
+       hw->clk = NULL;
 
 fail_parent_names_copy:
        while (--i >= 0)
@@ -2107,7 +2485,7 @@ EXPORT_SYMBOL_GPL(clk_register);
  */
 static void __clk_release(struct kref *ref)
 {
-       struct clk *clk = container_of(ref, struct clk, ref);
+       struct clk_core *clk = container_of(ref, struct clk_core, ref);
        int i = clk->num_parents;
 
        kfree(clk->parents);
@@ -2165,12 +2543,13 @@ void clk_unregister(struct clk *clk)
        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
                return;
 
-       clk_debug_unregister(clk);
+       clk_debug_unregister(clk->core);
 
        clk_prepare_lock();
 
-       if (clk->ops == &clk_nodrv_ops) {
-               pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
+       if (clk->core->ops == &clk_nodrv_ops) {
+               pr_err("%s: unregistered clock: %s\n", __func__,
+                      clk->core->name);
                return;
        }
        /*
@@ -2178,24 +2557,25 @@ void clk_unregister(struct clk *clk)
         * a reference to this clock.
         */
        flags = clk_enable_lock();
-       clk->ops = &clk_nodrv_ops;
+       clk->core->ops = &clk_nodrv_ops;
        clk_enable_unlock(flags);
 
-       if (!hlist_empty(&clk->children)) {
-               struct clk *child;
+       if (!hlist_empty(&clk->core->children)) {
+               struct clk_core *child;
                struct hlist_node *t;
 
                /* Reparent all children to the orphan list. */
-               hlist_for_each_entry_safe(child, t, &clk->children, child_node)
-                       clk_set_parent(child, NULL);
+               hlist_for_each_entry_safe(child, t, &clk->core->children,
+                                         child_node)
+                       clk_core_set_parent(child, NULL);
        }
 
-       hlist_del_init(&clk->child_node);
+       hlist_del_init(&clk->core->child_node);
 
-       if (clk->prepare_count)
+       if (clk->core->prepare_count)
                pr_warn("%s: unregistering prepared clock: %s\n",
-                                       __func__, clk->name);
-       kref_put(&clk->ref, __clk_release);
+                                       __func__, clk->core->name);
+       kref_put(&clk->core->ref, __clk_release);
 
        clk_prepare_unlock();
 }
@@ -2263,11 +2643,13 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
  */
 int __clk_get(struct clk *clk)
 {
-       if (clk) {
-               if (!try_module_get(clk->owner))
+       struct clk_core *core = !clk ? NULL : clk->core;
+
+       if (core) {
+               if (!try_module_get(core->owner))
                        return 0;
 
-               kref_get(&clk->ref);
+               kref_get(&core->ref);
        }
        return 1;
 }
@@ -2280,11 +2662,20 @@ void __clk_put(struct clk *clk)
                return;
 
        clk_prepare_lock();
-       owner = clk->owner;
-       kref_put(&clk->ref, __clk_release);
+
+       hlist_del(&clk->child_node);
+       if (clk->min_rate > clk->core->req_rate ||
+           clk->max_rate < clk->core->req_rate)
+               clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+       owner = clk->core->owner;
+       kref_put(&clk->core->ref, __clk_release);
+
        clk_prepare_unlock();
 
        module_put(owner);
+
+       kfree(clk);
 }
 
 /***        clk rate change notifiers        ***/
@@ -2339,7 +2730,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 
        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
-       clk->notifier_count++;
+       clk->core->notifier_count++;
 
 out:
        clk_prepare_unlock();
@@ -2376,7 +2767,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-               clk->notifier_count--;
+               clk->core->notifier_count--;
 
                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
@@ -2506,7 +2897,8 @@ void of_clk_del_provider(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_del_provider);
 
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+                                      const char *dev_id, const char *con_id)
 {
        struct of_clk_provider *provider;
        struct clk *clk = ERR_PTR(-EPROBE_DEFER);
@@ -2515,8 +2907,17 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
        list_for_each_entry(provider, &of_clk_providers, link) {
                if (provider->node == clkspec->np)
                        clk = provider->get(clkspec, provider->data);
-               if (!IS_ERR(clk))
+               if (!IS_ERR(clk)) {
+                       clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
+                                              con_id);
+
+                       if (!IS_ERR(clk) && !__clk_get(clk)) {
+                               __clk_free_clk(clk);
+                               clk = ERR_PTR(-ENOENT);
+                       }
+
                        break;
+               }
        }
 
        return clk;
@@ -2527,7 +2928,7 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
        struct clk *clk;
 
        mutex_lock(&of_clk_mutex);
-       clk = __of_clk_get_from_provider(clkspec);
+       clk = __of_clk_get_from_provider(clkspec, NULL, __func__);
        mutex_unlock(&of_clk_mutex);
 
        return clk;
index c798138f023f6ae6a38c252c0946135a82814eba..ba845408cc3e8515ef8c9457f686727b57027185 100644 (file)
@@ -9,9 +9,31 @@
  * published by the Free Software Foundation.
  */
 
+struct clk_hw;
+
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec);
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec);
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+                                      const char *dev_id, const char *con_id);
 void of_clk_lock(void);
 void of_clk_unlock(void);
 #endif
+
+#ifdef CONFIG_COMMON_CLK
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+                            const char *con_id);
+void __clk_free_clk(struct clk *clk);
+#else
+/* All these casts to avoid ifdefs in clkdev... */
+static inline struct clk *
+__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id)
+{
+       return (struct clk *)hw;
+}
+static inline void __clk_free_clk(struct clk *clk) { }
+static inline struct clk_hw *__clk_get_hw(struct clk *clk)
+{
+       return (struct clk_hw *)clk;
+}
+
+#endif
index da4bda8b7fc7e99d24598b041978d5c19bfa52f6..043fd3633373982f0408de1618370c6d63d5887b 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 #include <linux/of.h>
 
 #include "clk.h"
@@ -28,6 +29,20 @@ static DEFINE_MUTEX(clocks_mutex);
 
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 
+static struct clk *__of_clk_get_by_clkspec(struct of_phandle_args *clkspec,
+                                        const char *dev_id, const char *con_id)
+{
+       struct clk *clk;
+
+       if (!clkspec)
+               return ERR_PTR(-EINVAL);
+
+       of_clk_lock();
+       clk = __of_clk_get_from_provider(clkspec, dev_id, con_id);
+       of_clk_unlock();
+       return clk;
+}
+
 /**
  * of_clk_get_by_clkspec() - Lookup a clock form a clock provider
  * @clkspec: pointer to a clock specifier data structure
@@ -38,22 +53,11 @@ static DEFINE_MUTEX(clocks_mutex);
  */
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec)
 {
-       struct clk *clk;
-
-       if (!clkspec)
-               return ERR_PTR(-EINVAL);
-
-       of_clk_lock();
-       clk = __of_clk_get_from_provider(clkspec);
-
-       if (!IS_ERR(clk) && !__clk_get(clk))
-               clk = ERR_PTR(-ENOENT);
-
-       of_clk_unlock();
-       return clk;
+       return __of_clk_get_by_clkspec(clkspec, NULL, __func__);
 }
 
-struct clk *of_clk_get(struct device_node *np, int index)
+static struct clk *__of_clk_get(struct device_node *np, int index,
+                              const char *dev_id, const char *con_id)
 {
        struct of_phandle_args clkspec;
        struct clk *clk;
@@ -67,22 +71,21 @@ struct clk *of_clk_get(struct device_node *np, int index)
        if (rc)
                return ERR_PTR(rc);
 
-       clk = of_clk_get_by_clkspec(&clkspec);
+       clk = __of_clk_get_by_clkspec(&clkspec, dev_id, con_id);
        of_node_put(clkspec.np);
+
        return clk;
 }
+
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+       return __of_clk_get(np, index, np->full_name, NULL);
+}
 EXPORT_SYMBOL(of_clk_get);
 
-/**
- * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
- * @np: pointer to clock consumer node
- * @name: name of consumer's clock input, or NULL for the first clock reference
- *
- * This function parses the clocks and clock-names properties,
- * and uses them to look up the struct clk from the registered list of clock
- * providers.
- */
-struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+                                       const char *dev_id,
+                                       const char *name)
 {
        struct clk *clk = ERR_PTR(-ENOENT);
 
@@ -97,10 +100,10 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
                 */
                if (name)
                        index = of_property_match_string(np, "clock-names", name);
-               clk = of_clk_get(np, index);
-               if (!IS_ERR(clk))
+               clk = __of_clk_get(np, index, dev_id, name);
+               if (!IS_ERR(clk)) {
                        break;
-               else if (name && index >= 0) {
+               } else if (name && index >= 0) {
                        if (PTR_ERR(clk) != -EPROBE_DEFER)
                                pr_err("ERROR: could not get clock %s:%s(%i)\n",
                                        np->full_name, name ? name : "", index);
@@ -119,7 +122,33 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
 
        return clk;
 }
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the clocks and clock-names properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+       if (!np)
+               return ERR_PTR(-ENOENT);
+
+       return __of_clk_get_by_name(np, np->full_name, name);
+}
 EXPORT_SYMBOL(of_clk_get_by_name);
+
+#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */
+
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+                                       const char *dev_id,
+                                       const char *name)
+{
+       return ERR_PTR(-ENOENT);
+}
 #endif
 
 /*
@@ -168,14 +197,28 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
 struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 {
        struct clk_lookup *cl;
+       struct clk *clk = NULL;
 
        mutex_lock(&clocks_mutex);
+
        cl = clk_find(dev_id, con_id);
-       if (cl && !__clk_get(cl->clk))
+       if (!cl)
+               goto out;
+
+       clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id);
+       if (IS_ERR(clk))
+               goto out;
+
+       if (!__clk_get(clk)) {
+               __clk_free_clk(clk);
                cl = NULL;
+               goto out;
+       }
+
+out:
        mutex_unlock(&clocks_mutex);
 
-       return cl ? cl->clk : ERR_PTR(-ENOENT);
+       return cl ? clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
 
@@ -185,10 +228,8 @@ struct clk *clk_get(struct device *dev, const char *con_id)
        struct clk *clk;
 
        if (dev) {
-               clk = of_clk_get_by_name(dev->of_node, con_id);
-               if (!IS_ERR(clk))
-                       return clk;
-               if (PTR_ERR(clk) == -EPROBE_DEFER)
+               clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
+               if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                        return clk;
        }
 
@@ -331,6 +372,7 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
 
        return 0;
 }
+EXPORT_SYMBOL(clk_register_clkdev);
 
 /**
  * clk_register_clkdevs - register a set of clk_lookup for a struct clk
index 007144f81f50b63301f211103dd200f7f21a4ec4..2e4f6d432bebeb21b6c543494764320407b9a488 100644 (file)
@@ -295,6 +295,8 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
 }
 
 static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
                              unsigned long *best_parent_rate,
                              struct clk_hw **best_parent_p)
 {
index 48fa53c7ce5e0dbf4cfb7f5eb30abe0104015ef8..de6a873175d2b833f64d95f8e2bf5729da6fd1b4 100644 (file)
@@ -202,6 +202,8 @@ error:
 }
 
 static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
index 38e9153446059a1fb0c0728c841517cf99747f40..38e37bf6b821d8c6207c73179c36f98977933578 100644 (file)
@@ -1,3 +1,4 @@
 obj-y                          += clk-pxa.o
 obj-$(CONFIG_PXA25x)           += clk-pxa25x.o
 obj-$(CONFIG_PXA27x)           += clk-pxa27x.o
+obj-$(CONFIG_PXA3xx)           += clk-pxa3xx.o
index 4e834753ab094500677811702c37ad005503929b..29cee9e8d4d91bf34312207b7016e2f05c7d2746 100644 (file)
@@ -46,7 +46,7 @@ static unsigned long cken_recalc_rate(struct clk_hw *hw,
                fix = &pclk->lp;
        else
                fix = &pclk->hp;
-       fix->hw.clk = hw->clk;
+       __clk_hw_set_clk(&fix->hw, hw);
        return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
 }
 
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
new file mode 100644 (file)
index 0000000..39f891b
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * Marvell PXA3xxx family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Heavily inspired from former arch/arm/mach-pxa/pxa3xx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
+ * should go away.
+ */
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <mach/smemc.h>
+#include <mach/pxa3xx-regs.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+enum {
+       PXA_CORE_60Mhz = 0,
+       PXA_CORE_RUN,
+       PXA_CORE_TURBO,
+};
+
+enum {
+       PXA_BUS_60Mhz = 0,
+       PXA_BUS_HSS,
+};
+
+/* crystal frequency to HSIO bus frequency multiplier (HSS) */
+static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
+
+/* crystal frequency to static memory controller multiplier (SMCFS) */
+static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
+static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
+
+static const char * const get_freq_khz[] = {
+       "core", "ring_osc_60mhz", "run", "cpll", "system_bus"
+};
+
+/*
+ * Get the clock frequency as reflected by ACSR and the turbo flag.
+ * We assume these values have been applied via a fcs.
+ * If info is not 0 we also display the current settings.
+ */
+unsigned int pxa3xx_get_clk_frequency_khz(int info)
+{
+       struct clk *clk;
+       unsigned long clks[5];
+       int i;
+
+       for (i = 0; i < 5; i++) {
+               clk = clk_get(NULL, get_freq_khz[i]);
+               if (IS_ERR(clk)) {
+                       clks[i] = 0;
+               } else {
+                       clks[i] = clk_get_rate(clk);
+                       clk_put(clk);
+               }
+       }
+       if (info) {
+               pr_info("RO Mode clock: %ld.%02ldMHz\n",
+                       clks[1] / 1000000, (clks[1] % 1000000) / 10000);
+               pr_info("Run Mode clock: %ld.%02ldMHz\n",
+                       clks[2] / 1000000, (clks[2] % 1000000) / 10000);
+               pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
+                       clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+               pr_info("System bus clock: %ld.%02ldMHz\n",
+                       clks[4] / 1000000, (clks[4] % 1000000) / 10000);
+       }
+       return (unsigned int)clks[0];
+}
+
+static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       unsigned long ac97_div, rate;
+
+       ac97_div = AC97_DIV;
+
+       /* This may lose precision for some rates but won't for the
+        * standard 24.576MHz.
+        */
+       rate = parent_rate / 2;
+       rate /= ((ac97_div >> 12) & 0x7fff);
+       rate *= (ac97_div & 0xfff);
+
+       return rate;
+}
+PARENTS(clk_pxa3xx_ac97) = { "spll_624mhz" };
+RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
+
+static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+
+       return (parent_rate / 48)  * smcfs_mult[(acsr >> 23) & 0x7] /
+               df_clkdiv[(memclkcfg >> 16) & 0x3];
+}
+PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
+RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
+
+static bool pxa3xx_is_ring_osc_forced(void)
+{
+       unsigned long acsr = ACSR;
+
+       return acsr & ACCR_D0CS;
+}
+
+PARENTS(pxa3xx_pbus) = { "ring_osc_60mhz", "spll_624mhz" };
+PARENTS(pxa3xx_32Khz_bus) = { "osc_32_768khz", "osc_32_768khz" };
+PARENTS(pxa3xx_13MHz_bus) = { "osc_13mhz", "osc_13mhz" };
+PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
+PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
+PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
+
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
+                   div_hp, bit, is_lp, flags)                          \
+       PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
+                mult_hp, div_hp, is_lp,  CKEN_AB(bit),                 \
+                (CKEN_ ## bit % 32), flags)
+#define PXA3XX_PBUS_CKEN(dev_id, con_id, bit, mult_lp, div_lp,         \
+                        mult_hp, div_hp, delay)                        \
+       PXA3XX_CKEN(dev_id, con_id, pxa3xx_pbus_parents, mult_lp,       \
+                   div_lp, mult_hp, div_hp, bit, pxa3xx_is_ring_osc_forced, 0)
+#define PXA3XX_CKEN_1RATE(dev_id, con_id, bit, parents)                        \
+       PXA_CKEN_1RATE(dev_id, con_id, bit, parents,                    \
+                      CKEN_AB(bit), (CKEN_ ## bit % 32), 0)
+
+static struct desc_clk_cken pxa3xx_clocks[] __initdata = {
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-i2c.0", NULL, I2C, 2, 5, 1, 19, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-udc", NULL, UDC, 1, 4, 1, 13, 5),
+       PXA3XX_PBUS_CKEN("pxa27x-ohci", NULL, USBH, 1, 4, 1, 13, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-u2d", NULL, USB2, 1, 4, 1, 13, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-pwm.0", NULL, PWM0, 1, 6, 1, 48, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-pwm.1", NULL, PWM1, 1, 6, 1, 48, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.0", NULL, MMC1, 1, 4, 1, 24, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.1", NULL, MMC2, 1, 4, 1, 24, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.2", NULL, MMC3, 1, 4, 1, 24, 0),
+
+       PXA3XX_CKEN_1RATE("pxa27x-keypad", NULL, KEYPAD,
+                         pxa3xx_32Khz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.0", NULL, SSP1, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.1", NULL, SSP2, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.2", NULL, SSP3, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.3", NULL, SSP4, pxa3xx_13MHz_bus_parents),
+
+       PXA3XX_CKEN(NULL, "AC97CLK", pxa3xx_ac97_bus_parents, 1, 4, 1, 1, AC97,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN(NULL, "CAMCLK", pxa3xx_sbus_parents, 1, 2, 1, 1, CAMERA,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN("pxa2xx-fb", NULL, pxa3xx_sbus_parents, 1, 1, 1, 1, LCD,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN("pxa2xx-pcmcia", NULL, pxa3xx_smemcbus_parents, 1, 4,
+                   1, 1, SMC, pxa3xx_is_ring_osc_forced, CLK_IGNORE_UNUSED),
+};
+
+static struct desc_clk_cken pxa300_310_clocks[] __initdata = {
+
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
+       PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static struct desc_clk_cken pxa320_clocks[] __initdata = {
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 6, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA320_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static struct desc_clk_cken pxa93x_clocks[] __initdata = {
+
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
+       PXA3XX_CKEN_1RATE("pxa93x-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
+                                           unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int hss = (acsr >> 14) & 0x3;
+
+       if (pxa3xx_is_ring_osc_forced())
+               return parent_rate;
+       return parent_rate / 48 * hss_mult[hss];
+}
+
+static u8 clk_pxa3xx_system_bus_get_parent(struct clk_hw *hw)
+{
+       if (pxa3xx_is_ring_osc_forced())
+               return PXA_BUS_60Mhz;
+       else
+               return PXA_BUS_HSS;
+}
+
+PARENTS(clk_pxa3xx_system_bus) = { "ring_osc_60mhz", "spll_624mhz" };
+MUX_RO_RATE_RO_OPS(clk_pxa3xx_system_bus, "system_bus");
+
+static unsigned long clk_pxa3xx_core_get_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       return parent_rate;
+}
+
+static u8 clk_pxa3xx_core_get_parent(struct clk_hw *hw)
+{
+       unsigned long xclkcfg;
+       unsigned int t;
+
+       if (pxa3xx_is_ring_osc_forced())
+               return PXA_CORE_60Mhz;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       if (t)
+               return PXA_CORE_TURBO;
+       return PXA_CORE_RUN;
+}
+PARENTS(clk_pxa3xx_core) = { "ring_osc_60mhz", "run", "cpll" };
+MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
+
+static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
+       unsigned int t, xclkcfg;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       return t ? (parent_rate / xn) * 2 : parent_rate;
+}
+PARENTS(clk_pxa3xx_run) = { "cpll" };
+RATE_RO_OPS(clk_pxa3xx_run, "run");
+
+static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
+       unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
+       unsigned int xl = acsr & ACCR_XL_MASK;
+       unsigned int t, xclkcfg;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       pr_info("RJK: parent_rate=%lu, xl=%u, xn=%u\n", parent_rate, xl, xn);
+       return t ? parent_rate * xl * xn : parent_rate * xl;
+}
+PARENTS(clk_pxa3xx_cpll) = { "osc_13mhz" };
+RATE_RO_OPS(clk_pxa3xx_cpll, "cpll");
+
+static void __init pxa3xx_register_core(void)
+{
+       clk_register_clk_pxa3xx_cpll();
+       clk_register_clk_pxa3xx_run();
+
+       clkdev_pxa_register(CLK_CORE, "core", NULL,
+                           clk_register_clk_pxa3xx_core());
+}
+
+static void __init pxa3xx_register_plls(void)
+{
+       clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               13 * MHz);
+       clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               32768);
+       clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               120 * MHz);
+       clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+       clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1);
+       clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz",
+                                 0, 1, 2);
+}
+
+#define DUMMY_CLK(_con_id, _dev_id, _parent) \
+       { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
+struct dummy_clk {
+       const char *con_id;
+       const char *dev_id;
+       const char *parent;
+};
+static struct dummy_clk dummy_clks[] __initdata = {
+       DUMMY_CLK(NULL, "pxa93x-gpio", "osc_13mhz"),
+       DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
+       DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
+       DUMMY_CLK(NULL, "pxa3xx-pwri2c.1", "osc_13mhz"),
+};
+
+static void __init pxa3xx_dummy_clocks_init(void)
+{
+       struct clk *clk;
+       struct dummy_clk *d;
+       const char *name;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
+               d = &dummy_clks[i];
+               name = d->dev_id ? d->dev_id : d->con_id;
+               clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
+               clk_register_clkdev(clk, d->con_id, d->dev_id);
+       }
+}
+
+static void __init pxa3xx_base_clocks_init(void)
+{
+       pxa3xx_register_plls();
+       pxa3xx_register_core();
+       clk_register_clk_pxa3xx_system_bus();
+       clk_register_clk_pxa3xx_ac97();
+       clk_register_clk_pxa3xx_smemc();
+       clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
+                         (void __iomem *)&OSCC, 11, 0, NULL);
+}
+
+int __init pxa3xx_clocks_init(void)
+{
+       int ret;
+
+       pxa3xx_base_clocks_init();
+       pxa3xx_dummy_clocks_init();
+       ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
+       if (ret)
+               return ret;
+       if (cpu_is_pxa320())
+               return clk_pxa_cken_init(pxa320_clocks,
+                                        ARRAY_SIZE(pxa320_clocks));
+       if (cpu_is_pxa300() || cpu_is_pxa310())
+               return clk_pxa_cken_init(pxa300_310_clocks,
+                                        ARRAY_SIZE(pxa300_310_clocks));
+       return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
+}
+
+static void __init pxa3xx_dt_clocks_init(struct device_node *np)
+{
+       pxa3xx_clocks_init();
+       clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
index 1107351ed34682e7cd8fa04ffcd61c0e4bc146f9..0d7ab52b7ab0076fec09340410c09bca0efe3b4f 100644 (file)
@@ -29,6 +29,15 @@ config IPQ_GCC_806X
          Say Y if you want to use peripheral devices such as UART, SPI,
          i2c, USB, SD/eMMC, etc.
 
+config IPQ_LCC_806X
+       tristate "IPQ806x LPASS Clock Controller"
+       select IPQ_GCC_806X
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the LPASS clock controller on ipq806x devices.
+         Say Y if you want to use audio devices such as i2s, pcm,
+         S/PDIF, etc.
+
 config MSM_GCC_8660
        tristate "MSM8660 Global Clock Controller"
        depends on COMMON_CLK_QCOM
@@ -45,6 +54,15 @@ config MSM_GCC_8960
          Say Y if you want to use peripheral devices such as UART, SPI,
          i2c, USB, SD/eMMC, SATA, PCIe, etc.
 
+config MSM_LCC_8960
+       tristate "APQ8064/MSM8960 LPASS Clock Controller"
+       select MSM_GCC_8960
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the LPASS clock controller on apq8064/msm8960 devices.
+         Say Y if you want to use audio devices such as i2s, pcm,
+         SLIMBus, etc.
+
 config MSM_MMCC_8960
        tristate "MSM8960 Multimedia Clock Controller"
        select MSM_GCC_8960
index 783cfb24faa41cbbac81e676d5ed129733529aac..61782646959534bec2b19d445762abbec7fb1591 100644 (file)
@@ -6,13 +6,17 @@ clk-qcom-y += clk-pll.o
 clk-qcom-y += clk-rcg.o
 clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
 clk-qcom-y += reset.o
 
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
index 60873a7f45d94b3687bb342b86dd7bbfb9db67df..b4325f65a1bf6f225811936c9a00927391c62787 100644 (file)
@@ -141,6 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
 
 static long
 clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
+                      unsigned long min_rate, unsigned long max_rate,
                       unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_pll *pll = to_clk_pll(hw);
index 0b93972c8807f11ef5e27bf6eb9f14e85b9f1055..0039bd7d3965370108bba619abdf40009495e693 100644 (file)
@@ -368,6 +368,7 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
                const struct freq_tbl *f, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p_hw)
 {
        unsigned long clk_flags;
@@ -397,22 +398,27 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 }
 
 static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
 
-       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+                       max_rate, p_rate, p);
 }
 
 static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
 
-       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+                       max_rate, p_rate, p);
 }
 
 static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p_hw)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
index 08b8b3729f539ee769f15d1f474e8c3718d1e640..742acfa18d63798c19c25884ef2b50d508965858 100644 (file)
@@ -208,6 +208,7 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 }
 
 static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -361,6 +362,8 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
 }
 
 static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -412,6 +415,7 @@ const struct clk_ops clk_edp_pixel_ops = {
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
 static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
+                        unsigned long min_rate, unsigned long max_rate,
                         unsigned long *p_rate, struct clk_hw **p_hw)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -476,6 +480,8 @@ static const struct frac_entry frac_table_pixel[] = {
 };
 
 static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
new file mode 100644 (file)
index 0000000..5348491
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap-divider.h"
+
+static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
+{
+       return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
+}
+
+static long div_round_rate(struct clk_hw *hw, unsigned long rate,
+                          unsigned long *prate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+
+       return divider_round_rate(hw, rate, prate, NULL, divider->width,
+                                 CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int div_set_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long parent_rate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+       struct clk_regmap *clkr = &divider->clkr;
+       u32 div;
+
+       div = divider_get_val(rate, parent_rate, NULL, divider->width,
+                             CLK_DIVIDER_ROUND_CLOSEST);
+
+       return regmap_update_bits(clkr->regmap, divider->reg,
+                                 (BIT(divider->width) - 1) << divider->shift,
+                                 div << divider->shift);
+}
+
+static unsigned long div_recalc_rate(struct clk_hw *hw,
+                                    unsigned long parent_rate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+       struct clk_regmap *clkr = &divider->clkr;
+       u32 div;
+
+       regmap_read(clkr->regmap, divider->reg, &div);
+       div >>= divider->shift;
+       div &= BIT(divider->width) - 1;
+
+       return divider_recalc_rate(hw, parent_rate, div, NULL,
+                                  CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+const struct clk_ops clk_regmap_div_ops = {
+       .round_rate = div_round_rate,
+       .set_rate = div_set_rate,
+       .recalc_rate = div_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
new file mode 100644 (file)
index 0000000..fc4492e
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_DIVIDER_H__
+#define __QCOM_CLK_REGMAP_DIVIDER_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct clk_regmap_div {
+       u32                     reg;
+       u32                     shift;
+       u32                     width;
+       struct clk_regmap       clkr;
+};
+
+extern const struct clk_ops clk_regmap_div_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
new file mode 100644 (file)
index 0000000..cae3071
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap-mux.h"
+
+/* Map a clk_hw back to its containing clk_regmap_mux (hw is embedded in clkr). */
+static inline struct clk_regmap_mux *to_clk_regmap_mux(struct clk_hw *hw)
+{
+       return container_of(to_clk_regmap(hw), struct clk_regmap_mux, clkr);
+}
+
+/*
+ * clk_ops.get_parent: extract the currently-selected parent index from the
+ * mux bitfield ('width' bits at 'shift').
+ *
+ * NOTE(review): the regmap_read() return value is ignored; on a failed
+ * read 'val' would be used uninitialized — confirm reads cannot fail here.
+ */
+static u8 mux_get_parent(struct clk_hw *hw)
+{
+       struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
+       struct clk_regmap *clkr = to_clk_regmap(hw);
+       unsigned int mask = GENMASK(mux->width - 1, 0);
+       unsigned int val;
+
+       regmap_read(clkr->regmap, mux->reg, &val);
+
+       val >>= mux->shift;
+       val &= mask;
+
+       return val;
+}
+
+/*
+ * clk_ops.set_parent: program @index into the mux bitfield with a
+ * read-modify-write so other fields sharing the register are preserved.
+ */
+static int mux_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
+       struct clk_regmap *clkr = to_clk_regmap(hw);
+       unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+       unsigned int val;
+
+       val = index;
+       val <<= mux->shift;
+
+       return regmap_update_bits(clkr->regmap, mux->reg, mask, val);
+}
+
+const struct clk_ops clk_regmap_mux_closest_ops = {
+       .get_parent = mux_get_parent,
+       .set_parent = mux_set_parent,
+       .determine_rate = __clk_mux_determine_rate_closest,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_closest_ops);
diff --git a/drivers/clk/qcom/clk-regmap-mux.h b/drivers/clk/qcom/clk-regmap-mux.h
new file mode 100644 (file)
index 0000000..5cec761
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_MUX_H__
+#define __QCOM_CLK_REGMAP_MUX_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct clk_regmap_mux {
+       u32                     reg;
+       u32                     shift;
+       u32                     width;
+       struct clk_regmap       clkr;
+};
+
+extern const struct clk_ops clk_regmap_mux_closest_ops;
+
+#endif
index afed5eb0691e101c246275b1b6a80d44de9d1eed..cbdc31dea7f4311d091b938959179b3dd97e0fbe 100644 (file)
@@ -75,6 +75,17 @@ static struct clk_pll pll3 = {
        },
 };
 
+static struct clk_regmap pll4_vote = {
+       .enable_reg = 0x34c0,
+       .enable_mask = BIT(4),
+       .hw.init = &(struct clk_init_data){
+               .name = "pll4_vote",
+               .parent_names = (const char *[]){ "pll4" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
 static struct clk_pll pll8 = {
        .l_reg = 0x3144,
        .m_reg = 0x3148,
@@ -2163,6 +2174,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL0] = &pll0.clkr,
        [PLL0_VOTE] = &pll0_vote,
        [PLL3] = &pll3.clkr,
+       [PLL4_VOTE] = &pll4_vote,
        [PLL8] = &pll8.clkr,
        [PLL8_VOTE] = &pll8_vote,
        [PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
new file mode 100644 (file)
index 0000000..121ffde
--- /dev/null
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+       .l_reg = 0x4,
+       .m_reg = 0x8,
+       .n_reg = 0xc,
+       .config_reg = 0x14,
+       .mode_reg = 0x0,
+       .status_reg = 0x18,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll4",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static const struct pll_config pll4_config = {
+       .l = 0xf,
+       .m = 0x91,
+       .n = 0xc7,
+       .vco_val = 0x0,
+       .vco_mask = BIT(17) | BIT(16),
+       .pre_div_val = 0x0,
+       .pre_div_mask = BIT(19),
+       .post_div_val = 0x0,
+       .post_div_mask = BIT(21) | BIT(20),
+       .mn_ena_mask = BIT(22),
+       .main_output_mask = BIT(23),
+};
+
+#define P_PXO  0
+#define P_PLL4 1
+
+static const u8 lcc_pxo_pll4_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL4]        = 2,
+};
+
+static const char *lcc_pxo_pll4[] = {
+       "pxo",
+       "pll4_vote",
+};
+
+static struct freq_tbl clk_tbl_aif_mi2s[] = {
+       {  1024000, P_PLL4, 4,  1,  96 },
+       {  1411200, P_PLL4, 4,  2, 139 },
+       {  1536000, P_PLL4, 4,  1,  64 },
+       {  2048000, P_PLL4, 4,  1,  48 },
+       {  2116800, P_PLL4, 4,  2,  93 },
+       {  2304000, P_PLL4, 4,  2,  85 },
+       {  2822400, P_PLL4, 4,  6, 209 },
+       {  3072000, P_PLL4, 4,  1,  32 },
+       {  3175200, P_PLL4, 4,  1,  31 },
+       {  4096000, P_PLL4, 4,  1,  24 },
+       {  4233600, P_PLL4, 4,  9, 209 },
+       {  4608000, P_PLL4, 4,  3,  64 },
+       {  5644800, P_PLL4, 4, 12, 209 },
+       {  6144000, P_PLL4, 4,  1,  16 },
+       {  6350400, P_PLL4, 4,  2,  31 },
+       {  8192000, P_PLL4, 4,  1,  12 },
+       {  8467200, P_PLL4, 4, 18, 209 },
+       {  9216000, P_PLL4, 4,  3,  32 },
+       { 11289600, P_PLL4, 4, 24, 209 },
+       { 12288000, P_PLL4, 4,  1,   8 },
+       { 12700800, P_PLL4, 4, 27, 209 },
+       { 13824000, P_PLL4, 4,  9,  64 },
+       { 16384000, P_PLL4, 4,  1,   6 },
+       { 16934400, P_PLL4, 4, 41, 238 },
+       { 18432000, P_PLL4, 4,  3,  16 },
+       { 22579200, P_PLL4, 2, 24, 209 },
+       { 24576000, P_PLL4, 4,  1,   4 },
+       { 27648000, P_PLL4, 4,  9,  32 },
+       { 33868800, P_PLL4, 4, 41, 119 },
+       { 36864000, P_PLL4, 4,  3,   8 },
+       { 45158400, P_PLL4, 1, 24, 209 },
+       { 49152000, P_PLL4, 4,  1,   2 },
+       { 50803200, P_PLL4, 1, 27, 209 },
+       { }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+       .ns_reg = 0x48,
+       .md_reg = 0x4c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_mi2s,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_mi2s_parents[] = {
+       "mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(17),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+       .reg = 0x48,
+       .shift = 10,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_div_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+               },
+       },
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_div_clk",
+                       .parent_names = (const char *[]){ "mi2s_div_clk" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+       .reg = 0x48,
+       .shift = 14,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_clk",
+                       .parent_names = (const char *[]){
+                               "mi2s_bit_div_clk",
+                               "mi2s_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_pcm[] = {
+       {   64000, P_PLL4, 4, 1, 1536 },
+       {  128000, P_PLL4, 4, 1,  768 },
+       {  256000, P_PLL4, 4, 1,  384 },
+       {  512000, P_PLL4, 4, 1,  192 },
+       { 1024000, P_PLL4, 4, 1,   96 },
+       { 2048000, P_PLL4, 4, 1,   48 },
+       { },
+};
+
+static struct clk_rcg pcm_src = {
+       .ns_reg = 0x54,
+       .md_reg = 0x58,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_pcm,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch pcm_clk_out = {
+       .halt_reg = 0x5c,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk_out",
+                       .parent_names = (const char *[]){ "pcm_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux pcm_clk = {
+       .reg = 0x54,
+       .shift = 10,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk",
+                       .parent_names = (const char *[]){
+                               "pcm_clk_out",
+                               "pcm_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_aif_osr[] = {
+       {  22050, P_PLL4, 1, 147, 20480 },
+       {  32000, P_PLL4, 1,   1,    96 },
+       {  44100, P_PLL4, 1, 147, 10240 },
+       {  48000, P_PLL4, 1,   1,    64 },
+       {  88200, P_PLL4, 1, 147,  5120 },
+       {  96000, P_PLL4, 1,   1,    32 },
+       { 176400, P_PLL4, 1, 147,  2560 },
+       { 192000, P_PLL4, 1,   1,    16 },
+       { },
+};
+
+static struct clk_rcg spdif_src = {
+       .ns_reg = 0xcc,
+       .md_reg = 0xd0,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "spdif_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_spdif_parents[] = {
+       "spdif_src",
+};
+
+static struct clk_branch spdif_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(12),
+               .hw.init = &(struct clk_init_data){
+                       .name = "spdif_clk",
+                       .parent_names = lcc_spdif_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_ahbix[] = {
+       { 131072, P_PLL4, 1, 1, 3 },
+       { },
+};
+
+static struct clk_rcg ahbix_clk = {
+       .ns_reg = 0x38,
+       .md_reg = 0x3c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_ahbix,
+       .clkr = {
+               .enable_reg = 0x38,
+               .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
+               .hw.init = &(struct clk_init_data){
+                       .name = "ahbix",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_regmap *lcc_ipq806x_clks[] = {
+       [PLL4] = &pll4.clkr,
+       [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+       [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+       [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+       [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+       [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+       [PCM_SRC] = &pcm_src.clkr,
+       [PCM_CLK_OUT] = &pcm_clk_out.clkr,
+       [PCM_CLK] = &pcm_clk.clkr,
+       [SPDIF_SRC] = &spdif_src.clkr,
+       [SPDIF_CLK] = &spdif_clk.clkr,
+       [AHBIX_CLK] = &ahbix_clk.clkr,
+};
+
+static const struct regmap_config lcc_ipq806x_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0xfc,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc lcc_ipq806x_desc = {
+       .config = &lcc_ipq806x_regmap_config,
+       .clks = lcc_ipq806x_clks,
+       .num_clks = ARRAY_SIZE(lcc_ipq806x_clks),
+};
+
+static const struct of_device_id lcc_ipq806x_match_table[] = {
+       { .compatible = "qcom,lcc-ipq8064" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lcc_ipq806x_match_table);
+
+static int lcc_ipq806x_probe(struct platform_device *pdev)
+{
+       u32 val;
+       int ret;
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &lcc_ipq806x_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /*
+        * Configure the rate of PLL4 only if the bootloader hasn't already
+        * (PLL4 mode register at 0x0 still reads as zero).
+        *
+        * regmap_read() returns a status code and stores the register value
+        * through its third argument; assigning its return to 'val' (as the
+        * original code did) clobbers the register value with 0 on every
+        * successful read, making the "unconfigured" check always true.
+        */
+       ret = regmap_read(regmap, 0x0, &val);
+       if (ret)
+               return ret;
+       if (!val)
+               clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
+       /* Enable PLL4 source on the LPASS Primary PLL Mux */
+       regmap_write(regmap, 0xc4, 0x1);
+
+       return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
+}
+
+/* Undo qcom_cc_really_probe(): unregister the clocks and reset controller. */
+static int lcc_ipq806x_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+static struct platform_driver lcc_ipq806x_driver = {
+       .probe          = lcc_ipq806x_probe,
+       .remove         = lcc_ipq806x_remove,
+       .driver         = {
+               .name   = "lcc-ipq806x",
+               .owner  = THIS_MODULE,
+               .of_match_table = lcc_ipq806x_match_table,
+       },
+};
+module_platform_driver(lcc_ipq806x_driver);
+
+MODULE_DESCRIPTION("QCOM LCC IPQ806x Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-ipq806x");
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
new file mode 100644 (file)
index 0000000..a75a408
--- /dev/null
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-msm8960.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+       .l_reg = 0x4,
+       .m_reg = 0x8,
+       .n_reg = 0xc,
+       .config_reg = 0x14,
+       .mode_reg = 0x0,
+       .status_reg = 0x18,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll4",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+#define P_PXO  0
+#define P_PLL4 1
+
+static const u8 lcc_pxo_pll4_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL4]        = 2,
+};
+
+static const char *lcc_pxo_pll4[] = {
+       "pxo",
+       "pll4_vote",
+};
+
+static struct freq_tbl clk_tbl_aif_osr_492[] = {
+       {   512000, P_PLL4, 4, 1, 240 },
+       {   768000, P_PLL4, 4, 1, 160 },
+       {  1024000, P_PLL4, 4, 1, 120 },
+       {  1536000, P_PLL4, 4, 1,  80 },
+       {  2048000, P_PLL4, 4, 1,  60 },
+       {  3072000, P_PLL4, 4, 1,  40 },
+       {  4096000, P_PLL4, 4, 1,  30 },
+       {  6144000, P_PLL4, 4, 1,  20 },
+       {  8192000, P_PLL4, 4, 1,  15 },
+       { 12288000, P_PLL4, 4, 1,  10 },
+       { 24576000, P_PLL4, 4, 1,   5 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct freq_tbl clk_tbl_aif_osr_393[] = {
+       {   512000, P_PLL4, 4, 1, 192 },
+       {   768000, P_PLL4, 4, 1, 128 },
+       {  1024000, P_PLL4, 4, 1,  96 },
+       {  1536000, P_PLL4, 4, 1,  64 },
+       {  2048000, P_PLL4, 4, 1,  48 },
+       {  3072000, P_PLL4, 4, 1,  32 },
+       {  4096000, P_PLL4, 4, 1,  24 },
+       {  6144000, P_PLL4, 4, 1,  16 },
+       {  8192000, P_PLL4, 4, 1,  12 },
+       { 12288000, P_PLL4, 4, 1,   8 },
+       { 24576000, P_PLL4, 4, 1,   4 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+       .ns_reg = 0x48,
+       .md_reg = 0x4c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr_393,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_mi2s_parents[] = {
+       "mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(17),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+       .reg = 0x48,
+       .shift = 10,
+       .width = 4,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_div_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+               },
+       },
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_div_clk",
+                       .parent_names = (const char *[]){ "mi2s_div_clk" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+       .reg = 0x48,
+       .shift = 14,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_clk",
+                       .parent_names = (const char *[]){
+                               "mi2s_bit_div_clk",
+                               "mi2s_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr)                  \
+static struct clk_rcg prefix##_osr_src = {                     \
+       .ns_reg = _ns,                                          \
+       .md_reg = _md,                                          \
+       .mn = {                                                 \
+               .mnctr_en_bit = 8,                              \
+               .mnctr_reset_bit = 7,                           \
+               .mnctr_mode_shift = 5,                          \
+               .n_val_shift = 24,                              \
+               .m_val_shift = 8,                               \
+               .width = 8,                                     \
+       },                                                      \
+       .p = {                                                  \
+               .pre_div_shift = 3,                             \
+               .pre_div_width = 2,                             \
+       },                                                      \
+       .s = {                                                  \
+               .src_sel_shift = 0,                             \
+               .parent_map = lcc_pxo_pll4_map,                 \
+       },                                                      \
+       .freq_tbl = clk_tbl_aif_osr_393,                        \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(9),                          \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_osr_src",             \
+                       .parent_names = lcc_pxo_pll4,           \
+                       .num_parents = 2,                       \
+                       .ops = &clk_rcg_ops,                    \
+                       .flags = CLK_SET_RATE_GATE,             \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static const char *lcc_##prefix##_parents[] = {                        \
+       #prefix "_osr_src",                                     \
+};                                                             \
+                                                               \
+static struct clk_branch prefix##_osr_clk = {                  \
+       .halt_reg = hr,                                         \
+       .halt_bit = 1,                                          \
+       .halt_check = BRANCH_HALT_ENABLE,                       \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(21),                         \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_osr_clk",             \
+                       .parent_names = lcc_##prefix##_parents, \
+                       .num_parents = 1,                       \
+                       .ops = &clk_branch_ops,                 \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_regmap_div prefix##_div_clk = {              \
+       .reg = _ns,                                             \
+       .shift = 10,                                            \
+       .width = 8,                                             \
+       .clkr = {                                               \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_div_clk",             \
+                       .parent_names = lcc_##prefix##_parents, \
+                       .num_parents = 1,                       \
+                       .ops = &clk_regmap_div_ops,             \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_branch prefix##_bit_div_clk = {              \
+       .halt_reg = hr,                                         \
+       .halt_bit = 0,                                          \
+       .halt_check = BRANCH_HALT_ENABLE,                       \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(19),                         \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_bit_div_clk",         \
+                       .parent_names = (const char *[]){       \
+                               #prefix "_div_clk"              \
+                       },                                      \
+                       .num_parents = 1,                       \
+                       .ops = &clk_branch_ops,                 \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_regmap_mux prefix##_bit_clk = {              \
+       .reg = _ns,                                             \
+       .shift = 18,                                            \
+       .width = 1,                                             \
+       .clkr = {                                               \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_bit_clk",             \
+                       .parent_names = (const char *[]){       \
+                               #prefix "_bit_div_clk",         \
+                               #prefix "_codec_clk",           \
+                       },                                      \
+                       .num_parents = 2,                       \
+                       .ops = &clk_regmap_mux_closest_ops,     \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+}
+
+/*
+ * Instantiate the full OSR-src/OSR-clk/div/bit-div/bit-clk tree for each
+ * audio interface.  Arguments are the register offsets passed to the
+ * CLK_AIF_OSR_DIV() macro defined above (halt reg and NS/control regs --
+ * exact parameter meaning per the macro, which starts above this view).
+ */
+CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
+CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
+CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
+CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
+
+/*
+ * PCM source frequency plan for the faster PLL4 configuration (the "492"
+ * plan -- presumably a 491.52 MHz PLL4; probe() selects this table when
+ * register 0x4 reads 0x12).  Entries appear to be { rate, source,
+ * pre-divider, M, N } -- TODO confirm field order against struct freq_tbl.
+ * The last real entry sources 27 MHz straight from PXO with no M/N.
+ */
+static struct freq_tbl clk_tbl_pcm_492[] = {
+       {   256000, P_PLL4, 4, 1, 480 },
+       {   512000, P_PLL4, 4, 1, 240 },
+       {   768000, P_PLL4, 4, 1, 160 },
+       {  1024000, P_PLL4, 4, 1, 120 },
+       {  1536000, P_PLL4, 4, 1,  80 },
+       {  2048000, P_PLL4, 4, 1,  60 },
+       {  3072000, P_PLL4, 4, 1,  40 },
+       {  4096000, P_PLL4, 4, 1,  30 },
+       {  6144000, P_PLL4, 4, 1,  20 },
+       {  8192000, P_PLL4, 4, 1,  15 },
+       { 12288000, P_PLL4, 4, 1,  10 },
+       { 24576000, P_PLL4, 4, 1,   5 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }     /* sentinel */
+};
+
+/*
+ * PCM source frequency plan for the default PLL4 configuration (the "393"
+ * plan).  This is the table wired into pcm_src below; probe() swaps in
+ * clk_tbl_pcm_492 when it detects the faster PLL4.  Same row layout as
+ * clk_tbl_pcm_492 above.
+ */
+static struct freq_tbl clk_tbl_pcm_393[] = {
+       {   256000, P_PLL4, 4, 1, 384 },
+       {   512000, P_PLL4, 4, 1, 192 },
+       {   768000, P_PLL4, 4, 1, 128 },
+       {  1024000, P_PLL4, 4, 1,  96 },
+       {  1536000, P_PLL4, 4, 1,  64 },
+       {  2048000, P_PLL4, 4, 1,  48 },
+       {  3072000, P_PLL4, 4, 1,  32 },
+       {  4096000, P_PLL4, 4, 1,  24 },
+       {  6144000, P_PLL4, 4, 1,  16 },
+       {  8192000, P_PLL4, 4, 1,  12 },
+       { 12288000, P_PLL4, 4, 1,   8 },
+       { 24576000, P_PLL4, 4, 1,   4 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }     /* sentinel */
+};
+
+/*
+ * PCM root clock generator: M/N counter plus pre-divider at NS 0x54 /
+ * MD 0x58, selecting between PXO and PLL4 (lcc_pxo_pll4_map).  Defaults
+ * to the 393 frequency plan; probe() may retarget it to the 492 plan.
+ * CLK_SET_RATE_GATE: rate changes are only allowed while the clock is
+ * gated.
+ */
+static struct clk_rcg pcm_src = {
+       .ns_reg = 0x54,
+       .md_reg = 0x58,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_pcm_393,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+/*
+ * Branch (gate) on the pcm_src output: enable via bit 11 of 0x54, halt
+ * status in bit 0 of 0x5c.  CLK_SET_RATE_PARENT forwards rate requests
+ * to pcm_src.
+ */
+static struct clk_branch pcm_clk_out = {
+       .halt_reg = 0x5c,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk_out",
+                       .parent_names = (const char *[]){ "pcm_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+/*
+ * Final PCM output mux (bit 10 of 0x54): selects between the locally
+ * generated pcm_clk_out and "pcm_codec_clk" (presumably a clock driven
+ * by the external codec -- not defined in this file; verify provider).
+ */
+static struct clk_regmap_mux pcm_clk = {
+       .reg = 0x54,
+       .shift = 10,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk",
+                       .parent_names = (const char *[]){
+                               "pcm_clk_out",
+                               "pcm_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+/*
+ * SLIMbus root clock generator at NS 0xcc / MD 0xd0, PXO or PLL4 parent.
+ * Reuses the AIF OSR 393 frequency table; probe() switches it to the 492
+ * table together with the other OSR sources when the faster PLL4 is
+ * detected.  Note the narrower 8-bit M/N counter compared to pcm_src.
+ */
+static struct clk_rcg slimbus_src = {
+       .ns_reg = 0xcc,
+       .md_reg = 0xd0,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr_393,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "slimbus_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+/* Shared single-entry parent list for the two SLIMbus branch clocks. */
+static const char *lcc_slimbus_parents[] = {
+       "slimbus_src",
+};
+
+/*
+ * Audio-side SLIMbus branch: gate bit 10 of 0xcc, halt bit 0 of 0xd4,
+ * child of slimbus_src with rate requests forwarded to the parent.
+ */
+static struct clk_branch audio_slimbus_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(10),
+               .hw.init = &(struct clk_init_data){
+                       .name = "audio_slimbus_clk",
+                       .parent_names = lcc_slimbus_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+/*
+ * SPS-side SLIMbus branch: same parent and enable register as
+ * audio_slimbus_clk but gate bit 12 and halt bit 1.
+ */
+static struct clk_branch sps_slimbus_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(12),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sps_slimbus_clk",
+                       .parent_names = lcc_slimbus_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+/*
+ * Clock table indexed by the provider's clock IDs (PLL4, MI2S_*, PCM_*,
+ * SLIMBUS_*, ... -- presumably from the qcom,lcc-msm8960 DT binding
+ * header; verify against include/dt-bindings).  Registered via
+ * lcc_msm8960_desc / qcom_cc_really_probe().
+ */
+static struct clk_regmap *lcc_msm8960_clks[] = {
+       [PLL4] = &pll4.clkr,
+       [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+       [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+       [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+       [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+       [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+       [PCM_SRC] = &pcm_src.clkr,
+       [PCM_CLK_OUT] = &pcm_clk_out.clkr,
+       [PCM_CLK] = &pcm_clk.clkr,
+       [SLIMBUS_SRC] = &slimbus_src.clkr,
+       [AUDIO_SLIMBUS_CLK] = &audio_slimbus_clk.clkr,
+       [SPS_SLIMBUS_CLK] = &sps_slimbus_clk.clkr,
+       [CODEC_I2S_MIC_OSR_SRC] = &codec_i2s_mic_osr_src.clkr,
+       [CODEC_I2S_MIC_OSR_CLK] = &codec_i2s_mic_osr_clk.clkr,
+       [CODEC_I2S_MIC_DIV_CLK] = &codec_i2s_mic_div_clk.clkr,
+       [CODEC_I2S_MIC_BIT_DIV_CLK] = &codec_i2s_mic_bit_div_clk.clkr,
+       [CODEC_I2S_MIC_BIT_CLK] = &codec_i2s_mic_bit_clk.clkr,
+       [SPARE_I2S_MIC_OSR_SRC] = &spare_i2s_mic_osr_src.clkr,
+       [SPARE_I2S_MIC_OSR_CLK] = &spare_i2s_mic_osr_clk.clkr,
+       [SPARE_I2S_MIC_DIV_CLK] = &spare_i2s_mic_div_clk.clkr,
+       [SPARE_I2S_MIC_BIT_DIV_CLK] = &spare_i2s_mic_bit_div_clk.clkr,
+       [SPARE_I2S_MIC_BIT_CLK] = &spare_i2s_mic_bit_clk.clkr,
+       [CODEC_I2S_SPKR_OSR_SRC] = &codec_i2s_spkr_osr_src.clkr,
+       [CODEC_I2S_SPKR_OSR_CLK] = &codec_i2s_spkr_osr_clk.clkr,
+       [CODEC_I2S_SPKR_DIV_CLK] = &codec_i2s_spkr_div_clk.clkr,
+       [CODEC_I2S_SPKR_BIT_DIV_CLK] = &codec_i2s_spkr_bit_div_clk.clkr,
+       [CODEC_I2S_SPKR_BIT_CLK] = &codec_i2s_spkr_bit_clk.clkr,
+       [SPARE_I2S_SPKR_OSR_SRC] = &spare_i2s_spkr_osr_src.clkr,
+       [SPARE_I2S_SPKR_OSR_CLK] = &spare_i2s_spkr_osr_clk.clkr,
+       [SPARE_I2S_SPKR_DIV_CLK] = &spare_i2s_spkr_div_clk.clkr,
+       [SPARE_I2S_SPKR_BIT_DIV_CLK] = &spare_i2s_spkr_bit_div_clk.clkr,
+       [SPARE_I2S_SPKR_BIT_CLK] = &spare_i2s_spkr_bit_clk.clkr,
+};
+
+/* MMIO regmap: 32-bit registers on a 4-byte stride, last register 0xfc. */
+static const struct regmap_config lcc_msm8960_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0xfc,
+       .fast_io        = true,
+};
+
+/* Controller descriptor handed to qcom_cc_map()/qcom_cc_really_probe(). */
+static const struct qcom_cc_desc lcc_msm8960_desc = {
+       .config = &lcc_msm8960_regmap_config,
+       .clks = lcc_msm8960_clks,
+       .num_clks = ARRAY_SIZE(lcc_msm8960_clks),
+};
+
+/* The same driver serves both the MSM8960 and APQ8064 LPASS clock block. */
+static const struct of_device_id lcc_msm8960_match_table[] = {
+       { .compatible = "qcom,lcc-msm8960" },
+       { .compatible = "qcom,lcc-apq8064" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lcc_msm8960_match_table);
+
+/*
+ * Probe: map the LCC register space, select the AIF/PCM frequency plan
+ * matching the detected PLL4 configuration, route PLL4 onto the LPASS
+ * primary PLL mux, and register all clocks through the common qcom code.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int lcc_msm8960_probe(struct platform_device *pdev)
+{
+       u32 val;
+       int ret;
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &lcc_msm8960_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /*
+        * Use the correct frequency plan depending on speed of PLL4.
+        * regmap_read() returns 0/-errno and delivers the register value
+        * through its third argument; the previous code assigned the
+        * return code to 'val', clobbering the value just read, so the
+        * 0x12 comparison tested the error status rather than the
+        * register contents.  Propagate read failures instead.
+        */
+       ret = regmap_read(regmap, 0x4, &val);
+       if (ret)
+               return ret;
+       if (val == 0x12) {
+               slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
+               mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               codec_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               spare_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               codec_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               spare_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               pcm_src.freq_tbl = clk_tbl_pcm_492;
+       }
+
+       /* Enable PLL4 source on the LPASS Primary PLL Mux */
+       regmap_write(regmap, 0xc4, 0x1);
+
+       return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
+}
+
+/* Remove: undo the registrations done by qcom_cc_really_probe(). */
+static int lcc_msm8960_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+/* Platform driver glue; matched by DT compatible or platform alias. */
+static struct platform_driver lcc_msm8960_driver = {
+       .probe          = lcc_msm8960_probe,
+       .remove         = lcc_msm8960_remove,
+       .driver         = {
+               .name   = "lcc-msm8960",
+               .owner  = THIS_MODULE,
+               .of_match_table = lcc_msm8960_match_table,
+       },
+};
+module_platform_driver(lcc_msm8960_driver);
+
+MODULE_DESCRIPTION("QCOM LCC MSM8960 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-msm8960");
index cbcddcc02475233f53b2d150f8395a99c3b4ba4d..05d7a0bc059907872ff1719d0c401a0494082199 100644 (file)
@@ -535,44 +535,44 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
                        RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 8, GFLAGS),
-       COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
+       COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(17), 0,
                        RK3288_CLKGATE_CON(1), 9, GFLAGS),
-       MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0,
+       MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(13), 8, 2, MFLAGS),
        MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
                        RK3288_CLKSEL_CON(13), 15, 1, MFLAGS),
        COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(14), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 10, GFLAGS),
-       COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0,
+       COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(18), 0,
                        RK3288_CLKGATE_CON(1), 11, GFLAGS),
-       MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0,
+       MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(14), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(15), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 12, GFLAGS),
-       COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0,
+       COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(19), 0,
                        RK3288_CLKGATE_CON(1), 13, GFLAGS),
-       MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0,
+       MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(15), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(16), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 14, GFLAGS),
-       COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0,
+       COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(20), 0,
                        RK3288_CLKGATE_CON(1), 15, GFLAGS),
-       MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0,
+       MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(16), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(3), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(2), 12, GFLAGS),
-       COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0,
+       COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(7), 0,
                        RK3288_CLKGATE_CON(2), 13, GFLAGS),
-       MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
+       MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
 
        COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
@@ -598,7 +598,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(0, "jtag", "ext_jtag", 0,
                        RK3288_CLKGATE_CON(4), 14, GFLAGS),
 
-       COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
+       COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
                        RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
                        RK3288_CLKGATE_CON(5), 14, GFLAGS),
        COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
@@ -704,8 +704,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 
        GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS),
        GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS),
-       GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
-       GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
+       GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
+       GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
        GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS),
 
        /* sclk_gpu gates */
@@ -805,6 +805,20 @@ static int rk3288_clk_suspend(void)
                rk3288_saved_cru_regs[i] =
                                readl_relaxed(rk3288_cru_base + reg_id);
        }
+
+       /*
+        * Switch PLLs other than DPLL (for SDRAM) to slow mode to
+        * avoid crashes on resume. The Mask ROM on the system will
+        * put APLL, CPLL, and GPLL into slow mode at resume time
+        * anyway (which is why we restore them), but we might not
+        * even make it to the Mask ROM if this isn't done at suspend
+        * time.
+        *
+        * NOTE: only APLL truly matters here, but we'll do them all.
+        */
+
+       writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
+
        return 0;
 }
 
@@ -866,6 +880,14 @@ static void __init rk3288_clk_init(struct device_node *np)
                pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
                        __func__, PTR_ERR(clk));
 
+       /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
+       clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
+       if (IS_ERR(clk))
+               pr_warn("%s: could not register clock pclk_wdt: %ld\n",
+                       __func__, PTR_ERR(clk));
+       else
+               rockchip_clk_add_lookup(clk, PCLK_WDT);
+
        rockchip_clk_register_plls(rk3288_pll_clks,
                                   ARRAY_SIZE(rk3288_pll_clks),
                                   RK3288_GRF_SOC_STATUS1);
index f2c2ccce49bb1ad00f7502e9d4e4236fc6bcbe54..454b02ae486a86917f31614c2c4b016841deefaa 100644 (file)
@@ -82,6 +82,26 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
        {},
 };
 
+/*
+ * Unregister every clock in clk_table with the type-specific helper,
+ * relying on the table being laid out as muxes, then dividers, then
+ * gates: muxes occupy [EXYNOS_MOUT_AUDSS, EXYNOS_DOUT_SRP), dividers
+ * run up to (but excluding) EXYNOS_SRP_CLK, and the remainder up to
+ * clk_data.clk_num are gates.  NOTE(review): boundary semantics come
+ * from the enum ordering in the audss binding header -- confirm that
+ * EXYNOS_DOUT_SRP itself is indeed the first divider.  IS_ERR() entries
+ * (never-registered slots) are skipped.
+ */
+static void exynos_audss_clk_teardown(void)
+{
+       int i;
+
+       for (i = EXYNOS_MOUT_AUDSS; i < EXYNOS_DOUT_SRP; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_mux(clk_table[i]);
+       }
+
+       for (; i < EXYNOS_SRP_CLK; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_divider(clk_table[i]);
+       }
+
+       for (; i < clk_data.clk_num; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_gate(clk_table[i]);
+       }
+}
+
 /* register exynos_audss clocks */
 static int exynos_audss_clk_probe(struct platform_device *pdev)
 {
@@ -219,10 +239,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
        return 0;
 
 unregister:
-       for (i = 0; i < clk_data.clk_num; i++) {
-               if (!IS_ERR(clk_table[i]))
-                       clk_unregister(clk_table[i]);
-       }
+       exynos_audss_clk_teardown();
 
        if (!IS_ERR(epll))
                clk_disable_unprepare(epll);
@@ -232,18 +249,13 @@ unregister:
 
 static int exynos_audss_clk_remove(struct platform_device *pdev)
 {
-       int i;
-
 #ifdef CONFIG_PM_SLEEP
        unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
 #endif
 
        of_clk_del_provider(pdev->dev.of_node);
 
-       for (i = 0; i < clk_data.clk_num; i++) {
-               if (!IS_ERR(clk_table[i]))
-                       clk_unregister(clk_table[i]);
-       }
+       exynos_audss_clk_teardown();
 
        if (!IS_ERR(epll))
                clk_disable_unprepare(epll);
index 6e6cca3920829b5a7dc0685a071389a2d201d780..cc4c348d8a24038007375bcf53ba45a96bcd9ac9 100644 (file)
 #define PWR_CTRL1_USE_CORE1_WFI                        (1 << 1)
 #define PWR_CTRL1_USE_CORE0_WFI                        (1 << 0)
 
-/* list of PLLs to be registered */
-enum exynos3250_plls {
-       apll, mpll, vpll, upll,
-       nr_plls
-};
-
-/* list of PLLs in DMC block to be registered */
-enum exynos3250_dmc_plls {
-       bpll, epll,
-       nr_dmc_plls
-};
-
-static void __iomem *reg_base;
-static void __iomem *dmc_reg_base;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_clk_regs;
-
 static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
        SRC_LEFTBUS,
        DIV_LEFTBUS,
@@ -195,43 +174,6 @@ static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
        PWR_CTRL2,
 };
 
-static int exynos3250_clk_suspend(void)
-{
-       samsung_clk_save(reg_base, exynos3250_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_clk_regs));
-       return 0;
-}
-
-static void exynos3250_clk_resume(void)
-{
-       samsung_clk_restore(reg_base, exynos3250_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos3250_clk_syscore_ops = {
-       .suspend = exynos3250_clk_suspend,
-       .resume = exynos3250_clk_resume,
-};
-
-static void exynos3250_clk_sleep_init(void)
-{
-       exynos3250_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs,
-                                          ARRAY_SIZE(exynos3250_cmu_clk_regs));
-       if (!exynos3250_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               goto err;
-       }
-
-       register_syscore_ops(&exynos3250_clk_syscore_ops);
-       return;
-err:
-       kfree(exynos3250_clk_regs);
-}
-#else
-static inline void exynos3250_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_vpllsrc_p)          = { "fin_pll", };
 
@@ -782,18 +724,18 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
        { /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = {
-       [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-                       APLL_LOCK, APLL_CON0, NULL),
-       [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
-                       MPLL_LOCK, MPLL_CON0, NULL),
-       [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
-                       VPLL_LOCK, VPLL_CON0, NULL),
-       [upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
-                       UPLL_LOCK, UPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+               APLL_LOCK, APLL_CON0, exynos3250_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+                       MPLL_LOCK, MPLL_CON0, exynos3250_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
+                       VPLL_LOCK, VPLL_CON0, exynos3250_vpll_rates),
+       PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
+                       UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates),
 };
 
-static void __init exynos3_core_down_clock(void)
+static void __init exynos3_core_down_clock(void __iomem *reg_base)
 {
        unsigned int tmp;
 
@@ -814,38 +756,31 @@ static void __init exynos3_core_down_clock(void)
        __raw_writel(0x0, reg_base + PWR_CTRL2);
 }
 
+static struct samsung_cmu_info cmu_info __initdata = {
+       .pll_clks               = exynos3250_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos3250_plls),
+       .mux_clks               = mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(mux_clks),
+       .div_clks               = div_clks,
+       .nr_div_clks            = ARRAY_SIZE(div_clks),
+       .gate_clks              = gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(gate_clks),
+       .fixed_factor_clks      = fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(fixed_factor_clks),
+       .nr_clk_ids             = CLK_NR_CLKS,
+       .clk_regs               = exynos3250_cmu_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos3250_cmu_clk_regs),
+};
+
 static void __init exynos3250_cmu_init(struct device_node *np)
 {
        struct samsung_clk_provider *ctx;
 
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+       ctx = samsung_cmu_register_one(np, &cmu_info);
        if (!ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       samsung_clk_register_fixed_factor(ctx, fixed_factor_clks,
-                                         ARRAY_SIZE(fixed_factor_clks));
-
-       exynos3250_plls[apll].rate_table = exynos3250_pll_rates;
-       exynos3250_plls[mpll].rate_table = exynos3250_pll_rates;
-       exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates;
-       exynos3250_plls[upll].rate_table = exynos3250_pll_rates;
-
-       samsung_clk_register_pll(ctx, exynos3250_plls,
-                                       ARRAY_SIZE(exynos3250_plls), reg_base);
-
-       samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks));
-       samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
-       samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
-
-       exynos3_core_down_clock();
+               return;
 
-       exynos3250_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, ctx);
+       exynos3_core_down_clock(ctx->reg_base);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 
@@ -872,12 +807,6 @@ CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 #define EPLL_CON2              0x111c
 #define SRC_EPLL               0x1120
 
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_dmc_clk_regs;
-
 static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
        BPLL_LOCK,
        BPLL_CON0,
@@ -899,43 +828,6 @@ static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
        SRC_EPLL,
 };
 
-static int exynos3250_dmc_clk_suspend(void)
-{
-       samsung_clk_save(dmc_reg_base, exynos3250_dmc_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-       return 0;
-}
-
-static void exynos3250_dmc_clk_resume(void)
-{
-       samsung_clk_restore(dmc_reg_base, exynos3250_dmc_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos3250_dmc_clk_syscore_ops = {
-       .suspend = exynos3250_dmc_clk_suspend,
-       .resume = exynos3250_dmc_clk_resume,
-};
-
-static void exynos3250_dmc_clk_sleep_init(void)
-{
-       exynos3250_dmc_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos3250_cmu_dmc_clk_regs,
-                                  ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-       if (!exynos3250_dmc_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               goto err;
-       }
-
-       register_syscore_ops(&exynos3250_dmc_clk_syscore_ops);
-       return;
-err:
-       kfree(exynos3250_dmc_clk_regs);
-}
-#else
-static inline void exynos3250_dmc_clk_sleep_init(void) { }
-#endif
-
 PNAME(mout_epll_p)     = { "fin_pll", "fout_epll", };
 PNAME(mout_bpll_p)     = { "fin_pll", "fout_bpll", };
 PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", };
@@ -977,43 +869,28 @@ static struct samsung_div_clock dmc_div_clks[] __initdata = {
        DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
 };
 
-static struct samsung_pll_clock exynos3250_dmc_plls[nr_dmc_plls] __initdata = {
-       [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
-                       BPLL_LOCK, BPLL_CON0, NULL),
-       [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-                       EPLL_LOCK, EPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_dmc_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
+               BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+               EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates),
+};
+
+static struct samsung_cmu_info dmc_cmu_info __initdata = {
+       .pll_clks               = exynos3250_dmc_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos3250_dmc_plls),
+       .mux_clks               = dmc_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(dmc_mux_clks),
+       .div_clks               = dmc_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(dmc_div_clks),
+       .nr_clk_ids             = NR_CLKS_DMC,
+       .clk_regs               = exynos3250_cmu_dmc_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs),
 };
 
 static void __init exynos3250_cmu_dmc_init(struct device_node *np)
 {
-       struct samsung_clk_provider *ctx;
-
-       dmc_reg_base = of_iomap(np, 0);
-       if (!dmc_reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       ctx = samsung_clk_init(np, dmc_reg_base, NR_CLKS_DMC);
-       if (!ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos3250_dmc_plls[bpll].rate_table = exynos3250_pll_rates;
-       exynos3250_dmc_plls[epll].rate_table = exynos3250_epll_rates;
-
-       pr_err("CLK registering epll bpll: %d, %d, %d, %d\n",
-                       exynos3250_dmc_plls[bpll].rate_table[0].rate,
-                       exynos3250_dmc_plls[bpll].rate_table[0].mdiv,
-                       exynos3250_dmc_plls[bpll].rate_table[0].pdiv,
-                       exynos3250_dmc_plls[bpll].rate_table[0].sdiv
-             );
-       samsung_clk_register_pll(ctx, exynos3250_dmc_plls,
-                               ARRAY_SIZE(exynos3250_dmc_plls), dmc_reg_base);
-
-       samsung_clk_register_mux(ctx, dmc_mux_clks, ARRAY_SIZE(dmc_mux_clks));
-       samsung_clk_register_div(ctx, dmc_div_clks, ARRAY_SIZE(dmc_div_clks));
-
-       exynos3250_dmc_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, ctx);
+       samsung_cmu_register_one(np, &dmc_cmu_info);
 }
 CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
                exynos3250_cmu_dmc_init);
index 88e8c6bbd77ff8ea6919bc6c1ef7bd3fdc1324ae..51462e85675f7f6ed04dc6d3f38891c400aea7e4 100644 (file)
@@ -703,12 +703,12 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
 
 /* list of divider clocks supported in all exynos4 soc's */
 static struct samsung_div_clock exynos4_div_clks[] __initdata = {
-       DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
+       DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
        DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
        DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
                        CLKOUT_CMU_LEFTBUS, 8, 6),
 
-       DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
+       DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
        DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
        DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus",
                        CLKOUT_CMU_RIGHTBUS, 8, 6),
@@ -781,10 +781,10 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
                        CLK_SET_RATE_PARENT, 0),
        DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6),
 
-       DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
+       DIV(CLK_DIV_ACP, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
        DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3),
        DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3),
-       DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
+       DIV(CLK_DIV_DMC, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
        DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3),
        DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3),
        DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4),
@@ -829,7 +829,7 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
        DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
                                                8, 3, CLK_GET_RATE_NOCACHE, 0),
        DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
-       DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+       DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
        DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
 };
 
index 2123fc251e0f6960b7566759088914162b840c89..6c78b09c829f9148453f56d4f05981452f91487c 100644 (file)
 #define DIV_CPU0               0x14500
 #define DIV_CPU1               0x14504
 
-enum exynos4415_plls {
-       apll, epll, g3d_pll, isp_pll, disp_pll,
-       nr_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_ctx;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_clk_regs;
-
 static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
        SRC_LEFTBUS,
        DIV_LEFTBUS,
@@ -219,41 +206,6 @@ static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
        DIV_CPU1,
 };
 
-static int exynos4415_clk_suspend(void)
-{
-       samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_clk_regs));
-
-       return 0;
-}
-
-static void exynos4415_clk_resume(void)
-{
-       samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos4415_clk_syscore_ops = {
-       .suspend = exynos4415_clk_suspend,
-       .resume = exynos4415_clk_resume,
-};
-
-static void exynos4415_clk_sleep_init(void)
-{
-       exynos4415_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
-                                       ARRAY_SIZE(exynos4415_cmu_clk_regs));
-       if (!exynos4415_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               return;
-       }
-
-       register_syscore_ops(&exynos4415_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_g3d_pllsrc_p)       = { "fin_pll", };
 
@@ -959,56 +911,40 @@ static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
        { /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
-       [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-                       APLL_LOCK, APLL_CON0, NULL),
-       [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-                       EPLL_LOCK, EPLL_CON0, NULL),
-       [g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
-                       "mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
-       [isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
-                       ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
-       [disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
-                       "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+               APLL_LOCK, APLL_CON0, exynos4415_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+               EPLL_LOCK, EPLL_CON0, exynos4415_epll_rates),
+       PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "mout_g3d_pllsrc",
+               G3D_PLL_LOCK, G3D_PLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
+               ISP_PLL_LOCK, ISP_PLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
+               "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_info __initdata = {
+       .pll_clks               = exynos4415_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos4415_plls),
+       .mux_clks               = exynos4415_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(exynos4415_mux_clks),
+       .div_clks               = exynos4415_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(exynos4415_div_clks),
+       .gate_clks              = exynos4415_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(exynos4415_gate_clks),
+       .fixed_clks             = exynos4415_fixed_rate_clks,
+       .nr_fixed_clks          = ARRAY_SIZE(exynos4415_fixed_rate_clks),
+       .fixed_factor_clks      = exynos4415_fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(exynos4415_fixed_factor_clks),
+       .nr_clk_ids             = CLK_NR_CLKS,
+       .clk_regs               = exynos4415_cmu_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos4415_cmu_clk_regs),
 };
 
 static void __init exynos4415_cmu_init(struct device_node *np)
 {
-       void __iomem *reg_base;
-
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
-       if (!exynos4415_ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
-       exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
-
-       samsung_clk_register_fixed_factor(exynos4415_ctx,
-                               exynos4415_fixed_factor_clks,
-                               ARRAY_SIZE(exynos4415_fixed_factor_clks));
-       samsung_clk_register_fixed_rate(exynos4415_ctx,
-                               exynos4415_fixed_rate_clks,
-                               ARRAY_SIZE(exynos4415_fixed_rate_clks));
-
-       samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
-                               ARRAY_SIZE(exynos4415_plls), reg_base);
-       samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
-                               ARRAY_SIZE(exynos4415_mux_clks));
-       samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
-                               ARRAY_SIZE(exynos4415_div_clks));
-       samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
-                               ARRAY_SIZE(exynos4415_gate_clks));
-
-       exynos4415_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, exynos4415_ctx);
+       samsung_cmu_register_one(np, &cmu_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 
@@ -1027,16 +963,6 @@ CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 #define SRC_DMC                        0x300
 #define DIV_DMC1               0x504
 
-enum exynos4415_dmc_plls {
-       mpll, bpll,
-       nr_dmc_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_dmc_ctx;
-
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
-
 static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
        MPLL_LOCK,
        MPLL_CON0,
@@ -1050,42 +976,6 @@ static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
        DIV_DMC1,
 };
 
-static int exynos4415_dmc_clk_suspend(void)
-{
-       samsung_clk_save(exynos4415_dmc_ctx->reg_base,
-                               exynos4415_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-       return 0;
-}
-
-static void exynos4415_dmc_clk_resume(void)
-{
-       samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
-                               exynos4415_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
-       .suspend = exynos4415_dmc_clk_suspend,
-       .resume = exynos4415_dmc_clk_resume,
-};
-
-static void exynos4415_dmc_clk_sleep_init(void)
-{
-       exynos4415_dmc_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-       if (!exynos4415_dmc_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               return;
-       }
-
-       register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_dmc_clk_sleep_init(void) { }
-#endif /* CONFIG_PM_SLEEP */
-
 PNAME(mout_mpll_p)             = { "fin_pll", "fout_mpll", };
 PNAME(mout_bpll_p)             = { "fin_pll", "fout_bpll", };
 PNAME(mbpll_p)                 = { "mout_mpll", "mout_bpll", };
@@ -1107,38 +997,28 @@ static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
        DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
 };
 
-static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
-       [mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
-               MPLL_LOCK, MPLL_CON0, NULL),
-       [bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
-               BPLL_LOCK, BPLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = {
+       PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
+               MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
+               BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_dmc_info __initdata = {
+       .pll_clks               = exynos4415_dmc_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos4415_dmc_plls),
+       .mux_clks               = exynos4415_dmc_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(exynos4415_dmc_mux_clks),
+       .div_clks               = exynos4415_dmc_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(exynos4415_dmc_div_clks),
+       .nr_clk_ids             = NR_CLKS_DMC,
+       .clk_regs               = exynos4415_cmu_dmc_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs),
 };
 
 static void __init exynos4415_cmu_dmc_init(struct device_node *np)
 {
-       void __iomem *reg_base;
-
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
-       if (!exynos4415_dmc_ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
-       exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
-
-       samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
-                               ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
-       samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
-                               ARRAY_SIZE(exynos4415_dmc_mux_clks));
-       samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
-                               ARRAY_SIZE(exynos4415_dmc_div_clks));
-
-       exynos4415_dmc_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
+       samsung_cmu_register_one(np, &cmu_dmc_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
                exynos4415_cmu_dmc_init);
index ea4483b8d62e8a89ea2ef1c1d37b1e720c5d9f68..03d36e847b78e067d16bdfe1758df10560c4a2b4 100644 (file)
@@ -34,6 +34,7 @@
 #define DIV_TOPC0              0x0600
 #define DIV_TOPC1              0x0604
 #define DIV_TOPC3              0x060C
+#define ENABLE_ACLK_TOPC1      0x0804
 
 static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
        FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
@@ -45,6 +46,7 @@ static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
 };
 
 /* List of parent clocks for Muxes in CMU_TOPC */
+PNAME(mout_aud_pll_ctrl_p)     = { "fin_pll", "fout_aud_pll" };
 PNAME(mout_bus0_pll_ctrl_p)    = { "fin_pll", "fout_bus0_pll" };
 PNAME(mout_bus1_pll_ctrl_p)    = { "fin_pll", "fout_bus1_pll" };
 PNAME(mout_cc_pll_ctrl_p)      = { "fin_pll", "fout_cc_pll" };
@@ -104,9 +106,11 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = {
 
        MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
                MUX_SEL_TOPC1, 16, 1),
+       MUX(0, "mout_aud_pll_ctrl", mout_aud_pll_ctrl_p, MUX_SEL_TOPC1, 0, 1),
 
        MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
 
+       MUX(0, "mout_aclk_mscl_532", mout_topc_group2, MUX_SEL_TOPC3, 20, 2),
        MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
 };
 
@@ -114,6 +118,8 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
        DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
                DIV_TOPC0, 4, 4),
 
+       DIV(DOUT_ACLK_MSCL_532, "dout_aclk_mscl_532", "mout_aclk_mscl_532",
+               DIV_TOPC1, 20, 4),
        DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
                DIV_TOPC1, 24, 4),
 
@@ -125,6 +131,18 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
                DIV_TOPC3, 12, 3),
        DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
                DIV_TOPC3, 16, 3),
+       DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_aud_pll_ctrl",
+               DIV_TOPC3, 28, 3),
+};
+
+static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
+       PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+       {},
+};
+
+static struct samsung_gate_clock topc_gate_clks[] __initdata = {
+       GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532",
+               ENABLE_ACLK_TOPC1, 20, 0, 0),
 };
 
 static struct samsung_pll_clock topc_pll_clks[] __initdata = {
@@ -136,8 +154,8 @@ static struct samsung_pll_clock topc_pll_clks[] __initdata = {
                BUS1_DPLL_CON0, NULL),
        PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
                MFC_PLL_CON0, NULL),
-       PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
-               AUD_PLL_CON0, NULL),
+       PLL(pll_1460x, FOUT_AUD_PLL, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
+               AUD_PLL_CON0, pll1460x_24mhz_tbl),
 };
 
 static struct samsung_cmu_info topc_cmu_info __initdata = {
@@ -147,6 +165,8 @@ static struct samsung_cmu_info topc_cmu_info __initdata = {
        .nr_mux_clks            = ARRAY_SIZE(topc_mux_clks),
        .div_clks               = topc_div_clks,
        .nr_div_clks            = ARRAY_SIZE(topc_div_clks),
+       .gate_clks              = topc_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(topc_gate_clks),
        .fixed_factor_clks      = topc_fixed_factor_clks,
        .nr_fixed_factor_clks   = ARRAY_SIZE(topc_fixed_factor_clks),
        .nr_clk_ids             = TOPC_NR_CLK,
@@ -166,9 +186,18 @@ CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
 #define MUX_SEL_TOP00                  0x0200
 #define MUX_SEL_TOP01                  0x0204
 #define MUX_SEL_TOP03                  0x020C
+#define MUX_SEL_TOP0_PERIC0            0x0230
+#define MUX_SEL_TOP0_PERIC1            0x0234
+#define MUX_SEL_TOP0_PERIC2            0x0238
 #define MUX_SEL_TOP0_PERIC3            0x023C
 #define DIV_TOP03                      0x060C
+#define DIV_TOP0_PERIC0                        0x0630
+#define DIV_TOP0_PERIC1                        0x0634
+#define DIV_TOP0_PERIC2                        0x0638
 #define DIV_TOP0_PERIC3                        0x063C
+#define ENABLE_SCLK_TOP0_PERIC0                0x0A30
+#define ENABLE_SCLK_TOP0_PERIC1                0x0A34
+#define ENABLE_SCLK_TOP0_PERIC2                0x0A38
 #define ENABLE_SCLK_TOP0_PERIC3                0x0A3C
 
 /* List of parent clocks for Muxes in CMU_TOP0 */
@@ -176,6 +205,7 @@ PNAME(mout_bus0_pll_p)      = { "fin_pll", "dout_sclk_bus0_pll" };
 PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" };
 PNAME(mout_cc_pll_p)   = { "fin_pll", "dout_sclk_cc_pll" };
 PNAME(mout_mfc_pll_p)  = { "fin_pll", "dout_sclk_mfc_pll" };
+PNAME(mout_aud_pll_p)  = { "fin_pll", "dout_sclk_aud_pll" };
 
 PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
        "ffac_top0_bus0_pll_div2"};
@@ -189,18 +219,34 @@ PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
 PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
        "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
        "mout_top0_half_mfc_pll"};
+PNAME(mout_top0_group3) = {"ioclk_audiocdclk0",
+       "ioclk_audiocdclk1", "ioclk_spdif_extclk",
+       "mout_top0_aud_pll", "mout_top0_half_bus0_pll",
+       "mout_top0_half_bus1_pll"};
+PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll",
+       "mout_top0_half_bus0_pll", "mout_top0_half_bus1_pll"};
 
 static unsigned long top0_clk_regs[] __initdata = {
        MUX_SEL_TOP00,
        MUX_SEL_TOP01,
        MUX_SEL_TOP03,
+       MUX_SEL_TOP0_PERIC0,
+       MUX_SEL_TOP0_PERIC1,
+       MUX_SEL_TOP0_PERIC2,
        MUX_SEL_TOP0_PERIC3,
        DIV_TOP03,
+       DIV_TOP0_PERIC0,
+       DIV_TOP0_PERIC1,
+       DIV_TOP0_PERIC2,
        DIV_TOP0_PERIC3,
+       ENABLE_SCLK_TOP0_PERIC0,
+       ENABLE_SCLK_TOP0_PERIC1,
+       ENABLE_SCLK_TOP0_PERIC2,
        ENABLE_SCLK_TOP0_PERIC3,
 };
 
 static struct samsung_mux_clock top0_mux_clks[] __initdata = {
+       MUX(0, "mout_top0_aud_pll", mout_aud_pll_p, MUX_SEL_TOP00, 0, 1),
        MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
        MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
        MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
@@ -218,10 +264,20 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
        MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
 
+       MUX(0, "mout_sclk_spdif", mout_top0_group3, MUX_SEL_TOP0_PERIC0, 4, 3),
+       MUX(0, "mout_sclk_pcm1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 8, 2),
+       MUX(0, "mout_sclk_i2s1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 20, 2),
+
+       MUX(0, "mout_sclk_spi1", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 8, 2),
+       MUX(0, "mout_sclk_spi0", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 20, 2),
+
+       MUX(0, "mout_sclk_spi3", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 8, 2),
+       MUX(0, "mout_sclk_spi2", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 20, 2),
        MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
        MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
        MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
        MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
+       MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2),
 };
 
 static struct samsung_div_clock top0_div_clks[] __initdata = {
@@ -230,13 +286,40 @@ static struct samsung_div_clock top0_div_clks[] __initdata = {
        DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
                DIV_TOP03, 20, 6),
 
+       DIV(0, "dout_sclk_spdif", "mout_sclk_spdif", DIV_TOP0_PERIC0, 4, 4),
+       DIV(0, "dout_sclk_pcm1", "mout_sclk_pcm1", DIV_TOP0_PERIC0, 8, 12),
+       DIV(0, "dout_sclk_i2s1", "mout_sclk_i2s1", DIV_TOP0_PERIC0, 20, 10),
+
+       DIV(0, "dout_sclk_spi1", "mout_sclk_spi1", DIV_TOP0_PERIC1, 8, 12),
+       DIV(0, "dout_sclk_spi0", "mout_sclk_spi0", DIV_TOP0_PERIC1, 20, 12),
+
+       DIV(0, "dout_sclk_spi3", "mout_sclk_spi3", DIV_TOP0_PERIC2, 8, 12),
+       DIV(0, "dout_sclk_spi2", "mout_sclk_spi2", DIV_TOP0_PERIC2, 20, 12),
+
        DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
        DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
        DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
        DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
+       DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12),
 };
 
 static struct samsung_gate_clock top0_gate_clks[] __initdata = {
+       GATE(CLK_SCLK_SPDIF, "sclk_spdif", "dout_sclk_spdif",
+               ENABLE_SCLK_TOP0_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_sclk_pcm1",
+               ENABLE_SCLK_TOP0_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_sclk_i2s1",
+               ENABLE_SCLK_TOP0_PERIC0, 20, CLK_SET_RATE_PARENT, 0),
+
+       GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_sclk_spi1",
+               ENABLE_SCLK_TOP0_PERIC1, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_sclk_spi0",
+               ENABLE_SCLK_TOP0_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
+
+       GATE(CLK_SCLK_SPI3, "sclk_spi3", "dout_sclk_spi3",
+               ENABLE_SCLK_TOP0_PERIC2, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_sclk_spi2",
+               ENABLE_SCLK_TOP0_PERIC2, 20, CLK_SET_RATE_PARENT, 0),
        GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
                ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
        GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
@@ -245,6 +328,8 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = {
                ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
        GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
                ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
+       GATE(CLK_SCLK_SPI4, "sclk_spi4", "dout_sclk_spi4",
+               ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
@@ -343,6 +428,8 @@ static struct samsung_mux_clock top1_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
 
        MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
+       MUX(0, "mout_sclk_usbdrd300", mout_top1_group1,
+               MUX_SEL_TOP1_FSYS0, 28, 2),
 
        MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
        MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
@@ -356,6 +443,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
 
        DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
                DIV_TOP1_FSYS0, 24, 4),
+       DIV(0, "dout_sclk_usbdrd300", "mout_sclk_usbdrd300",
+               DIV_TOP1_FSYS0, 28, 4),
 
        DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
                DIV_TOP1_FSYS1, 24, 4),
@@ -366,6 +455,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
 static struct samsung_gate_clock top1_gate_clks[] __initdata = {
        GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
                ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300",
+               ENABLE_SCLK_TOP1_FSYS0, 28, 0, 0),
 
        GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
                ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
@@ -514,6 +605,7 @@ static void __init exynos7_clk_peric0_init(struct device_node *np)
 /* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
 #define MUX_SEL_PERIC10                        0x0200
 #define MUX_SEL_PERIC11                        0x0204
+#define MUX_SEL_PERIC12                        0x0208
 #define ENABLE_PCLK_PERIC1             0x0900
 #define ENABLE_SCLK_PERIC10            0x0A00
 
@@ -525,10 +617,16 @@ PNAME(mout_aclk_peric1_66_p)      = { "fin_pll", "dout_aclk_peric1_66" };
 PNAME(mout_sclk_uart1_p)       = { "fin_pll", "sclk_uart1" };
 PNAME(mout_sclk_uart2_p)       = { "fin_pll", "sclk_uart2" };
 PNAME(mout_sclk_uart3_p)       = { "fin_pll", "sclk_uart3" };
+PNAME(mout_sclk_spi0_p)                = { "fin_pll", "sclk_spi0" };
+PNAME(mout_sclk_spi1_p)                = { "fin_pll", "sclk_spi1" };
+PNAME(mout_sclk_spi2_p)                = { "fin_pll", "sclk_spi2" };
+PNAME(mout_sclk_spi3_p)                = { "fin_pll", "sclk_spi3" };
+PNAME(mout_sclk_spi4_p)                = { "fin_pll", "sclk_spi4" };
 
 static unsigned long peric1_clk_regs[] __initdata = {
        MUX_SEL_PERIC10,
        MUX_SEL_PERIC11,
+       MUX_SEL_PERIC12,
        ENABLE_PCLK_PERIC1,
        ENABLE_SCLK_PERIC10,
 };
@@ -537,6 +635,16 @@ static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
                MUX_SEL_PERIC10, 0, 1),
 
+       MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_p,
+               MUX_SEL_PERIC11, 0, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_p,
+               MUX_SEL_PERIC11, 4, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_p,
+               MUX_SEL_PERIC11, 8, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_p,
+               MUX_SEL_PERIC11, 12, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_p,
+               MUX_SEL_PERIC11, 16, 1, CLK_SET_RATE_PARENT, 0),
        MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
                MUX_SEL_PERIC11, 20, 1),
        MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
@@ -562,6 +670,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
                ENABLE_PCLK_PERIC1, 10, 0, 0),
        GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
                ENABLE_PCLK_PERIC1, 11, 0, 0),
+       GATE(PCLK_SPI0, "pclk_spi0", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 12, 0, 0),
+       GATE(PCLK_SPI1, "pclk_spi1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 13, 0, 0),
+       GATE(PCLK_SPI2, "pclk_spi2", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 14, 0, 0),
+       GATE(PCLK_SPI3, "pclk_spi3", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 15, 0, 0),
+       GATE(PCLK_SPI4, "pclk_spi4", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 16, 0, 0),
+       GATE(PCLK_I2S1, "pclk_i2s1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 17, CLK_SET_RATE_PARENT, 0),
+       GATE(PCLK_PCM1, "pclk_pcm1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 18, 0, 0),
+       GATE(PCLK_SPDIF, "pclk_spdif", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 19, 0, 0),
 
        GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
                ENABLE_SCLK_PERIC10, 9, 0, 0),
@@ -569,6 +693,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
                ENABLE_SCLK_PERIC10, 10, 0, 0),
        GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
                ENABLE_SCLK_PERIC10, 11, 0, 0),
+       GATE(SCLK_SPI0, "sclk_spi0_user", "mout_sclk_spi0_user",
+               ENABLE_SCLK_PERIC10, 12, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI1, "sclk_spi1_user", "mout_sclk_spi1_user",
+               ENABLE_SCLK_PERIC10, 13, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI2, "sclk_spi2_user", "mout_sclk_spi2_user",
+               ENABLE_SCLK_PERIC10, 14, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI3, "sclk_spi3_user", "mout_sclk_spi3_user",
+               ENABLE_SCLK_PERIC10, 15, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI4, "sclk_spi4_user", "mout_sclk_spi4_user",
+               ENABLE_SCLK_PERIC10, 16, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_I2S1, "sclk_i2s1_user", "sclk_i2s1",
+               ENABLE_SCLK_PERIC10, 17, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_PCM1, "sclk_pcm1_user", "sclk_pcm1",
+               ENABLE_SCLK_PERIC10, 18, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPDIF, "sclk_spdif_user", "sclk_spdif",
+               ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_cmu_info peric1_cmu_info __initdata = {
@@ -647,7 +787,12 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
 /* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
 #define MUX_SEL_FSYS00                 0x0200
 #define MUX_SEL_FSYS01                 0x0204
+#define MUX_SEL_FSYS02                 0x0208
+#define ENABLE_ACLK_FSYS00             0x0800
 #define ENABLE_ACLK_FSYS01             0x0804
+#define ENABLE_SCLK_FSYS01             0x0A04
+#define ENABLE_SCLK_FSYS02             0x0A08
+#define ENABLE_SCLK_FSYS04             0x0A10
 
 /*
  * List of parent clocks for Muxes in CMU_FSYS0
@@ -655,10 +800,29 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
 PNAME(mout_aclk_fsys0_200_p)   = { "fin_pll", "dout_aclk_fsys0_200" };
 PNAME(mout_sclk_mmc2_p)                = { "fin_pll", "sclk_mmc2" };
 
+PNAME(mout_sclk_usbdrd300_p)   = { "fin_pll", "sclk_usbdrd300" };
+PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_p)   = { "fin_pll",
+                               "phyclk_usbdrd300_udrd30_phyclock" };
+PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_p)        = { "fin_pll",
+                               "phyclk_usbdrd300_udrd30_pipe_pclk" };
+
+/* fixed rate clocks used in the FSYS0 block */
+static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
+       FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
+               CLK_IS_ROOT, 60000000),
+       FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
+               CLK_IS_ROOT, 125000000),
+};
+
 static unsigned long fsys0_clk_regs[] __initdata = {
        MUX_SEL_FSYS00,
        MUX_SEL_FSYS01,
+       MUX_SEL_FSYS02,
+       ENABLE_ACLK_FSYS00,
        ENABLE_ACLK_FSYS01,
+       ENABLE_SCLK_FSYS01,
+       ENABLE_SCLK_FSYS02,
+       ENABLE_SCLK_FSYS04,
 };
 
 static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
@@ -666,11 +830,49 @@ static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
                MUX_SEL_FSYS00, 24, 1),
 
        MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
+       MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_p,
+               MUX_SEL_FSYS01, 28, 1),
+
+       MUX(0, "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               mout_phyclk_usbdrd300_udrd30_pipe_pclk_p,
+               MUX_SEL_FSYS02, 24, 1),
+       MUX(0, "mout_phyclk_usbdrd300_udrd30_phyclk_user",
+               mout_phyclk_usbdrd300_udrd30_phyclk_p,
+               MUX_SEL_FSYS02, 28, 1),
 };
 
 static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
+       GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x",
+               "mout_aclk_fsys0_200_user",
+               ENABLE_ACLK_FSYS00, 19, 0, 0),
+       GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user",
+                       ENABLE_ACLK_FSYS00, 3, 0, 0),
+       GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user",
+                       ENABLE_ACLK_FSYS00, 4, 0, 0),
+
+       GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user",
+               ENABLE_ACLK_FSYS01, 29, 0, 0),
        GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
                ENABLE_ACLK_FSYS01, 31, 0, 0),
+
+       GATE(SCLK_USBDRD300_SUSPENDCLK, "sclk_usbdrd300_suspendclk",
+               "mout_sclk_usbdrd300_user",
+               ENABLE_SCLK_FSYS01, 4, 0, 0),
+       GATE(SCLK_USBDRD300_REFCLK, "sclk_usbdrd300_refclk", "fin_pll",
+               ENABLE_SCLK_FSYS01, 8, 0, 0),
+
+       GATE(PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER,
+               "phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               ENABLE_SCLK_FSYS02, 24, 0, 0),
+       GATE(PHYCLK_USBDRD300_UDRD30_PHYCLK_USER,
+               "phyclk_usbdrd300_udrd30_phyclk_user",
+               "mout_phyclk_usbdrd300_udrd30_phyclk_user",
+               ENABLE_SCLK_FSYS02, 28, 0, 0),
+
+       GATE(OSCCLK_PHY_CLKOUT_USB30_PHY, "oscclk_phy_clkout_usb30_phy",
+               "fin_pll",
+               ENABLE_SCLK_FSYS04, 28, 0, 0),
 };
 
 static struct samsung_cmu_info fsys0_cmu_info __initdata = {
@@ -741,3 +943,205 @@ static void __init exynos7_clk_fsys1_init(struct device_node *np)
 
 CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
        exynos7_clk_fsys1_init);
+
+#define MUX_SEL_MSCL                   0x0200
+#define DIV_MSCL                       0x0600
+#define ENABLE_ACLK_MSCL               0x0800
+#define ENABLE_PCLK_MSCL               0x0900
+
+/* List of parent clocks for Muxes in CMU_MSCL */
+PNAME(mout_aclk_mscl_532_user_p)       = { "fin_pll", "aclk_mscl_532" };
+
+static unsigned long mscl_clk_regs[] __initdata = {
+       MUX_SEL_MSCL,
+       DIV_MSCL,
+       ENABLE_ACLK_MSCL,
+       ENABLE_PCLK_MSCL,
+};
+
+static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
+       MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532",
+               mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1),
+};
+static struct samsung_div_clock mscl_div_clks[] __initdata = {
+       DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532",
+                       DIV_MSCL, 0, 3),
+};
+static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
+
+       GATE(ACLK_MSCL_0, "aclk_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 31, 0, 0),
+       GATE(ACLK_MSCL_1, "aclk_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 30, 0, 0),
+       GATE(ACLK_JPEG, "aclk_jpeg", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 29, 0, 0),
+       GATE(ACLK_G2D, "aclk_g2d", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 28, 0, 0),
+       GATE(ACLK_LH_ASYNC_SI_MSCL_0, "aclk_lh_async_si_mscl_0",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 27, 0, 0),
+       GATE(ACLK_LH_ASYNC_SI_MSCL_1, "aclk_lh_async_si_mscl_1",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 26, 0, 0),
+       GATE(ACLK_XIU_MSCLX_0, "aclk_xiu_msclx_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 25, 0, 0),
+       GATE(ACLK_XIU_MSCLX_1, "aclk_xiu_msclx_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 24, 0, 0),
+       GATE(ACLK_AXI2ACEL_BRIDGE, "aclk_axi2acel_bridge",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 23, 0, 0),
+       GATE(ACLK_QE_MSCL_0, "aclk_qe_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 22, 0, 0),
+       GATE(ACLK_QE_MSCL_1, "aclk_qe_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 21, 0, 0),
+       GATE(ACLK_QE_JPEG, "aclk_qe_jpeg", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 20, 0, 0),
+       GATE(ACLK_QE_G2D, "aclk_qe_g2d", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 19, 0, 0),
+       GATE(ACLK_PPMU_MSCL_0, "aclk_ppmu_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 18, 0, 0),
+       GATE(ACLK_PPMU_MSCL_1, "aclk_ppmu_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 17, 0, 0),
+       GATE(ACLK_MSCLNP_133, "aclk_msclnp_133", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 16, 0, 0),
+       GATE(ACLK_AHB2APB_MSCL0P, "aclk_ahb2apb_mscl0p",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 15, 0, 0),
+       GATE(ACLK_AHB2APB_MSCL1P, "aclk_ahb2apb_mscl1p",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 14, 0, 0),
+
+       GATE(PCLK_MSCL_0, "pclk_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 31, 0, 0),
+       GATE(PCLK_MSCL_1, "pclk_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 30, 0, 0),
+       GATE(PCLK_JPEG, "pclk_jpeg", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 29, 0, 0),
+       GATE(PCLK_G2D, "pclk_g2d", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 28, 0, 0),
+       GATE(PCLK_QE_MSCL_0, "pclk_qe_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 27, 0, 0),
+       GATE(PCLK_QE_MSCL_1, "pclk_qe_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 26, 0, 0),
+       GATE(PCLK_QE_JPEG, "pclk_qe_jpeg", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 25, 0, 0),
+       GATE(PCLK_QE_G2D, "pclk_qe_g2d", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 24, 0, 0),
+       GATE(PCLK_PPMU_MSCL_0, "pclk_ppmu_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 23, 0, 0),
+       GATE(PCLK_PPMU_MSCL_1, "pclk_ppmu_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 22, 0, 0),
+       GATE(PCLK_AXI2ACEL_BRIDGE, "pclk_axi2acel_bridge", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 21, 0, 0),
+       GATE(PCLK_PMU_MSCL, "pclk_pmu_mscl", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 20, 0, 0),
+};
+
+static struct samsung_cmu_info mscl_cmu_info __initdata = {
+       .mux_clks               = mscl_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(mscl_mux_clks),
+       .div_clks               = mscl_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(mscl_div_clks),
+       .gate_clks              = mscl_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(mscl_gate_clks),
+       .nr_clk_ids             = MSCL_NR_CLK,
+       .clk_regs               = mscl_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(mscl_clk_regs),
+};
+
+static void __init exynos7_clk_mscl_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &mscl_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl",
+               exynos7_clk_mscl_init);
+
+/* Register Offset definitions for CMU_AUD (0x114C0000) */
+#define        MUX_SEL_AUD                     0x0200
+#define        DIV_AUD0                        0x0600
+#define        DIV_AUD1                        0x0604
+#define        ENABLE_ACLK_AUD                 0x0800
+#define        ENABLE_PCLK_AUD                 0x0900
+#define        ENABLE_SCLK_AUD                 0x0A00
+
+/*
+ * List of parent clocks for Muxes in CMU_AUD
+ */
+PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" };
+PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" };
+
+static unsigned long aud_clk_regs[] __initdata = {
+       MUX_SEL_AUD,
+       DIV_AUD0,
+       DIV_AUD1,
+       ENABLE_ACLK_AUD,
+       ENABLE_PCLK_AUD,
+       ENABLE_SCLK_AUD,
+};
+
+static struct samsung_mux_clock aud_mux_clks[] __initdata = {
+       MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1),
+       MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1),
+       MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1),
+};
+
+static struct samsung_div_clock aud_div_clks[] __initdata = {
+       DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4),
+       DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4),
+       DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4),
+
+       DIV(0, "dout_sclk_i2s", "mout_sclk_i2s", DIV_AUD1, 0, 4),
+       DIV(0, "dout_sclk_pcm", "mout_sclk_pcm", DIV_AUD1, 4, 8),
+       DIV(0, "dout_sclk_uart", "dout_aud_cdclk", DIV_AUD1, 12, 4),
+       DIV(0, "dout_sclk_slimbus", "dout_aud_cdclk", DIV_AUD1, 16, 5),
+       DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4),
+};
+
+static struct samsung_gate_clock aud_gate_clks[] __initdata = {
+       GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm",
+                       ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s",
+                       ENABLE_SCLK_AUD, 28, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "sclk_uart", "dout_sclk_uart", ENABLE_SCLK_AUD, 29, 0, 0),
+       GATE(0, "sclk_slimbus", "dout_sclk_slimbus",
+                       ENABLE_SCLK_AUD, 30, 0, 0),
+
+       GATE(0, "pclk_dbg_aud", "dout_aud_pclk_dbg", ENABLE_PCLK_AUD, 19, 0, 0),
+       GATE(0, "pclk_gpio_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 20, 0, 0),
+       GATE(0, "pclk_wdt1", "dout_aclk_aud", ENABLE_PCLK_AUD, 22, 0, 0),
+       GATE(0, "pclk_wdt0", "dout_aclk_aud", ENABLE_PCLK_AUD, 23, 0, 0),
+       GATE(0, "pclk_slimbus", "dout_aclk_aud", ENABLE_PCLK_AUD, 24, 0, 0),
+       GATE(0, "pclk_uart", "dout_aclk_aud", ENABLE_PCLK_AUD, 25, 0, 0),
+       GATE(PCLK_PCM, "pclk_pcm", "dout_aclk_aud",
+                       ENABLE_PCLK_AUD, 26, CLK_SET_RATE_PARENT, 0),
+       GATE(PCLK_I2S, "pclk_i2s", "dout_aclk_aud",
+                       ENABLE_PCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "pclk_timer", "dout_aclk_aud", ENABLE_PCLK_AUD, 28, 0, 0),
+       GATE(0, "pclk_smmu_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 31, 0, 0),
+
+       GATE(0, "aclk_smmu_aud", "dout_aclk_aud", ENABLE_ACLK_AUD, 27, 0, 0),
+       GATE(0, "aclk_acel_lh_async_si_top", "dout_aclk_aud",
+                        ENABLE_ACLK_AUD, 28, 0, 0),
+       GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0),
+};
+
+static struct samsung_cmu_info aud_cmu_info __initdata = {
+       .mux_clks               = aud_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(aud_mux_clks),
+       .div_clks               = aud_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(aud_div_clks),
+       .gate_clks              = aud_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(aud_gate_clks),
+       .nr_clk_ids             = AUD_NR_CLK,
+       .clk_regs               = aud_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(aud_clk_regs),
+};
+
+static void __init exynos7_clk_aud_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &aud_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_aud, "samsung,exynos7-clock-aud",
+               exynos7_clk_aud_init);
index 4bda54095a16a0f3a1a670d4e949a4267f411041..9e1f88c04fd46dd583e1b2ccd90fc96c8cae06e8 100644 (file)
@@ -374,19 +374,24 @@ static void samsung_clk_sleep_init(void __iomem *reg_base,
  * Common function which registers plls, muxes, dividers and gates
  * for each CMU. It also add CMU register list to register cache.
  */
-void __init samsung_cmu_register_one(struct device_node *np,
+struct samsung_clk_provider * __init samsung_cmu_register_one(
+                       struct device_node *np,
                        struct samsung_cmu_info *cmu)
 {
        void __iomem *reg_base;
        struct samsung_clk_provider *ctx;
 
        reg_base = of_iomap(np, 0);
-       if (!reg_base)
+       if (!reg_base) {
                panic("%s: failed to map registers\n", __func__);
+               return NULL;
+       }
 
        ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
-       if (!ctx)
+       if (!ctx) {
                panic("%s: unable to alllocate ctx\n", __func__);
+               return ctx;
+       }
 
        if (cmu->pll_clks)
                samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
@@ -410,4 +415,6 @@ void __init samsung_cmu_register_one(struct device_node *np,
                        cmu->nr_clk_regs);
 
        samsung_clk_of_add_provider(np, ctx);
+
+       return ctx;
 }
index 8acabe1f32c4d4f13264eb26088b237778becf6b..e4c75383cea718c7fb563db70fbdd8452b1f9bd8 100644 (file)
@@ -392,7 +392,8 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
                        struct samsung_pll_clock *pll_list,
                        unsigned int nr_clk, void __iomem *base);
 
-extern void __init samsung_cmu_register_one(struct device_node *,
+extern struct samsung_clk_provider __init *samsung_cmu_register_one(
+                       struct device_node *,
                        struct samsung_cmu_info *);
 
 extern unsigned long _get_rate(const char *clk_name);
index f83980f2b9568ffa7df081bf5593518e04bcde0b..0689d7fb2666b1956728d85f9373b9cbd2c600b1 100644 (file)
@@ -1,9 +1,11 @@
 obj-$(CONFIG_ARCH_EMEV2)               += clk-emev2.o
 obj-$(CONFIG_ARCH_R7S72100)            += clk-rz.o
+obj-$(CONFIG_ARCH_R8A73A4)             += clk-r8a73a4.o
 obj-$(CONFIG_ARCH_R8A7740)             += clk-r8a7740.o
 obj-$(CONFIG_ARCH_R8A7779)             += clk-r8a7779.o
 obj-$(CONFIG_ARCH_R8A7790)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7791)             += clk-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7793)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7794)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_SH73A0)              += clk-sh73a0.o
 obj-$(CONFIG_ARCH_SHMOBILE_MULTI)      += clk-div6.o
index 639241e31e03ec244907761ef430ed95bc53adde..036a692c72195db93760e40dc1fcb1928b1ecded 100644 (file)
@@ -54,12 +54,19 @@ static int cpg_div6_clock_enable(struct clk_hw *hw)
 static void cpg_div6_clock_disable(struct clk_hw *hw)
 {
        struct div6_clock *clock = to_div6_clock(hw);
+       u32 val;
 
-       /* DIV6 clocks require the divisor field to be non-zero when stopping
-        * the clock.
+       val = clk_readl(clock->reg);
+       val |= CPG_DIV6_CKSTP;
+       /*
+        * DIV6 clocks require the divisor field to be non-zero when stopping
+        * the clock. However, some clocks (e.g. ZB on sh73a0) fail to be
+        * re-enabled later if the divisor field is changed when stopping the
+        * clock
         */
-       clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK,
-                  clock->reg);
+       if (!(val & CPG_DIV6_DIV_MASK))
+               val |= CPG_DIV6_DIV_MASK;
+       clk_writel(val, clock->reg);
 }
 
 static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
@@ -83,6 +90,9 @@ static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
 {
        unsigned int div;
 
+       if (!rate)
+               rate = 1;
+
        div = DIV_ROUND_CLOSEST(parent_rate, rate);
        return clamp_t(unsigned int, div, 1, 64);
 }
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c
new file mode 100644 (file)
index 0000000..29b9a0b
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * r8a73a4 Core CPG Clocks
+ *
+ * Copyright (C) 2014  Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+struct r8a73a4_cpg {
+       struct clk_onecell_data data;
+       spinlock_t lock;
+       void __iomem *reg;
+};
+
+#define CPG_CKSCR      0xc0
+#define CPG_FRQCRA     0x00
+#define CPG_FRQCRB     0x04
+#define CPG_FRQCRC     0xe0
+#define CPG_PLL0CR     0xd8
+#define CPG_PLL1CR     0x28
+#define CPG_PLL2CR     0x2c
+#define CPG_PLL2HCR    0xe4
+#define CPG_PLL2SCR    0xf4
+
+#define CLK_ENABLE_ON_INIT BIT(0)
+
+struct div4_clk {
+       const char *name;
+       unsigned int reg;
+       unsigned int shift;
+};
+
+static struct div4_clk div4_clks[] = {
+       { "i",  CPG_FRQCRA, 20 },
+       { "m3", CPG_FRQCRA, 12 },
+       { "b",  CPG_FRQCRA,  8 },
+       { "m1", CPG_FRQCRA,  4 },
+       { "m2", CPG_FRQCRA,  0 },
+       { "zx", CPG_FRQCRB, 12 },
+       { "zs", CPG_FRQCRB,  8 },
+       { "hp", CPG_FRQCRB,  4 },
+       { NULL, 0, 0 },
+};
+
+static const struct clk_div_table div4_div_table[] = {
+       { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 }, { 5, 12 },
+       { 6, 16 }, { 7, 18 }, { 8, 24 }, { 10, 36 }, { 11, 48 },
+       { 12, 10 }, { 0, 0 }
+};
+
+static struct clk * __init
+r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
+                            const char *name)
+{
+       const struct clk_div_table *table = NULL;
+       const char *parent_name;
+       unsigned int shift, reg;
+       unsigned int mult = 1;
+       unsigned int div = 1;
+
+
+       if (!strcmp(name, "main")) {
+               u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
+
+               switch ((ckscr >> 28) & 3) {
+               case 0: /* extal1 */
+                       parent_name = of_clk_get_parent_name(np, 0);
+                       break;
+               case 1: /* extal1 / 2 */
+                       parent_name = of_clk_get_parent_name(np, 0);
+                       div = 2;
+                       break;
+               case 2: /* extal2 */
+                       parent_name = of_clk_get_parent_name(np, 1);
+                       break;
+               case 3: /* extal2 / 2 */
+                       parent_name = of_clk_get_parent_name(np, 1);
+                       div = 2;
+                       break;
+               }
+       } else if (!strcmp(name, "pll0")) {
+               /* PLL0/1 are configurable multiplier clocks. Register them as
+                * fixed factor clocks for now as there's no generic multiplier
+                * clock implementation and we currently have no need to change
+                * the multiplier value.
+                */
+               u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+
+               parent_name = "main";
+               mult = ((value >> 24) & 0x7f) + 1;
+               if (value & BIT(20))
+                       div = 2;
+       } else if (!strcmp(name, "pll1")) {
+               u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
+
+               parent_name = "main";
+               /* XXX: enable bit? */
+               mult = ((value >> 24) & 0x7f) + 1;
+               if (value & BIT(7))
+                       div = 2;
+       } else if (!strncmp(name, "pll2", 4)) {
+               u32 value, cr;
+
+               switch (name[4]) {
+               case 0:
+                       cr = CPG_PLL2CR;
+                       break;
+               case 's':
+                       cr = CPG_PLL2SCR;
+                       break;
+               case 'h':
+                       cr = CPG_PLL2HCR;
+                       break;
+               default:
+                       return ERR_PTR(-EINVAL);
+               }
+               value = clk_readl(cpg->reg + cr);
+               switch ((value >> 5) & 7) {
+               case 0:
+                       parent_name = "main";
+                       div = 2;
+                       break;
+               case 1:
+                       parent_name = "extal2";
+                       div = 2;
+                       break;
+               case 3:
+                       parent_name = "extal2";
+                       div = 4;
+                       break;
+               case 4:
+                       parent_name = "main";
+                       break;
+               case 5:
+                       parent_name = "extal2";
+                       break;
+               default:
+                       pr_warn("%s: unexpected parent of %s\n", __func__,
+                               name);
+                       return ERR_PTR(-EINVAL);
+               }
+               /* XXX: enable bit? */
+               mult = ((value >> 24) & 0x7f) + 1;
+       } else if (!strcmp(name, "z") || !strcmp(name, "z2")) {
+               u32 shift = 8;
+
+               parent_name = "pll0";
+               if (name[1] == '2') {
+                       div = 2;
+                       shift = 0;
+               }
+               div *= 32;
+               mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
+                      & 0x1f);
+       } else {
+               struct div4_clk *c;
+
+               for (c = div4_clks; c->name; c++) {
+                       if (!strcmp(name, c->name))
+                               break;
+               }
+               if (!c->name)
+                       return ERR_PTR(-EINVAL);
+
+               parent_name = "pll1";
+               table = div4_div_table;
+               reg = c->reg;
+               shift = c->shift;
+       }
+
+       if (!table) {
+               return clk_register_fixed_factor(NULL, name, parent_name, 0,
+                                                mult, div);
+       } else {
+               return clk_register_divider_table(NULL, name, parent_name, 0,
+                                                 cpg->reg + reg, shift, 4, 0,
+                                                 table, &cpg->lock);
+       }
+}
+
+static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
+{
+       struct r8a73a4_cpg *cpg;
+       struct clk **clks;
+       unsigned int i;
+       int num_clks;
+
+       num_clks = of_property_count_strings(np, "clock-output-names");
+       if (num_clks < 0) {
+               pr_err("%s: failed to count clocks\n", __func__);
+               return;
+       }
+
+       cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+       clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
+       if (cpg == NULL || clks == NULL) {
+               /* We're leaking memory on purpose, there's no point in cleaning
+                * up as the system won't boot anyway.
+                */
+               return;
+       }
+
+       spin_lock_init(&cpg->lock);
+
+       cpg->data.clks = clks;
+       cpg->data.clk_num = num_clks;
+
+       cpg->reg = of_iomap(np, 0);
+       if (WARN_ON(cpg->reg == NULL))
+               return;
+
+       for (i = 0; i < num_clks; ++i) {
+               const char *name;
+               struct clk *clk;
+
+               of_property_read_string_index(np, "clock-output-names", i,
+                                             &name);
+
+               clk = r8a73a4_cpg_register_clock(np, cpg, name);
+               if (IS_ERR(clk))
+                       pr_err("%s: failed to register %s %s clock (%ld)\n",
+                              __func__, np->name, name, PTR_ERR(clk));
+               else
+                       cpg->data.clks[i] = clk;
+       }
+
+       of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+}
+CLK_OF_DECLARE(r8a73a4_cpg_clks, "renesas,r8a73a4-cpg-clocks",
+              r8a73a4_cpg_clocks_init);
index e996425d06a920728cd4f32448c19d08efb5a776..acfb6d7dbd6bc049fe39213d675d5a2602e65d9e 100644 (file)
@@ -33,6 +33,8 @@ struct rcar_gen2_cpg {
 #define CPG_FRQCRC                     0x000000e0
 #define CPG_FRQCRC_ZFC_MASK            (0x1f << 8)
 #define CPG_FRQCRC_ZFC_SHIFT           8
+#define CPG_ADSPCKCR                   0x0000025c
+#define CPG_RCANCKCR                   0x00000270
 
 /* -----------------------------------------------------------------------------
  * Z Clock
@@ -161,6 +163,88 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
        return clk;
 }
 
+static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
+                                                struct device_node *np)
+{
+       const char *parent_name = of_clk_get_parent_name(np, 1);
+       struct clk_fixed_factor *fixed;
+       struct clk_gate *gate;
+       struct clk *clk;
+
+       fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+       if (!fixed)
+               return ERR_PTR(-ENOMEM);
+
+       fixed->mult = 1;
+       fixed->div = 6;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate) {
+               kfree(fixed);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gate->reg = cpg->reg + CPG_RCANCKCR;
+       gate->bit_idx = 8;
+       gate->flags = CLK_GATE_SET_TO_DISABLE;
+       gate->lock = &cpg->lock;
+
+       clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL,
+                                    &fixed->hw, &clk_fixed_factor_ops,
+                                    &gate->hw, &clk_gate_ops, 0);
+       if (IS_ERR(clk)) {
+               kfree(gate);
+               kfree(fixed);
+       }
+
+       return clk;
+}
+
+/* ADSP divisors */
+static const struct clk_div_table cpg_adsp_div_table[] = {
+       {  1,  3 }, {  2,  4 }, {  3,  6 }, {  4,  8 },
+       {  5, 12 }, {  6, 16 }, {  7, 18 }, {  8, 24 },
+       { 10, 36 }, { 11, 48 }, {  0,  0 },
+};
+
+static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
+{
+       const char *parent_name = "pll1";
+       struct clk_divider *div;
+       struct clk_gate *gate;
+       struct clk *clk;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       div->reg = cpg->reg + CPG_ADSPCKCR;
+       div->width = 4;
+       div->table = cpg_adsp_div_table;
+       div->lock = &cpg->lock;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate) {
+               kfree(div);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gate->reg = cpg->reg + CPG_ADSPCKCR;
+       gate->bit_idx = 8;
+       gate->flags = CLK_GATE_SET_TO_DISABLE;
+       gate->lock = &cpg->lock;
+
+       clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL,
+                                    &div->hw, &clk_divider_ops,
+                                    &gate->hw, &clk_gate_ops, 0);
+       if (IS_ERR(clk)) {
+               kfree(gate);
+               kfree(div);
+       }
+
+       return clk;
+}
+
 /* -----------------------------------------------------------------------------
  * CPG Clock Data
  */
@@ -263,6 +347,10 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
                shift = 0;
        } else if (!strcmp(name, "z")) {
                return cpg_z_clk_register(cpg);
+       } else if (!strcmp(name, "rcan")) {
+               return cpg_rcan_clk_register(cpg, np);
+       } else if (!strcmp(name, "adsp")) {
+               return cpg_adsp_clk_register(cpg);
        } else {
                return ERR_PTR(-EINVAL);
        }
index 2282cef9f2ffb0d834b84be7b2b00c32190bde15..bf12a25eb3a22aab048f04d5280195d21bd7dd01 100644 (file)
@@ -37,8 +37,8 @@ static int flexgen_enable(struct clk_hw *hw)
        struct clk_hw *pgate_hw = &flexgen->pgate.hw;
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
-       pgate_hw->clk = hw->clk;
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(pgate_hw, hw);
+       __clk_hw_set_clk(fgate_hw, hw);
 
        clk_gate_ops.enable(pgate_hw);
 
@@ -54,7 +54,7 @@ static void flexgen_disable(struct clk_hw *hw)
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
        /* disable only the final gate */
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(fgate_hw, hw);
 
        clk_gate_ops.disable(fgate_hw);
 
@@ -66,7 +66,7 @@ static int flexgen_is_enabled(struct clk_hw *hw)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(fgate_hw, hw);
 
        if (!clk_gate_ops.is_enabled(fgate_hw))
                return 0;
@@ -79,7 +79,7 @@ static u8 flexgen_get_parent(struct clk_hw *hw)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *mux_hw = &flexgen->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return clk_mux_ops.get_parent(mux_hw);
 }
@@ -89,7 +89,7 @@ static int flexgen_set_parent(struct clk_hw *hw, u8 index)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *mux_hw = &flexgen->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return clk_mux_ops.set_parent(mux_hw, index);
 }
@@ -124,8 +124,8 @@ unsigned long flexgen_recalc_rate(struct clk_hw *hw,
        struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
        unsigned long mid_rate;
 
-       pdiv_hw->clk = hw->clk;
-       fdiv_hw->clk = hw->clk;
+       __clk_hw_set_clk(pdiv_hw, hw);
+       __clk_hw_set_clk(fdiv_hw, hw);
 
        mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
 
@@ -138,16 +138,27 @@ static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
        struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
-       unsigned long primary_div = 0;
+       unsigned long div = 0;
        int ret = 0;
 
-       pdiv_hw->clk = hw->clk;
-       fdiv_hw->clk = hw->clk;
+       __clk_hw_set_clk(pdiv_hw, hw);
+       __clk_hw_set_clk(fdiv_hw, hw);
 
-       primary_div = clk_best_div(parent_rate, rate);
+       div = clk_best_div(parent_rate, rate);
 
-       clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
-       ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div);
+       /*
+       * pdiv is mainly targeted for low freq results, while fdiv
+       * should be used for div <= 64. The other way round can
+       * lead to 'duty cycle' issues.
+       */
+
+       if (div <= 64) {
+               clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate);
+               ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div);
+       } else {
+               clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
+               ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div);
+       }
 
        return ret;
 }
index 79dc40b5cc688fb1a028ca8407cd3feb929ca873..9a15ec344a85900ea68029c730c02ad01ede5335 100644 (file)
@@ -94,7 +94,7 @@ static int clkgena_divmux_enable(struct clk_hw *hw)
        unsigned long timeout;
        int ret = 0;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
        if (ret)
@@ -116,7 +116,7 @@ static void clkgena_divmux_disable(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
 }
@@ -126,7 +126,7 @@ static int clkgena_divmux_is_enabled(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
 }
@@ -136,7 +136,7 @@ u8 clkgena_divmux_get_parent(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
        if ((s8)genamux->muxsel < 0) {
@@ -174,7 +174,7 @@ unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.recalc_rate(div_hw, parent_rate);
 }
@@ -185,7 +185,7 @@ static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
 }
@@ -196,7 +196,7 @@ static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.round_rate(div_hw, rate, prate);
 }
index a66953c0f43094a4b96a816fb0d4ff37bfedf466..3a5292e3fcf8086f6418c8dceae6c59aab79b082 100644 (file)
@@ -8,6 +8,7 @@ obj-y += clk-a20-gmac.o
 obj-y += clk-mod0.o
 obj-y += clk-sun8i-mbus.o
 obj-y += clk-sun9i-core.o
+obj-y += clk-sun9i-mmc.o
 
 obj-$(CONFIG_MFD_SUN6I_PRCM) += \
        clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
index 62e08fb58554cbe8d4bf8f1c5a08f08bbd58ec68..8c20190a3e9f4e134824449c10a72e9e13b5b957 100644 (file)
@@ -80,6 +80,8 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_p)
 {
@@ -156,9 +158,10 @@ static const struct clk_ops clk_factors_ops = {
        .set_rate = clk_factors_set_rate,
 };
 
-struct clk * __init sunxi_factors_register(struct device_node *node,
-                                          const struct factors_data *data,
-                                          spinlock_t *lock)
+struct clk *sunxi_factors_register(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock,
+                                  void __iomem *reg)
 {
        struct clk *clk;
        struct clk_factors *factors;
@@ -168,11 +171,8 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
        struct clk_hw *mux_hw = NULL;
        const char *clk_name = node->name;
        const char *parents[FACTORS_MAX_PARENTS];
-       void __iomem *reg;
        int i = 0;
 
-       reg = of_iomap(node, 0);
-
        /* if we have a mux, we will have >1 parents */
        while (i < FACTORS_MAX_PARENTS &&
               (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
index 912238fde1324224863035da8a5836b5c276e8a0..171085ab5513e4f724017d05d6f91929262a4f78 100644 (file)
@@ -36,8 +36,9 @@ struct clk_factors {
        spinlock_t *lock;
 };
 
-struct clk * __init sunxi_factors_register(struct device_node *node,
-                                          const struct factors_data *data,
-                                          spinlock_t *lock);
+struct clk *sunxi_factors_register(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock,
+                                  void __iomem *reg);
 
 #endif
index da0524eaee9406aff6c2d73f8b56b82c12360a16..ec8f5a1fca09f4240c433a1de2ea8207e6e369e5 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of_address.h>
+#include <linux/platform_device.h>
 
 #include "clk-factors.h"
 
@@ -67,7 +68,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
        .pwidth = 2,
 };
 
-static const struct factors_data sun4i_a10_mod0_data __initconst = {
+static const struct factors_data sun4i_a10_mod0_data = {
        .enable = 31,
        .mux = 24,
        .muxmask = BIT(1) | BIT(0),
@@ -79,15 +80,95 @@ static DEFINE_SPINLOCK(sun4i_a10_mod0_lock);
 
 static void __init sun4i_a10_mod0_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun4i_a10_mod0_lock);
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               /*
+                * This happens with mod0 clk nodes instantiated through
+                * mfd, as those do not have their resources assigned at
+                * CLK_OF_DECLARE time yet, so do not print an error.
+                */
+               return;
+       }
+
+       sunxi_factors_register(node, &sun4i_a10_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
 }
 CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup);
 
+static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct resource *r;
+       void __iomem *reg;
+
+       if (!np)
+               return -ENODEV;
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       reg = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(reg))
+               return PTR_ERR(reg);
+
+       sunxi_factors_register(np, &sun4i_a10_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
+       return 0;
+}
+
+static const struct of_device_id sun4i_a10_mod0_clk_dt_ids[] = {
+       { .compatible = "allwinner,sun4i-a10-mod0-clk" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver sun4i_a10_mod0_clk_driver = {
+       .driver = {
+               .name = "sun4i-a10-mod0-clk",
+               .of_match_table = sun4i_a10_mod0_clk_dt_ids,
+       },
+       .probe = sun4i_a10_mod0_clk_probe,
+};
+module_platform_driver(sun4i_a10_mod0_clk_driver);
+
+static const struct factors_data sun9i_a80_mod0_data __initconst = {
+       .enable = 31,
+       .mux = 24,
+       .muxmask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+       .table = &sun4i_a10_mod0_config,
+       .getter = sun4i_a10_get_mod0_factors,
+};
+
+static void __init sun9i_a80_mod0_setup(struct device_node *node)
+{
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for mod0-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
+}
+CLK_OF_DECLARE(sun9i_a80_mod0, "allwinner,sun9i-a80-mod0-clk", sun9i_a80_mod0_setup);
+
 static DEFINE_SPINLOCK(sun5i_a13_mbus_lock);
 
 static void __init sun5i_a13_mbus_setup(struct device_node *node)
 {
-       struct clk *mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun5i_a13_mbus_lock);
+       struct clk *mbus;
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for a13-mbus-clk\n");
+               return;
+       }
+
+       mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data,
+                                     &sun5i_a13_mbus_lock, reg);
 
        /* The MBUS clocks needs to be always enabled */
        __clk_get(mbus);
@@ -95,14 +176,10 @@ static void __init sun5i_a13_mbus_setup(struct device_node *node)
 }
 CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup);
 
-struct mmc_phase_data {
-       u8      offset;
-};
-
 struct mmc_phase {
        struct clk_hw           hw;
+       u8                      offset;
        void __iomem            *reg;
-       struct mmc_phase_data   *data;
        spinlock_t              *lock;
 };
 
@@ -118,7 +195,7 @@ static int mmc_get_phase(struct clk_hw *hw)
        u8 delay;
 
        value = readl(phase->reg);
-       delay = (value >> phase->data->offset) & 0x3;
+       delay = (value >> phase->offset) & 0x3;
 
        if (!delay)
                return 180;
@@ -206,8 +283,8 @@ static int mmc_set_phase(struct clk_hw *hw, int degrees)
 
        spin_lock_irqsave(phase->lock, flags);
        value = readl(phase->reg);
-       value &= ~GENMASK(phase->data->offset + 3, phase->data->offset);
-       value |= delay << phase->data->offset;
+       value &= ~GENMASK(phase->offset + 3, phase->offset);
+       value |= delay << phase->offset;
        writel(value, phase->reg);
        spin_unlock_irqrestore(phase->lock, flags);
 
@@ -219,66 +296,97 @@ static const struct clk_ops mmc_clk_ops = {
        .set_phase      = mmc_set_phase,
 };
 
-static void __init sun4i_a10_mmc_phase_setup(struct device_node *node,
-                                            struct mmc_phase_data *data)
+/*
+ * sunxi_mmc_setup - Common setup function for mmc module clocks
+ *
+ * The only difference between module clocks on different platforms is the
+ * width of the mux register bits and the valid values, which are passed in
+ * through struct factors_data. The phase clocks parts are identical.
+ */
+static void __init sunxi_mmc_setup(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock)
 {
-       const char *parent_names[1] = { of_clk_get_parent_name(node, 0) };
-       struct clk_init_data init = {
-               .num_parents    = 1,
-               .parent_names   = parent_names,
-               .ops            = &mmc_clk_ops,
-       };
-
-       struct mmc_phase *phase;
-       struct clk *clk;
-
-       phase = kmalloc(sizeof(*phase), GFP_KERNEL);
-       if (!phase)
+       struct clk_onecell_data *clk_data;
+       const char *parent;
+       void __iomem *reg;
+       int i;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Couldn't map the %s clock registers\n", node->name);
                return;
+       }
 
-       phase->hw.init = &init;
-
-       phase->reg = of_iomap(node, 0);
-       if (!phase->reg)
-               goto err_free;
-
-       phase->data = data;
-       phase->lock = &sun4i_a10_mod0_lock;
-
-       if (of_property_read_string(node, "clock-output-names", &init.name))
-               init.name = node->name;
+       clk_data = kmalloc(sizeof(*clk_data), GFP_KERNEL);
+       if (!clk_data)
+               return;
 
-       clk = clk_register(NULL, &phase->hw);
-       if (IS_ERR(clk))
-               goto err_unmap;
+       clk_data->clks = kcalloc(3, sizeof(*clk_data->clks), GFP_KERNEL);
+       if (!clk_data->clks)
+               goto err_free_data;
+
+       clk_data->clk_num = 3;
+       clk_data->clks[0] = sunxi_factors_register(node, data, lock, reg);
+       if (!clk_data->clks[0])
+               goto err_free_clks;
+
+       parent = __clk_get_name(clk_data->clks[0]);
+
+       for (i = 1; i < 3; i++) {
+               struct clk_init_data init = {
+                       .num_parents    = 1,
+                       .parent_names   = &parent,
+                       .ops            = &mmc_clk_ops,
+               };
+               struct mmc_phase *phase;
+
+               phase = kmalloc(sizeof(*phase), GFP_KERNEL);
+               if (!phase)
+                       continue;
+
+               phase->hw.init = &init;
+               phase->reg = reg;
+               phase->lock = lock;
+
+               if (i == 1)
+                       phase->offset = 8;
+               else
+                       phase->offset = 20;
+
+               if (of_property_read_string_index(node, "clock-output-names",
+                                                 i, &init.name))
+                       init.name = node->name;
+
+               clk_data->clks[i] = clk_register(NULL, &phase->hw);
+               if (IS_ERR(clk_data->clks[i])) {
+                       kfree(phase);
+                       continue;
+               }
+       }
 
-       of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 
        return;
 
-err_unmap:
-       iounmap(phase->reg);
-err_free:
-       kfree(phase);
+err_free_clks:
+       kfree(clk_data->clks);
+err_free_data:
+       kfree(clk_data);
 }
 
+static DEFINE_SPINLOCK(sun4i_a10_mmc_lock);
 
-static struct mmc_phase_data mmc_output_clk = {
-       .offset = 8,
-};
-
-static struct mmc_phase_data mmc_sample_clk = {
-       .offset = 20,
-};
-
-static void __init sun4i_a10_mmc_output_setup(struct device_node *node)
+static void __init sun4i_a10_mmc_setup(struct device_node *node)
 {
-       sun4i_a10_mmc_phase_setup(node, &mmc_output_clk);
+       sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock);
 }
-CLK_OF_DECLARE(sun4i_a10_mmc_output, "allwinner,sun4i-a10-mmc-output-clk", sun4i_a10_mmc_output_setup);
+CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup);
+
+static DEFINE_SPINLOCK(sun9i_a80_mmc_lock);
 
-static void __init sun4i_a10_mmc_sample_setup(struct device_node *node)
+static void __init sun9i_a80_mmc_setup(struct device_node *node)
 {
-       sun4i_a10_mmc_phase_setup(node, &mmc_sample_clk);
+       sunxi_mmc_setup(node, &sun9i_a80_mod0_data, &sun9i_a80_mmc_lock);
 }
-CLK_OF_DECLARE(sun4i_a10_mmc_sample, "allwinner,sun4i-a10-mmc-sample-clk", sun4i_a10_mmc_sample_setup);
+CLK_OF_DECLARE(sun9i_a80_mmc, "allwinner,sun9i-a80-mmc-clk", sun9i_a80_mmc_setup);
index 3d282fb8f85cc204d84c182aa4b16ae2181ea5e9..63cf149195ae1a40cb61a7b2256878c0f2ccd5ef 100644 (file)
@@ -45,6 +45,8 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
 }
 
 static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *best_parent_rate,
                                 struct clk_hw **best_parent_clk)
 {
index ef49786eefd3caa5b6f856287635a973213a14b3..14cd026064bf377ed7cbc05bbd84aa608e639fec 100644 (file)
@@ -69,8 +69,17 @@ static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
 
 static void __init sun8i_a23_mbus_setup(struct device_node *node)
 {
-       struct clk *mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
-                                                 &sun8i_a23_mbus_lock);
+       struct clk *mbus;
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for a23-mbus-clk\n");
+               return;
+       }
+
+       mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
+                                     &sun8i_a23_mbus_lock, reg);
 
        /* The MBUS clocks needs to be always enabled */
        __clk_get(mbus);
index 3cb9036d91bb202fc057273302a6f2bcc31edf9f..d8da77d72861b29f0867d47b43b7917da31037f7 100644 (file)
 
 
 /**
- * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL1
+ * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
  * PLL4 rate is calculated as follows
  * rate = (parent_rate * n >> p) / (m + 1);
- * parent_rate is always 24Mhz
+ * parent_rate is always 24MHz
  *
  * p and m are named div1 and div2 in Allwinner's SDK
  */
 
 static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+                                      u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
 {
-       int div;
+       int n;
+       int m = 1;
+       int p = 1;
 
-       /* Normalize value to a 6M multiple */
-       div = DIV_ROUND_UP(*freq, 6000000);
+       /* Normalize value to a 6 MHz multiple (24 MHz / 4) */
+       n = DIV_ROUND_UP(*freq, 6000000);
 
-       /* divs above 256 cannot be odd */
-       if (div > 256)
-               div = round_up(div, 2);
+       /* If n is too large switch to steps of 12 MHz */
+       if (n > 255) {
+               m = 0;
+               n = (n + 1) / 2;
+       }
+
+       /* If n is still too large switch to steps of 24 MHz */
+       if (n > 255) {
+               p = 0;
+               n = (n + 1) / 2;
+       }
 
-       /* divs above 512 must be a multiple of 4 */
-       if (div > 512)
-               div = round_up(div, 4);
+       /* n must be between 12 and 255 */
+       if (n > 255)
+               n = 255;
+       else if (n < 12)
+               n = 12;
 
-       *freq = 6000000 * div;
+       *freq = ((24000000 * n) >> p) / (m + 1);
 
        /* we were called to round the frequency, we can now return */
-       if (n == NULL)
+       if (n_ret == NULL)
                return;
 
-       /* p will be 1 for divs under 512 */
-       if (div < 512)
-               *p = 1;
-       else
-               *p = 0;
-
-       /* m will be 1 if div is odd */
-       if (div & 1)
-               *m = 1;
-       else
-               *m = 0;
-
-       /* calculate a suitable n based on m and p */
-       *n = div / (*p + 1) / (*m + 1);
+       *n_ret = n;
+       *m_ret = m;
+       *p_ret = p;
 }
 
 static struct clk_factors_config sun9i_a80_pll4_config = {
@@ -89,7 +90,17 @@ static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
 
 static void __init sun9i_a80_pll4_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-pll4-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_pll4_data,
+                              &sun9i_a80_pll4_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
 
@@ -139,8 +150,18 @@ static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
 
 static void __init sun9i_a80_gt_setup(struct device_node *node)
 {
-       struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
-                                               &sun9i_a80_gt_lock);
+       void __iomem *reg;
+       struct clk *gt;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-gt-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
+                                   &sun9i_a80_gt_lock, reg);
 
        /* The GT bus clock needs to be always enabled */
        __clk_get(gt);
@@ -194,7 +215,17 @@ static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
 
 static void __init sun9i_a80_ahb_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-ahb-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_ahb_data,
+                              &sun9i_a80_ahb_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
 
@@ -210,7 +241,17 @@ static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
 
 static void __init sun9i_a80_apb0_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-apb0-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_apb0_data,
+                              &sun9i_a80_apb0_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
 
@@ -266,6 +307,16 @@ static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
 
 static void __init sun9i_a80_apb1_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-apb1-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_apb1_data,
+                              &sun9i_a80_apb1_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
new file mode 100644 (file)
index 0000000..710c273
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2015 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai        <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#define SUN9I_MMC_WIDTH                4
+
+#define SUN9I_MMC_GATE_BIT     16
+#define SUN9I_MMC_RESET_BIT    18
+
+struct sun9i_mmc_clk_data {
+       spinlock_t                      lock;
+       void __iomem                    *membase;
+       struct clk                      *clk;
+       struct reset_control            *reset;
+       struct clk_onecell_data         clk_data;
+       struct reset_controller_dev     rcdev;
+};
+
+static int sun9i_mmc_reset_assert(struct reset_controller_dev *rcdev,
+                             unsigned long id)
+{
+       struct sun9i_mmc_clk_data *data = container_of(rcdev,
+                                                      struct sun9i_mmc_clk_data,
+                                                      rcdev);
+       unsigned long flags;
+       void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
+       u32 val;
+
+       clk_prepare_enable(data->clk);
+       spin_lock_irqsave(&data->lock, flags);
+
+       val = readl(reg);
+       writel(val & ~BIT(SUN9I_MMC_RESET_BIT), reg);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       struct sun9i_mmc_clk_data *data = container_of(rcdev,
+                                                      struct sun9i_mmc_clk_data,
+                                                      rcdev);
+       unsigned long flags;
+       void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
+       u32 val;
+
+       clk_prepare_enable(data->clk);
+       spin_lock_irqsave(&data->lock, flags);
+
+       val = readl(reg);
+       writel(val | BIT(SUN9I_MMC_RESET_BIT), reg);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static struct reset_control_ops sun9i_mmc_reset_ops = {
+       .assert         = sun9i_mmc_reset_assert,
+       .deassert       = sun9i_mmc_reset_deassert,
+};
+
+static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct sun9i_mmc_clk_data *data;
+       struct clk_onecell_data *clk_data;
+       const char *clk_name = np->name;
+       const char *clk_parent;
+       struct resource *r;
+       int count, i, ret;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       spin_lock_init(&data->lock);
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       /* one clock/reset pair per word */
+       count = DIV_ROUND_UP((r->end - r->start + 1), SUN9I_MMC_WIDTH);
+       data->membase = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(data->membase))
+               return PTR_ERR(data->membase);
+
+       clk_data = &data->clk_data;
+       clk_data->clk_num = count;
+       clk_data->clks = devm_kcalloc(&pdev->dev, count, sizeof(struct clk *),
+                                     GFP_KERNEL);
+       if (!clk_data->clks)
+               return -ENOMEM;
+
+       data->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(data->clk)) {
+               dev_err(&pdev->dev, "Could not get clock\n");
+               return PTR_ERR(data->clk);
+       }
+
+       data->reset = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(data->reset)) {
+               dev_err(&pdev->dev, "Could not get reset control\n");
+               return PTR_ERR(data->reset);
+       }
+
+       ret = reset_control_deassert(data->reset);
+       if (ret) {
+               dev_err(&pdev->dev, "Reset deassert err %d\n", ret);
+               return ret;
+       }
+
+       clk_parent = __clk_get_name(data->clk);
+       for (i = 0; i < count; i++) {
+               of_property_read_string_index(np, "clock-output-names",
+                                             i, &clk_name);
+
+               clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
+                                                     clk_parent, 0,
+                                                     data->membase + SUN9I_MMC_WIDTH * i,
+                                                     SUN9I_MMC_GATE_BIT, 0,
+                                                     &data->lock);
+
+               if (IS_ERR(clk_data->clks[i])) {
+                       ret = PTR_ERR(clk_data->clks[i]);
+                       goto err_clk_register;
+               }
+       }
+
+       ret = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+       if (ret)
+               goto err_clk_provider;
+
+       data->rcdev.owner = THIS_MODULE;
+       data->rcdev.nr_resets = count;
+       data->rcdev.ops = &sun9i_mmc_reset_ops;
+       data->rcdev.of_node = pdev->dev.of_node;
+
+       ret = reset_controller_register(&data->rcdev);
+       if (ret)
+               goto err_rc_reg;
+
+       platform_set_drvdata(pdev, data);
+
+       return 0;
+
+err_rc_reg:
+       of_clk_del_provider(np);
+
+err_clk_provider:
+       for (i = 0; i < count; i++)
+               clk_unregister(clk_data->clks[i]);
+
+err_clk_register:
+       reset_control_assert(data->reset);
+
+       return ret;
+}
+
+static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev);
+       struct clk_onecell_data *clk_data = &data->clk_data;
+       int i;
+
+       reset_controller_unregister(&data->rcdev);
+       of_clk_del_provider(np);
+       for (i = 0; i < clk_data->clk_num; i++)
+               clk_unregister(clk_data->clks[i]);
+
+       reset_control_assert(data->reset);
+
+       return 0;
+}
+
+static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = {
+       { .compatible = "allwinner,sun9i-a80-mmc-config-clk" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver sun9i_a80_mmc_config_clk_driver = {
+       .driver = {
+               .name = "sun9i-a80-mmc-config-clk",
+               .of_match_table = sun9i_a80_mmc_config_clk_dt_ids,
+       },
+       .probe = sun9i_a80_mmc_config_clk_probe,
+       .remove = sun9i_a80_mmc_config_clk_remove,
+};
+module_platform_driver(sun9i_a80_mmc_config_clk_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver");
+MODULE_LICENSE("GPL v2");
index 1818f404538d377d22b8b1d1602a0130e388018a..379324eb5486e1b332d19fcf66fc8d7a0c4aff15 100644 (file)
 #include <linux/of_address.h>
 #include <linux/reset-controller.h>
 #include <linux/spinlock.h>
+#include <linux/log2.h>
 
 #include "clk-factors.h"
 
 static DEFINE_SPINLOCK(clk_lock);
 
+/**
+ * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
+ */
+
+#define SUN6I_AHB1_MAX_PARENTS         4
+#define SUN6I_AHB1_MUX_PARENT_PLL6     3
+#define SUN6I_AHB1_MUX_SHIFT           12
+/* un-shifted mask is what mux_clk expects */
+#define SUN6I_AHB1_MUX_MASK            0x3
+#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
+                                        SUN6I_AHB1_MUX_MASK)
+
+#define SUN6I_AHB1_DIV_SHIFT           4
+#define SUN6I_AHB1_DIV_MASK            (0x3 << SUN6I_AHB1_DIV_SHIFT)
+#define SUN6I_AHB1_DIV_GET(reg)                ((reg & SUN6I_AHB1_DIV_MASK) >> \
+                                               SUN6I_AHB1_DIV_SHIFT)
+#define SUN6I_AHB1_DIV_SET(reg, div)   ((reg & ~SUN6I_AHB1_DIV_MASK) | \
+                                               (div << SUN6I_AHB1_DIV_SHIFT))
+#define SUN6I_AHB1_PLL6_DIV_SHIFT      6
+#define SUN6I_AHB1_PLL6_DIV_MASK       (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
+#define SUN6I_AHB1_PLL6_DIV_GET(reg)   ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
+                                               SUN6I_AHB1_PLL6_DIV_SHIFT)
+#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
+                                               (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
+
+struct sun6i_ahb1_clk {
+       struct clk_hw hw;
+       void __iomem *reg;
+};
+
+#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
+
+static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
+       unsigned long rate;
+       u32 reg;
+
+       /* Fetch the register value */
+       reg = readl(ahb1->reg);
+
+       /* apply pre-divider first if parent is pll6 */
+       if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
+               parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
+
+       /* clk divider */
+       rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
+
+       return rate;
+}
+
+static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
+                                u8 parent, unsigned long parent_rate)
+{
+       u8 div, calcp, calcm = 1;
+
+       /*
+        * clock can only divide, so we will never be able to achieve
+        * frequencies higher than the parent frequency
+        */
+       if (parent_rate && rate > parent_rate)
+               rate = parent_rate;
+
+       div = DIV_ROUND_UP(parent_rate, rate);
+
+       /* calculate pre-divider if parent is pll6 */
+       if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
+               if (div < 4)
+                       calcp = 0;
+               else if (div / 2 < 4)
+                       calcp = 1;
+               else if (div / 4 < 4)
+                       calcp = 2;
+               else
+                       calcp = 3;
+
+               calcm = DIV_ROUND_UP(div, 1 << calcp);
+       } else {
+               calcp = __roundup_pow_of_two(div);
+               calcp = calcp > 3 ? 3 : calcp;
+       }
+
+       /* we were asked to pass back divider values */
+       if (divp) {
+               *divp = calcp;
+               *pre_divp = calcm - 1;
+       }
+
+       return (parent_rate / calcm) >> calcp;
+}
+
+static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                         unsigned long min_rate,
+                                         unsigned long max_rate,
+                                         unsigned long *best_parent_rate,
+                                         struct clk_hw **best_parent_clk)
+{
+       struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+       int i, num_parents;
+       unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
+
+       /* find the parent that can help provide the fastest rate <= rate */
+       num_parents = __clk_get_num_parents(clk);
+       for (i = 0; i < num_parents; i++) {
+               parent = clk_get_parent_by_index(clk, i);
+               if (!parent)
+                       continue;
+               if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
+                       parent_rate = __clk_round_rate(parent, rate);
+               else
+                       parent_rate = __clk_get_rate(parent);
+
+               child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i,
+                                                 parent_rate);
+
+               if (child_rate <= rate && child_rate > best_child_rate) {
+                       best_parent = parent;
+                       best = parent_rate;
+                       best_child_rate = child_rate;
+               }
+       }
+
+       if (best_parent)
+               *best_parent_clk = __clk_get_hw(best_parent);
+       *best_parent_rate = best;
+
+       return best_child_rate;
+}
+
+static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long parent_rate)
+{
+       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
+       unsigned long flags;
+       u8 div, pre_div, parent;
+       u32 reg;
+
+       spin_lock_irqsave(&clk_lock, flags);
+
+       reg = readl(ahb1->reg);
+
+       /* need to know which parent is used to apply pre-divider */
+       parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
+       sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
+
+       reg = SUN6I_AHB1_DIV_SET(reg, div);
+       reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
+       writel(reg, ahb1->reg);
+
+       spin_unlock_irqrestore(&clk_lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops sun6i_ahb1_clk_ops = {
+       .determine_rate = sun6i_ahb1_clk_determine_rate,
+       .recalc_rate    = sun6i_ahb1_clk_recalc_rate,
+       .set_rate       = sun6i_ahb1_clk_set_rate,
+};
+
+static void __init sun6i_ahb1_clk_setup(struct device_node *node)
+{
+       struct clk *clk;
+       struct sun6i_ahb1_clk *ahb1;
+       struct clk_mux *mux;
+       const char *clk_name = node->name;
+       const char *parents[SUN6I_AHB1_MAX_PARENTS];
+       void __iomem *reg;
+       int i = 0;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+
+       /* we have a mux, we will have >1 parents */
+       while (i < SUN6I_AHB1_MAX_PARENTS &&
+              (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+               i++;
+
+       of_property_read_string(node, "clock-output-names", &clk_name);
+
+       ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
+       if (!ahb1)
+               return;
+
+       mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+       if (!mux) {
+               kfree(ahb1);
+               return;
+       }
+
+       /* set up clock properties */
+       mux->reg = reg;
+       mux->shift = SUN6I_AHB1_MUX_SHIFT;
+       mux->mask = SUN6I_AHB1_MUX_MASK;
+       mux->lock = &clk_lock;
+       ahb1->reg = reg;
+
+       clk = clk_register_composite(NULL, clk_name, parents, i,
+                                    &mux->hw, &clk_mux_ops,
+                                    &ahb1->hw, &sun6i_ahb1_clk_ops,
+                                    NULL, NULL, 0);
+
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               clk_register_clkdev(clk, clk_name, NULL);
+       }
+}
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
+
 /* Maximum number of parents our clocks have */
 #define SUNXI_MAX_PARENTS      5
 
@@ -354,43 +564,6 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
        *p = calcp;
 }
 
-/**
- * clk_sunxi_mmc_phase_control() - configures MMC clock phase control
- */
-
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output)
-{
-       #define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
-       #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
-
-       struct clk_hw *hw = __clk_get_hw(clk);
-       struct clk_composite *composite = to_clk_composite(hw);
-       struct clk_hw *rate_hw = composite->rate_hw;
-       struct clk_factors *factors = to_clk_factors(rate_hw);
-       unsigned long flags = 0;
-       u32 reg;
-
-       if (factors->lock)
-               spin_lock_irqsave(factors->lock, flags);
-
-       reg = readl(factors->reg);
-
-       /* set sample clock phase control */
-       reg &= ~(0x7 << 20);
-       reg |= ((sample & 0x7) << 20);
-
-       /* set output clock phase control */
-       reg &= ~(0x7 << 8);
-       reg |= ((output & 0x7) << 8);
-
-       writel(reg, factors->reg);
-
-       if (factors->lock)
-               spin_unlock_irqrestore(factors->lock, flags);
-}
-EXPORT_SYMBOL(clk_sunxi_mmc_phase_control);
-
-
 /**
  * sunxi_factors_clk_setup() - Setup function for factor clocks
  */
@@ -413,6 +586,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
        .kwidth = 2,
        .mshift = 0,
        .mwidth = 2,
+       .n_start = 1,
 };
 
 static struct clk_factors_config sun8i_a23_pll1_config = {
@@ -520,7 +694,16 @@ static const struct factors_data sun7i_a20_out_data __initconst = {
 static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
                                                   const struct factors_data *data)
 {
-       return sunxi_factors_register(node, data, &clk_lock);
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for factors-clk: %s\n",
+                      node->name);
+               return NULL;
+       }
+
+       return sunxi_factors_register(node, data, &clk_lock, reg);
 }
 
 
@@ -561,7 +744,7 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
        of_property_read_string(node, "clock-output-names", &clk_name);
 
        clk = clk_register_mux(NULL, clk_name, parents, i,
-                              CLK_SET_RATE_NO_REPARENT, reg,
+                              CLK_SET_RATE_PARENT, reg,
                               data->shift, SUNXI_MUX_GATE_WIDTH,
                               0, &clk_lock);
 
@@ -1217,7 +1400,6 @@ CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
 
 static const char *sun6i_critical_clocks[] __initdata = {
        "cpu",
-       "ahb1_sdram",
 };
 
 static void __init sun6i_init_clocks(struct device_node *node)
index f7dfb72884a4e2d177261984023d7c20f9249a22..edb8358fa6cebab596e7f39c022736a4b6c31457 100644 (file)
@@ -15,3 +15,4 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC)         += clk-tegra20.o
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)         += clk-tegra30.o
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)       += clk-tegra114.o
 obj-$(CONFIG_ARCH_TEGRA_124_SOC)       += clk-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC)       += clk-tegra124.o
index 0011d547a9f7ed1e5767625582c1b1ab59a34c9e..60738cc954cb3dd857763d7aaa99ea955d939e5e 100644 (file)
@@ -64,10 +64,8 @@ enum clk_id {
        tegra_clk_disp2,
        tegra_clk_dp2,
        tegra_clk_dpaux,
-       tegra_clk_dsia,
        tegra_clk_dsialp,
        tegra_clk_dsia_mux,
-       tegra_clk_dsib,
        tegra_clk_dsiblp,
        tegra_clk_dsib_mux,
        tegra_clk_dtv,
index 9e899c18af8678c8eac70fab0ede9d8631fff3f3..d84ae49d0e05eead08c6379a0236c339ff88cb5c 100644 (file)
@@ -28,7 +28,7 @@ static u8 clk_periph_get_parent(struct clk_hw *hw)
        const struct clk_ops *mux_ops = periph->mux_ops;
        struct clk_hw *mux_hw = &periph->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->get_parent(mux_hw);
 }
@@ -39,7 +39,7 @@ static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
        const struct clk_ops *mux_ops = periph->mux_ops;
        struct clk_hw *mux_hw = &periph->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->set_parent(mux_hw, index);
 }
@@ -51,7 +51,7 @@ static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->recalc_rate(div_hw, parent_rate);
 }
@@ -63,7 +63,7 @@ static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->round_rate(div_hw, rate, prate);
 }
@@ -75,7 +75,7 @@ static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->set_rate(div_hw, rate, parent_rate);
 }
@@ -86,7 +86,7 @@ static int clk_periph_is_enabled(struct clk_hw *hw)
        const struct clk_ops *gate_ops = periph->gate_ops;
        struct clk_hw *gate_hw = &periph->gate.hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->is_enabled(gate_hw);
 }
@@ -97,7 +97,7 @@ static int clk_periph_enable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = periph->gate_ops;
        struct clk_hw *gate_hw = &periph->gate.hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->enable(gate_hw);
 }
index c7c6d8fb32fbb14bfc0727024bdd91ccab6009b8..bfef9abdf23250d39504587f7f41d3ca6ec6e06a 100644 (file)
@@ -816,7 +816,9 @@ const struct clk_ops tegra_clk_plle_ops = {
        .enable = clk_plle_enable,
 };
 
-#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_132_SOC)
 
 static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
                           unsigned long parent_rate)
@@ -1505,7 +1507,9 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
        return clk;
 }
 
-#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_132_SOC)
 static const struct clk_ops tegra_clk_pllxc_ops = {
        .is_enabled = clk_pll_is_enabled,
        .enable = clk_pll_iddq_enable,
@@ -1565,7 +1569,7 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1665,7 +1669,7 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1706,7 +1710,7 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1802,7 +1806,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
 }
 #endif
 
-#ifdef CONFIG_ARCH_TEGRA_124_SOC
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
 static const struct clk_ops tegra_clk_pllss_ops = {
        .is_enabled = clk_pll_is_enabled,
        .enable = clk_pll_iddq_enable,
@@ -1830,7 +1834,7 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
index 37f32c49674eb293cb242b34b446ce73e227de37..cef0727b9eec98b91de3db3c2373fe8e508e5750 100644 (file)
@@ -434,10 +434,10 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda),
        MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x),
        MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir),
-       MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1),
-       MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2),
-       MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3),
-       MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4),
+       MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1),
+       MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2),
+       MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
+       MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
        MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
        MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
        MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
@@ -470,10 +470,10 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
        MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
        MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
-       MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
-       MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
-       MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
-       MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
+       MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_8),
+       MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_8),
+       MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_8),
+       MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_8),
        MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
        MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
        MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -537,8 +537,6 @@ static struct tegra_periph_init_data gate_clks[] = {
        GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
        GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
        GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
-       GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0),
-       GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0),
        GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
        GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
        GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
index 0b03d2cf7264f7d42dafb84fd83bb29e18f420a1..d0766423a5d607554ff8f9e9f9b7f610f5d875a3 100644 (file)
@@ -715,7 +715,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true },
        [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true },
        [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true },
-       [tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true },
        [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true },
        [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true },
        [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
@@ -739,7 +738,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true },
        [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true },
        [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true },
-       [tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true },
        [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true },
        [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true },
        [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true },
@@ -1224,6 +1222,14 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
                               clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
        clks[TEGRA114_CLK_DSIB_MUX] = clk;
 
+       clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
+                                            0, 48, periph_clk_enb_refcnt);
+       clks[TEGRA114_CLK_DSIA] = clk;
+
+       clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
+                                            0, 82, periph_clk_enb_refcnt);
+       clks[TEGRA114_CLK_DSIB] = clk;
+
        /* emc mux */
        clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
                               ARRAY_SIZE(mux_pllmcp_clkm),
index f5f9baca7bb621924d14ff4d9364d18580e0fbc2..9a893f2fe8e9889e7e1df95d12ba90d687763942 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2012-2014 NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #include "clk.h"
 #include "clk-id.h"
 
+/*
+ * TEGRA124_CAR_BANK_COUNT: the number of peripheral clock register
+ * banks present in the Tegra124/132 CAR IP block.  The banks are
+ * identified by single letters, e.g.: L, H, U, V, W, X.  See
+ * periph_regs[] in drivers/clk/tegra/clk.c
+ */
+#define TEGRA124_CAR_BANK_COUNT                        6
+
 #define CLK_SOURCE_CSITE 0x1d4
 #define CLK_SOURCE_EMC 0x19c
 
@@ -128,7 +136,6 @@ static unsigned long osc_freq;
 static unsigned long pll_ref_freq;
 
 static DEFINE_SPINLOCK(pll_d_lock);
-static DEFINE_SPINLOCK(pll_d2_lock);
 static DEFINE_SPINLOCK(pll_e_lock);
 static DEFINE_SPINLOCK(pll_re_lock);
 static DEFINE_SPINLOCK(pll_u_lock);
@@ -145,11 +152,6 @@ static unsigned long tegra124_input_freq[] = {
        [12] = 260000000,
 };
 
-static const char *mux_plld_out0_plld2_out0[] = {
-       "pll_d_out0", "pll_d2_out0",
-};
-#define mux_plld_out0_plld2_out0_idx NULL
-
 static const char *mux_pllmcp_clkm[] = {
        "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
 };
@@ -783,7 +785,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true },
        [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true },
        [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true },
-       [tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true },
        [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true },
        [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true },
        [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true },
@@ -809,7 +810,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
        [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
        [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
-       [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
        [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
        [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true },
        [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true },
@@ -949,8 +949,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
        [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
        [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
-       [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
-       [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
@@ -1112,17 +1110,17 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
                                        1, 2);
        clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
 
-       /* dsia mux */
-       clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
-                              ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
-                              clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
-       clks[TEGRA124_CLK_DSIA_MUX] = clk;
+       clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
+                               clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
+       clks[TEGRA124_CLK_PLLD_DSI] = clk;
+
+       clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
+                                            0, 48, periph_clk_enb_refcnt);
+       clks[TEGRA124_CLK_DSIA] = clk;
 
-       /* dsib mux */
-       clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
-                              ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
-                              clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
-       clks[TEGRA124_CLK_DSIB_MUX] = clk;
+       clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
+                                            0, 82, periph_clk_enb_refcnt);
+       clks[TEGRA124_CLK_DSIB] = clk;
 
        /* emc mux */
        clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -1351,7 +1349,7 @@ static const struct of_device_id pmc_match[] __initconst = {
        {},
 };
 
-static struct tegra_clk_init_table init_table[] __initdata = {
+static struct tegra_clk_init_table common_init_table[] __initdata = {
        {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0},
        {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0},
        {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0},
@@ -1368,6 +1366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
        {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0},
        {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1},
+       {TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0},
+       {TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0},
        {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1},
        {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1},
        {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1},
@@ -1385,27 +1385,73 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
        {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
        {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
-       {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
+       /* This MUST be the last entry. */
+       {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+static struct tegra_clk_init_table tegra124_init_table[] __initdata = {
        {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
+       {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
+       /* This MUST be the last entry. */
+       {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+/* Tegra132 requires the SOC_THERM clock to remain active */
+static struct tegra_clk_init_table tegra132_init_table[] __initdata = {
+       {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 1},
        /* This MUST be the last entry. */
        {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
 };
 
+/**
+ * tegra124_clock_apply_init_table - initialize clocks on Tegra124 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra124 SoCs.  It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall.  No return value.
+ */
 static void __init tegra124_clock_apply_init_table(void)
 {
-       tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(tegra124_init_table, clks, TEGRA124_CLK_CLK_MAX);
 }
 
-static void __init tegra124_clock_init(struct device_node *np)
+/**
+ * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra132 SoCs.  It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall.  No return value.
+ */
+static void __init tegra132_clock_apply_init_table(void)
+{
+       tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(tegra132_init_table, clks, TEGRA124_CLK_CLK_MAX);
+}
+
+/**
+ * tegra124_132_clock_init_pre - clock initialization preamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most of the clocks controlled by the CAR IP block, along
+ * with a few clocks controlled by the PMC IP block.  Everything in
+ * this function should be common to Tegra124 and Tegra132.  XXX The
+ * PMC clock initialization should probably be moved to PMC-specific
+ * driver code.  No return value.
+ */
+static void __init tegra124_132_clock_init_pre(struct device_node *np)
 {
        struct device_node *node;
+       u32 plld_base;
 
        clk_base = of_iomap(np, 0);
        if (!clk_base) {
-               pr_err("ioremap tegra124 CAR failed\n");
+               pr_err("ioremap tegra124/tegra132 CAR failed\n");
                return;
        }
 
@@ -1423,7 +1469,8 @@ static void __init tegra124_clock_init(struct device_node *np)
                return;
        }
 
-       clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6);
+       clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX,
+                             TEGRA124_CAR_BANK_COUNT);
        if (!clks)
                return;
 
@@ -1437,13 +1484,76 @@ static void __init tegra124_clock_init(struct device_node *np)
        tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
        tegra_pmc_clk_init(pmc_base, tegra124_clks);
 
+       /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
+       plld_base = clk_readl(clk_base + PLLD_BASE);
+       plld_base &= ~BIT(25);
+       clk_writel(plld_base, clk_base + PLLD_BASE);
+}
+
+/**
+ * tegra124_132_clock_init_post - clock initialization postamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most of the along with a few clocks controlled by the PMC
+ * IP block.  Everything in this function should be common to Tegra124
+ * and Tegra132.  This function must be called after
+ * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will
+ * not be set.  No return value.
+ */
+static void __init tegra124_132_clock_init_post(struct device_node *np)
+{
        tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
-                                       &pll_x_params);
+                                 &pll_x_params);
        tegra_add_of_provider(np);
        tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
 
+       tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+}
+
+/**
+ * tegra124_clock_init - Tegra124-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra124 system-on-chip.  Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra124-car" string is encountered, and declared with
+ * CLK_OF_DECLARE.  No return value.
+ */
+static void __init tegra124_clock_init(struct device_node *np)
+{
+       tegra124_132_clock_init_pre(np);
        tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
+       tegra124_132_clock_init_post(np);
+}
 
-       tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+/**
+ * tegra132_clock_init - Tegra132-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra132 system-on-chip.  Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra132-car" string is encountered, and declared with
+ * CLK_OF_DECLARE.  No return value.
+ */
+static void __init tegra132_clock_init(struct device_node *np)
+{
+       tegra124_132_clock_init_pre(np);
+
+       /*
+        * On Tegra132, these clocks are controlled by the
+        * CLUSTER_clocks IP block, located in the CPU complex
+        */
+       tegra124_clks[tegra_clk_cclk_g].present = false;
+       tegra124_clks[tegra_clk_cclk_lp].present = false;
+       tegra124_clks[tegra_clk_pll_x].present = false;
+       tegra124_clks[tegra_clk_pll_x_out0].present = false;
+
+       tegra_clk_apply_init_table = tegra132_clock_apply_init_table;
+       tegra124_132_clock_init_post(np);
 }
 CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init);
+CLK_OF_DECLARE(tegra132, "nvidia,tegra132-car", tegra132_clock_init);
index 97dc8595c3cd5c1b59113deaa16bbd23a5298cfc..9ddb7547cb431b4b234d5ec6a41fba457abc2678 100644 (file)
@@ -302,10 +302,13 @@ struct clk ** __init tegra_lookup_dt_id(int clk_id,
 
 tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
 
-void __init tegra_clocks_apply_init_table(void)
+static int __init tegra_clocks_apply_init_table(void)
 {
        if (!tegra_clk_apply_init_table)
-               return;
+               return 0;
 
        tegra_clk_apply_init_table();
+
+       return 0;
 }
+arch_initcall(tegra_clocks_apply_init_table);
index ed4d0aaf891639585825344d7460bbfcbc1c81e2..105ffd0f5e79da04e496b291e4eb3968b834ba64 100644 (file)
@@ -1,13 +1,17 @@
-ifneq ($(CONFIG_OF),)
 obj-y                                  += clk.o autoidle.o clockdomain.o
 clk-common                             = dpll.o composite.o divider.o gate.o \
                                          fixed-factor.o mux.o apll.o
 obj-$(CONFIG_SOC_AM33XX)               += $(clk-common) clk-33xx.o
+obj-$(CONFIG_SOC_TI81XX)               += $(clk-common) fapll.o clk-816x.o
 obj-$(CONFIG_ARCH_OMAP2)               += $(clk-common) interface.o clk-2xxx.o
-obj-$(CONFIG_ARCH_OMAP3)               += $(clk-common) interface.o clk-3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)               += $(clk-common) interface.o \
+                                          clk-3xxx.o
 obj-$(CONFIG_ARCH_OMAP4)               += $(clk-common) clk-44xx.o
 obj-$(CONFIG_SOC_OMAP5)                        += $(clk-common) clk-54xx.o
 obj-$(CONFIG_SOC_DRA7XX)               += $(clk-common) clk-7xx.o \
                                           clk-dra7-atl.o
 obj-$(CONFIG_SOC_AM43XX)               += $(clk-common) clk-43xx.o
+
+ifdef CONFIG_ATAGS
+obj-$(CONFIG_ARCH_OMAP3)                += clk-3xxx-legacy.o
 endif
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
new file mode 100644 (file)
index 0000000..e0732a4
--- /dev/null
@@ -0,0 +1,4653 @@
+/*
+ * OMAP3 Legacy clock data
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+static struct ti_clk_fixed virt_12m_ck_data = {
+       .frequency = 12000000,
+};
+
+static struct ti_clk virt_12m_ck = {
+       .name = "virt_12m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_12m_ck_data,
+};
+
+static struct ti_clk_fixed virt_13m_ck_data = {
+       .frequency = 13000000,
+};
+
+static struct ti_clk virt_13m_ck = {
+       .name = "virt_13m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_13m_ck_data,
+};
+
+static struct ti_clk_fixed virt_19200000_ck_data = {
+       .frequency = 19200000,
+};
+
+static struct ti_clk virt_19200000_ck = {
+       .name = "virt_19200000_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_19200000_ck_data,
+};
+
+static struct ti_clk_fixed virt_26000000_ck_data = {
+       .frequency = 26000000,
+};
+
+static struct ti_clk virt_26000000_ck = {
+       .name = "virt_26000000_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_26000000_ck_data,
+};
+
+static struct ti_clk_fixed virt_38_4m_ck_data = {
+       .frequency = 38400000,
+};
+
+static struct ti_clk virt_38_4m_ck = {
+       .name = "virt_38_4m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_38_4m_ck_data,
+};
+
+static struct ti_clk_fixed virt_16_8m_ck_data = {
+       .frequency = 16800000,
+};
+
+static struct ti_clk virt_16_8m_ck = {
+       .name = "virt_16_8m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_16_8m_ck_data,
+};
+
+static const char *osc_sys_ck_parents[] = {
+       "virt_12m_ck",
+       "virt_13m_ck",
+       "virt_19200000_ck",
+       "virt_26000000_ck",
+       "virt_38_4m_ck",
+       "virt_16_8m_ck",
+};
+
+static struct ti_clk_mux osc_sys_ck_data = {
+       .num_parents = ARRAY_SIZE(osc_sys_ck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_PRM,
+       .parents = osc_sys_ck_parents,
+};
+
+static struct ti_clk osc_sys_ck = {
+       .name = "osc_sys_ck",
+       .type = TI_CLK_MUX,
+       .data = &osc_sys_ck_data,
+};
+
+static struct ti_clk_divider sys_ck_data = {
+       .parent = "osc_sys_ck",
+       .bit_shift = 6,
+       .max_div = 3,
+       .reg = 0x1270,
+       .module = TI_CLKM_PRM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk sys_ck = {
+       .name = "sys_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &sys_ck_data,
+};
+
+static const char *dpll3_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll3_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll3_ck_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd40,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll3_ck_parents,
+       .flags = CLKF_CORE,
+       .freqsel_mask = 0xf0,
+       .div1_mask = 0x7f00,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x5,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff0000,
+       .recal_st_bit = 0x5,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll3_ck = {
+       .name = "dpll3_ck",
+       .clkdm_name = "dpll3_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll3_ck_data,
+};
+
+static struct ti_clk_divider dpll3_m2_ck_data = {
+       .parent = "dpll3_ck",
+       .bit_shift = 27,
+       .max_div = 31,
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll3_m2_ck = {
+       .name = "dpll3_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll3_m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor core_ck_data = {
+       .parent = "dpll3_m2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_ck = {
+       .name = "core_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_ck_data,
+};
+
+static struct ti_clk_divider l3_ick_data = {
+       .parent = "core_ck",
+       .max_div = 3,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk l3_ick = {
+       .name = "l3_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &l3_ick_data,
+};
+
+static struct ti_clk_fixed_factor security_l3_ick_data = {
+       .parent = "l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk security_l3_ick = {
+       .name = "security_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &security_l3_ick_data,
+};
+
+static struct ti_clk_fixed_factor wkup_l4_ick_data = {
+       .parent = "sys_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wkup_l4_ick = {
+       .name = "wkup_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wkup_l4_ick_data,
+};
+
+static struct ti_clk_gate usim_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 9,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usim_ick = {
+       .name = "usim_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usim_ick_data,
+};
+
+static struct ti_clk_gate dss2_alwon_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 1,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss2_alwon_fck = {
+       .name = "dss2_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss2_alwon_fck_data,
+};
+
+static struct ti_clk_divider l4_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 2,
+       .max_div = 3,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk l4_ick = {
+       .name = "l4_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &l4_ick_data,
+};
+
+static struct ti_clk_fixed_factor core_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_l4_ick = {
+       .name = "core_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_l4_ick_data,
+};
+
+static struct ti_clk_gate mmchs2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 25,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs2_ick = {
+       .name = "mmchs2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs2_ick_data,
+};
+
+static const char *dpll4_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll4_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll4_ck_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd44,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll4_ck_parents,
+       .flags = CLKF_PER,
+       .freqsel_mask = 0xf00000,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x2,
+       .auto_recal_bit = 0x13,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x6,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x70000,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x6,
+       .autoidle_mask = 0x38,
+};
+
+static struct ti_clk dpll4_ck = {
+       .name = "dpll4_ck",
+       .clkdm_name = "dpll4_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll4_ck_data,
+};
+
+static struct ti_clk_divider dpll4_m2_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 63,
+       .reg = 0xd48,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m2_ck = {
+       .name = "dpll4_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m2x2_mul_ck_data = {
+       .parent = "dpll4_m2_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m2x2_mul_ck = {
+       .name = "dpll4_m2x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m2x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m2x2_ck_data = {
+       .parent = "dpll4_m2x2_mul_ck",
+       .bit_shift = 0x1b,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m2x2_ck = {
+       .name = "dpll4_m2x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m2x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_alwon_fck_data = {
+       .parent = "dpll4_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_alwon_fck = {
+       .name = "omap_96m_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor cm_96m_fck_data = {
+       .parent = "omap_96m_alwon_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk cm_96m_fck = {
+       .name = "cm_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &cm_96m_fck_data,
+};
+
+static const char *omap_96m_fck_parents[] = {
+       "cm_96m_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux omap_96m_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(omap_96m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_96m_fck_parents,
+};
+
+static struct ti_clk omap_96m_fck = {
+       .name = "omap_96m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_96m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_96m_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_96m_fck = {
+       .name = "core_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_96m_fck_data,
+};
+
+static struct ti_clk_gate mspro_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 23,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mspro_fck = {
+       .name = "mspro_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mspro_fck_data,
+};
+
+static struct ti_clk_gate dss_ick_3430es2_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xe10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk dss_ick_3430es2 = {
+       .name = "dss_ick",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_ick_3430es2_data,
+};
+
+static struct ti_clk_gate uart4_ick_am35xx_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 23,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart4_ick_am35xx = {
+       .name = "uart4_ick_am35xx",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_ick_am35xx_data,
+};
+
+static struct ti_clk_fixed_factor security_l4_ick2_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk security_l4_ick2 = {
+       .name = "security_l4_ick2",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &security_l4_ick2_data,
+};
+
+static struct ti_clk_gate aes1_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 3,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk aes1_ick = {
+       .name = "aes1_ick",
+       .type = TI_CLK_GATE,
+       .data = &aes1_ick_data,
+};
+
+static const char *dpll5_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll5_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll5_ck_parents),
+       .control_reg = 0xd04,
+       .idlest_reg = 0xd24,
+       .mult_div1_reg = 0xd4c,
+       .autoidle_reg = 0xd34,
+       .module = TI_CLKM_CM,
+       .parents = dpll5_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x19,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x19,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll5_ck = {
+       .name = "dpll5_ck",
+       .clkdm_name = "dpll5_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll5_ck_data,
+};
+
+static struct ti_clk_divider dpll5_m2_ck_data = {
+       .parent = "dpll5_ck",
+       .max_div = 31,
+       .reg = 0xd50,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll5_m2_ck = {
+       .name = "dpll5_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll5_m2_ck_data,
+};
+
+static struct ti_clk_gate usbhost_120m_fck_data = {
+       .parent = "dpll5_m2_ck",
+       .bit_shift = 1,
+       .reg = 0x1400,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk usbhost_120m_fck = {
+       .name = "usbhost_120m_fck",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_120m_fck_data,
+};
+
+static struct ti_clk_fixed_factor cm_96m_d2_fck_data = {
+       .parent = "cm_96m_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk cm_96m_d2_fck = {
+       .name = "cm_96m_d2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &cm_96m_d2_fck_data,
+};
+
+static struct ti_clk_fixed sys_altclk_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk sys_altclk = {
+       .name = "sys_altclk",
+       .type = TI_CLK_FIXED,
+       .data = &sys_altclk_data,
+};
+
+static const char *omap_48m_fck_parents[] = {
+       "cm_96m_d2_fck",
+       "sys_altclk",
+};
+
+static struct ti_clk_mux omap_48m_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(omap_48m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_48m_fck_parents,
+};
+
+static struct ti_clk omap_48m_fck = {
+       .name = "omap_48m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_48m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_48m_fck = {
+       .name = "core_48m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_48m_fck_data,
+};
+
+static struct ti_clk_fixed mcbsp_clks_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk mcbsp_clks = {
+       .name = "mcbsp_clks",
+       .type = TI_CLK_FIXED,
+       .data = &mcbsp_clks_data,
+};
+
+static struct ti_clk_gate mcbsp2_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 0,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor per_96m_fck_data = {
+       .parent = "omap_96m_alwon_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_96m_fck = {
+       .name = "per_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_96m_fck_data,
+};
+
+static const char *mcbsp2_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp2_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(mcbsp2_mux_fck_parents),
+       .reg = 0x274,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp2_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp2_fck_data = {
+       .mux = &mcbsp2_mux_fck_data,
+       .gate = &mcbsp2_gate_fck_data,
+};
+
+static struct ti_clk mcbsp2_fck = {
+       .name = "mcbsp2_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp2_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_m2x2_ck_data = {
+       .parent = "dpll3_m2_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_m2x2_ck = {
+       .name = "dpll3_m2x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_m2x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_fck_data = {
+       .parent = "dpll3_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_fck = {
+       .name = "corex2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_fck_data,
+};
+
+static struct ti_clk_gate ssi_ssr_gate_fck_3430es1_data = {
+       .parent = "corex2_fck",
+       .bit_shift = 0,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static int ssi_ssr_div_fck_3430es1_divs[] = {
+       0,
+       1,
+       2,
+       3,
+       4,
+       0,
+       6,
+       0,
+       8,
+};
+
+static struct ti_clk_divider ssi_ssr_div_fck_3430es1_data = {
+       .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es1_divs),
+       .parent = "corex2_fck",
+       .bit_shift = 8,
+       .dividers = ssi_ssr_div_fck_3430es1_divs,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite ssi_ssr_fck_3430es1_data = {
+       .gate = &ssi_ssr_gate_fck_3430es1_data,
+       .divider = &ssi_ssr_div_fck_3430es1_data,
+};
+
+static struct ti_clk ssi_ssr_fck_3430es1 = {
+       .name = "ssi_ssr_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &ssi_ssr_fck_3430es1_data,
+};
+
+static struct ti_clk_fixed_factor ssi_sst_fck_3430es1_data = {
+       .parent = "ssi_ssr_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_sst_fck_3430es1 = {
+       .name = "ssi_sst_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_sst_fck_3430es1_data,
+};
+
+static struct ti_clk_fixed omap_32k_fck_data = {
+       .frequency = 32768,
+};
+
+static struct ti_clk omap_32k_fck = {
+       .name = "omap_32k_fck",
+       .type = TI_CLK_FIXED,
+       .data = &omap_32k_fck_data,
+};
+
+static struct ti_clk_fixed_factor per_32k_alwon_fck_data = {
+       .parent = "omap_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_32k_alwon_fck = {
+       .name = "per_32k_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_32k_alwon_fck_data,
+};
+
+static struct ti_clk_gate gpio5_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 16,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio5_dbck = {
+       .name = "gpio5_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio5_dbck_data,
+};
+
+static struct ti_clk_gate gpt1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt1_ick = {
+       .name = "gpt1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt1_ick_data,
+};
+
+static struct ti_clk_gate mcspi3_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 20,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi3_fck = {
+       .name = "mcspi3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi3_fck_data,
+};
+
+static struct ti_clk_gate gpt2_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt2_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt2_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(gpt2_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt2_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt2_fck_data = {
+       .mux = &gpt2_mux_fck_data,
+       .gate = &gpt2_gate_fck_data,
+};
+
+static struct ti_clk gpt2_fck = {
+       .name = "gpt2_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt2_fck_data,
+};
+
+static struct ti_clk_gate gpt10_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 11,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt10_ick = {
+       .name = "gpt10_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt10_ick_data,
+};
+
+static struct ti_clk_gate uart2_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 14,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart2_fck = {
+       .name = "uart2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart2_fck_data,
+};
+
+static struct ti_clk_fixed_factor sr_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk sr_l4_ick = {
+       .name = "sr_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &sr_l4_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d8_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 8,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d8_fck = {
+       .name = "omap_96m_d8_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d8_fck_data,
+};
+
+static struct ti_clk_divider dpll4_m5_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 63,
+       .reg = 0xf40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m5_ck = {
+       .name = "dpll4_m5_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m5_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m5x2_mul_ck_data = {
+       .parent = "dpll4_m5_ck",
+       .div = 1,
+       .mult = 2,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dpll4_m5x2_mul_ck = {
+       .name = "dpll4_m5x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m5x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m5x2_ck_data = {
+       .parent = "dpll4_m5x2_mul_ck",
+       .bit_shift = 0x1e,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m5x2_ck = {
+       .name = "dpll4_m5x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m5x2_ck_data,
+};
+
+static struct ti_clk_gate cam_mclk_data = {
+       .parent = "dpll4_m5x2_ck",
+       .bit_shift = 0,
+       .reg = 0xf00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk cam_mclk = {
+       .name = "cam_mclk",
+       .type = TI_CLK_GATE,
+       .data = &cam_mclk_data,
+};
+
+static struct ti_clk_gate mcbsp3_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 1,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *mcbsp3_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp3_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(mcbsp3_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp3_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp3_fck_data = {
+       .mux = &mcbsp3_mux_fck_data,
+       .gate = &mcbsp3_gate_fck_data,
+};
+
+static struct ti_clk mcbsp3_fck = {
+       .name = "mcbsp3_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp3_fck_data,
+};
+
+static struct ti_clk_gate csi2_96m_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 1,
+       .reg = 0xf00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk csi2_96m_fck = {
+       .name = "csi2_96m_fck",
+       .clkdm_name = "cam_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &csi2_96m_fck_data,
+};
+
+static struct ti_clk_gate gpt9_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 10,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt9_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt9_mux_fck_data = {
+       .bit_shift = 7,
+       .num_parents = ARRAY_SIZE(gpt9_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt9_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt9_fck_data = {
+       .mux = &gpt9_mux_fck_data,
+       .gate = &gpt9_gate_fck_data,
+};
+
+static struct ti_clk gpt9_fck = {
+       .name = "gpt9_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt9_fck_data,
+};
+
+static struct ti_clk_divider dpll3_m3_ck_data = {
+       .parent = "dpll3_ck",
+       .bit_shift = 16,
+       .max_div = 31,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll3_m3_ck = {
+       .name = "dpll3_m3_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll3_m3_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_m3x2_mul_ck_data = {
+       .parent = "dpll3_m3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_m3x2_mul_ck = {
+       .name = "dpll3_m3x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_m3x2_mul_ck_data,
+};
+
+static struct ti_clk_gate sr2_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 7,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sr2_fck = {
+       .name = "sr2_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sr2_fck_data,
+};
+
+static struct ti_clk_fixed pclk_ck_data = {
+       .frequency = 27000000,
+};
+
+static struct ti_clk pclk_ck = {
+       .name = "pclk_ck",
+       .type = TI_CLK_FIXED,
+       .data = &pclk_ck_data,
+};
+
+static struct ti_clk_gate wdt2_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 5,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt2_ick = {
+       .name = "wdt2_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt2_ick_data,
+};
+
+static struct ti_clk_fixed_factor core_l3_ick_data = {
+       .parent = "l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_l3_ick = {
+       .name = "core_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_l3_ick_data,
+};
+
+static struct ti_clk_gate mcspi4_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 21,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi4_fck = {
+       .name = "mcspi4_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi4_fck_data,
+};
+
+static struct ti_clk_fixed_factor per_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_48m_fck = {
+       .name = "per_48m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_48m_fck_data,
+};
+
+static struct ti_clk_gate uart4_fck_data = {
+       .parent = "per_48m_fck",
+       .bit_shift = 18,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart4_fck = {
+       .name = "uart4_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_fck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d10_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 10,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d10_fck = {
+       .name = "omap_96m_d10_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d10_fck_data,
+};
+
+static struct ti_clk_gate usim_gate_fck_data = {
+       .parent = "omap_96m_fck",
+       .bit_shift = 9,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor per_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_l4_ick = {
+       .name = "per_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_l4_ick_data,
+};
+
+static struct ti_clk_gate gpt5_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 6,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt5_ick = {
+       .name = "gpt5_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt5_ick_data,
+};
+
+static struct ti_clk_gate mcspi2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 19,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi2_ick = {
+       .name = "mcspi2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi2_ick_data,
+};
+
+static struct ti_clk_fixed_factor ssi_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_l4_ick = {
+       .name = "ssi_l4_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_l4_ick_data,
+};
+
+static struct ti_clk_gate ssi_ick_3430es1_data = {
+       .parent = "ssi_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk ssi_ick_3430es1 = {
+       .name = "ssi_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ssi_ick_3430es1_data,
+};
+
+static struct ti_clk_gate i2c2_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 16,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c2_fck = {
+       .name = "i2c2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c2_fck_data,
+};
+
+static struct ti_clk_divider dpll1_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 19,
+       .max_div = 7,
+       .reg = 0x940,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll1_fck = {
+       .name = "dpll1_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll1_fck_data,
+};
+
+static const char *dpll1_ck_parents[] = {
+       "sys_ck",
+       "dpll1_fck",
+};
+
+static struct ti_clk_dpll dpll1_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll1_ck_parents),
+       .control_reg = 0x904,
+       .idlest_reg = 0x924,
+       .mult_div1_reg = 0x940,
+       .autoidle_reg = 0x934,
+       .module = TI_CLKM_CM,
+       .parents = dpll1_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0xa0,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x7,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x7,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll1_ck = {
+       .name = "dpll1_ck",
+       .clkdm_name = "dpll1_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll1_ck_data,
+};
+
+static struct ti_clk_fixed secure_32k_fck_data = {
+       .frequency = 32768,
+};
+
+static struct ti_clk secure_32k_fck = {
+       .name = "secure_32k_fck",
+       .type = TI_CLK_FIXED,
+       .data = &secure_32k_fck_data,
+};
+
+static struct ti_clk_gate gpio5_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 16,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio5_ick = {
+       .name = "gpio5_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio5_ick_data,
+};
+
+static struct ti_clk_divider dpll4_m4_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 32,
+       .reg = 0xe40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m4_ck = {
+       .name = "dpll4_m4_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m4_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m4x2_mul_ck_data = {
+       .parent = "dpll4_m4_ck",
+       .div = 1,
+       .mult = 2,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dpll4_m4x2_mul_ck = {
+       .name = "dpll4_m4x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m4x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m4x2_ck_data = {
+       .parent = "dpll4_m4x2_mul_ck",
+       .bit_shift = 0x1d,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m4x2_ck = {
+       .name = "dpll4_m4x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m4x2_ck_data,
+};
+
+static struct ti_clk_gate dss1_alwon_fck_3430es2_data = {
+       .parent = "dpll4_m4x2_ck",
+       .bit_shift = 0,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dss1_alwon_fck_3430es2 = {
+       .name = "dss1_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss1_alwon_fck_3430es2_data,
+};
+
+static struct ti_clk_gate uart3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 11,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart3_ick = {
+       .name = "uart3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart3_ick_data,
+};
+
+static struct ti_clk_divider dpll4_m3_ck_data = {
+       .parent = "dpll4_ck",
+       .bit_shift = 8,
+       .max_div = 32,
+       .reg = 0xe40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m3_ck = {
+       .name = "dpll4_m3_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m3_ck_data,
+};
+
+static struct ti_clk_gate mcbsp3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 1,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp3_ick = {
+       .name = "mcbsp3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp3_ick_data,
+};
+
+static struct ti_clk_gate gpio3_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 14,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio3_dbck = {
+       .name = "gpio3_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio3_dbck_data,
+};
+
+static struct ti_clk_gate fac_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 8,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk fac_ick = {
+       .name = "fac_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &fac_ick_data,
+};
+
+static struct ti_clk_gate clkout2_src_gate_ck_data = {
+       .parent = "core_ck",
+       .bit_shift = 7,
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static struct ti_clk_fixed_factor dpll4_m3x2_mul_ck_data = {
+       .parent = "dpll4_m3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m3x2_mul_ck = {
+       .name = "dpll4_m3x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m3x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m3x2_ck_data = {
+       .parent = "dpll4_m3x2_mul_ck",
+       .bit_shift = 0x1c,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m3x2_ck = {
+       .name = "dpll4_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m3x2_ck_data,
+};
+
+static const char *omap_54m_fck_parents[] = {
+       "dpll4_m3x2_ck",
+       "sys_altclk",
+};
+
+static struct ti_clk_mux omap_54m_fck_data = {
+       .bit_shift = 5,
+       .num_parents = ARRAY_SIZE(omap_54m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_54m_fck_parents,
+};
+
+static struct ti_clk omap_54m_fck = {
+       .name = "omap_54m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_54m_fck_data,
+};
+
+static const char *clkout2_src_mux_ck_parents[] = {
+       "core_ck",
+       "sys_ck",
+       "cm_96m_fck",
+       "omap_54m_fck",
+};
+
+static struct ti_clk_mux clkout2_src_mux_ck_data = {
+       .num_parents = ARRAY_SIZE(clkout2_src_mux_ck_parents),
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .parents = clkout2_src_mux_ck_parents,
+};
+
+static struct ti_clk_composite clkout2_src_ck_data = {
+       .mux = &clkout2_src_mux_ck_data,
+       .gate = &clkout2_src_gate_ck_data,
+};
+
+static struct ti_clk clkout2_src_ck = {
+       .name = "clkout2_src_ck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &clkout2_src_ck_data,
+};
+
+static struct ti_clk_gate i2c1_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 15,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c1_fck = {
+       .name = "i2c1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c1_fck_data,
+};
+
+static struct ti_clk_gate wdt3_fck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 12,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk wdt3_fck = {
+       .name = "wdt3_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt3_fck_data,
+};
+
+static struct ti_clk_gate gpt7_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 8,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt7_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt7_mux_fck_data = {
+       .bit_shift = 5,
+       .num_parents = ARRAY_SIZE(gpt7_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt7_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt7_fck_data = {
+       .mux = &gpt7_mux_fck_data,
+       .gate = &gpt7_gate_fck_data,
+};
+
+static struct ti_clk gpt7_fck = {
+       .name = "gpt7_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt7_fck_data,
+};
+
+static struct ti_clk_gate usb_l4_gate_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 5,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INTERFACE,
+};
+
+static struct ti_clk_divider usb_l4_div_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 4,
+       .max_div = 1,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk_composite usb_l4_ick_data = {
+       .gate = &usb_l4_gate_ick_data,
+       .divider = &usb_l4_div_ick_data,
+};
+
+static struct ti_clk usb_l4_ick = {
+       .name = "usb_l4_ick",
+       .type = TI_CLK_COMPOSITE,
+       .data = &usb_l4_ick_data,
+};
+
+static struct ti_clk_gate uart4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 18,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart4_ick = {
+       .name = "uart4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_ick_data,
+};
+
+static struct ti_clk_fixed dummy_ck_data = {
+       .frequency = 0,
+};
+
+static struct ti_clk dummy_ck = {
+       .name = "dummy_ck",
+       .type = TI_CLK_FIXED,
+       .data = &dummy_ck_data,
+};
+
+static const char *gpt3_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt3_mux_fck_data = {
+       .bit_shift = 1,
+       .num_parents = ARRAY_SIZE(gpt3_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt3_mux_fck_parents,
+};
+
+static struct ti_clk_gate gpt9_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 10,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt9_ick = {
+       .name = "gpt9_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt9_ick_data,
+};
+
+static struct ti_clk_gate gpt10_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 11,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate dss_ick_3430es1_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xe10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk dss_ick_3430es1 = {
+       .name = "dss_ick",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_ick_3430es1_data,
+};
+
+static struct ti_clk_gate gpt11_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 12,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt11_ick = {
+       .name = "gpt11_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt11_ick_data,
+};
+
+static struct ti_clk_divider dpll2_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 19,
+       .max_div = 7,
+       .reg = 0x40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll2_fck = {
+       .name = "dpll2_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll2_fck_data,
+};
+
+static struct ti_clk_gate uart1_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 13,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart1_fck = {
+       .name = "uart1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart1_fck_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_3430es1_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk hsotgusb_ick_3430es1 = {
+       .name = "hsotgusb_ick_3430es1",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_3430es1_data,
+};
+
+static struct ti_clk_gate gpio2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 13,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio2_ick = {
+       .name = "gpio2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio2_ick_data,
+};
+
+static struct ti_clk_gate mmchs1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 24,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs1_ick = {
+       .name = "mmchs1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs1_ick_data,
+};
+
+static struct ti_clk_gate modem_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 31,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk modem_fck = {
+       .name = "modem_fck",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &modem_fck_data,
+};
+
+static struct ti_clk_gate mcbsp4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 2,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp4_ick = {
+       .name = "mcbsp4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp4_ick_data,
+};
+
+static struct ti_clk_gate gpio1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 3,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio1_ick = {
+       .name = "gpio1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio1_ick_data,
+};
+
+static const char *gpt6_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt6_mux_fck_data = {
+       .bit_shift = 4,
+       .num_parents = ARRAY_SIZE(gpt6_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt6_mux_fck_parents,
+};
+
+static struct ti_clk_fixed_factor dpll1_x2_ck_data = {
+       .parent = "dpll1_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll1_x2_ck = {
+       .name = "dpll1_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll1_x2_ck_data,
+};
+
+static struct ti_clk_divider dpll1_x2m2_ck_data = {
+       .parent = "dpll1_x2_ck",
+       .max_div = 31,
+       .reg = 0x944,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll1_x2m2_ck = {
+       .name = "dpll1_x2m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll1_x2m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor mpu_ck_data = {
+       .parent = "dpll1_x2m2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk mpu_ck = {
+       .name = "mpu_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &mpu_ck_data,
+};
+
+static struct ti_clk_divider arm_fck_data = {
+       .parent = "mpu_ck",
+       .max_div = 2,
+       .reg = 0x924,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk arm_fck = {
+       .name = "arm_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &arm_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d3_ck_data = {
+       .parent = "core_ck",
+       .div = 3,
+       .mult = 1,
+};
+
+static struct ti_clk core_d3_ck = {
+       .name = "core_d3_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d3_ck_data,
+};
+
+static struct ti_clk_gate gpt11_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 12,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt11_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt11_mux_fck_data = {
+       .bit_shift = 7,
+       .num_parents = ARRAY_SIZE(gpt11_mux_fck_parents),
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .parents = gpt11_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt11_fck_data = {
+       .mux = &gpt11_mux_fck_data,
+       .gate = &gpt11_gate_fck_data,
+};
+
+static struct ti_clk gpt11_fck = {
+       .name = "gpt11_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt11_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d6_ck_data = {
+       .parent = "core_ck",
+       .div = 6,
+       .mult = 1,
+};
+
+static struct ti_clk core_d6_ck = {
+       .name = "core_d6_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d6_ck_data,
+};
+
+static struct ti_clk_gate uart4_fck_am35xx_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 23,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart4_fck_am35xx = {
+       .name = "uart4_fck_am35xx",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_fck_am35xx_data,
+};
+
+static struct ti_clk_gate dpll3_m3x2_ck_data = {
+       .parent = "dpll3_m3x2_mul_ck",
+       .bit_shift = 0xc,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll3_m3x2_ck = {
+       .name = "dpll3_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll3_m3x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_core_alwon_ck_data = {
+       .parent = "dpll3_m3x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_core_alwon_ck = {
+       .name = "emu_core_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_core_alwon_ck_data,
+};
+
+static struct ti_clk_divider dpll4_m6_ck_data = {
+       .parent = "dpll4_ck",
+       .bit_shift = 24,
+       .max_div = 63,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m6_ck = {
+       .name = "dpll4_m6_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m6_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m6x2_mul_ck_data = {
+       .parent = "dpll4_m6_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m6x2_mul_ck = {
+       .name = "dpll4_m6x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m6x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_data = {
+       .parent = "dpll4_m6x2_mul_ck",
+       .bit_shift = 0x1f,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck = {
+       .name = "dpll4_m6x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m6x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_per_alwon_ck_data = {
+       .parent = "dpll4_m6x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_per_alwon_ck = {
+       .name = "emu_per_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_per_alwon_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_mpu_alwon_ck_data = {
+       .parent = "mpu_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_mpu_alwon_ck = {
+       .name = "emu_mpu_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_mpu_alwon_ck_data,
+};
+
+static const char *emu_src_mux_ck_parents[] = {
+       "sys_ck",
+       "emu_core_alwon_ck",
+       "emu_per_alwon_ck",
+       "emu_mpu_alwon_ck",
+};
+
+static struct ti_clk_mux emu_src_mux_ck_data = {
+       .num_parents = ARRAY_SIZE(emu_src_mux_ck_parents),
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .parents = emu_src_mux_ck_parents,
+};
+
+static struct ti_clk emu_src_mux_ck = {
+       .name = "emu_src_mux_ck",
+       .type = TI_CLK_MUX,
+       .data = &emu_src_mux_ck_data,
+};
+
+static struct ti_clk_gate emu_src_ck_data = {
+       .parent = "emu_src_mux_ck",
+       .flags = CLKF_CLKDM,
+};
+
+static struct ti_clk emu_src_ck = {
+       .name = "emu_src_ck",
+       .clkdm_name = "emu_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &emu_src_ck_data,
+};
+
+static struct ti_clk_divider atclk_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 4,
+       .max_div = 3,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk atclk_fck = {
+       .name = "atclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &atclk_fck_data,
+};
+
+static struct ti_clk_gate ipss_ick_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_AM35XX | CLKF_INTERFACE,
+};
+
+static struct ti_clk ipss_ick = {
+       .name = "ipss_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ipss_ick_data,
+};
+
+static struct ti_clk_gate emac_ick_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 1,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk emac_ick = {
+       .name = "emac_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &emac_ick_data,
+};
+
+static struct ti_clk_gate vpfe_ick_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 2,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk vpfe_ick = {
+       .name = "vpfe_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &vpfe_ick_data,
+};
+
+static const char *dpll2_ck_parents[] = {
+       "sys_ck",
+       "dpll2_fck",
+};
+
+static struct ti_clk_dpll dpll2_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll2_ck_parents),
+       .control_reg = 0x4,
+       .idlest_reg = 0x24,
+       .mult_div1_reg = 0x40,
+       .autoidle_reg = 0x34,
+       .module = TI_CLKM_CM,
+       .parents = dpll2_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0xa2,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x8,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x8,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll2_ck = {
+       .name = "dpll2_ck",
+       .clkdm_name = "dpll2_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll2_ck_data,
+};
+
+static struct ti_clk_divider dpll2_m2_ck_data = {
+       .parent = "dpll2_ck",
+       .max_div = 31,
+       .reg = 0x44,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll2_m2_ck = {
+       .name = "dpll2_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll2_m2_ck_data,
+};
+
+static const char *mcbsp4_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp4_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(mcbsp4_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp4_mux_fck_parents,
+};
+
+static const char *mcbsp1_mux_fck_parents[] = {
+       "core_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp1_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(mcbsp1_mux_fck_parents),
+       .reg = 0x274,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp1_mux_fck_parents,
+};
+
+static struct ti_clk_gate gpt8_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 9,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt8_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 9,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt8_ick = {
+       .name = "gpt8_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt8_ick_data,
+};
+
+static const char *gpt10_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt10_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(gpt10_mux_fck_parents),
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .parents = gpt10_mux_fck_parents,
+};
+
+static struct ti_clk_gate mmchs3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 30,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs3_ick = {
+       .name = "mmchs3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs3_ick_data,
+};
+
+static struct ti_clk_gate gpio3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 14,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio3_ick = {
+       .name = "gpio3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio3_ick_data,
+};
+
+static const char *traceclk_src_fck_parents[] = {
+       "sys_ck",
+       "emu_core_alwon_ck",
+       "emu_per_alwon_ck",
+       "emu_mpu_alwon_ck",
+};
+
+static struct ti_clk_mux traceclk_src_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(traceclk_src_fck_parents),
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .parents = traceclk_src_fck_parents,
+};
+
+static struct ti_clk traceclk_src_fck = {
+       .name = "traceclk_src_fck",
+       .type = TI_CLK_MUX,
+       .data = &traceclk_src_fck_data,
+};
+
+static struct ti_clk_divider traceclk_fck_data = {
+       .parent = "traceclk_src_fck",
+       .bit_shift = 11,
+       .max_div = 7,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk traceclk_fck = {
+       .name = "traceclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &traceclk_fck_data,
+};
+
+static struct ti_clk_gate mcbsp5_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 10,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate sad2d_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 3,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sad2d_ick = {
+       .name = "sad2d_ick",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sad2d_ick_data,
+};
+
+static const char *gpt1_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt1_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(gpt1_mux_fck_parents),
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .parents = gpt1_mux_fck_parents,
+};
+
+static struct ti_clk_gate hecc_ck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk hecc_ck = {
+       .name = "hecc_ck",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hecc_ck_data,
+};
+
+static struct ti_clk_gate gpt1_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 0,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite gpt1_fck_data = {
+       .mux = &gpt1_mux_fck_data,
+       .gate = &gpt1_gate_fck_data,
+};
+
+static struct ti_clk gpt1_fck = {
+       .name = "gpt1_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m2x2_ck_omap36xx_data = {
+       .parent = "dpll4_m2x2_mul_ck",
+       .bit_shift = 0x1b,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m2x2_ck_omap36xx = {
+       .name = "dpll4_m2x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m2x2_ck_omap36xx_data,
+       .patch = &dpll4_m2x2_ck,
+};
+
+static struct ti_clk_divider gfx_l3_fck_data = {
+       .parent = "l3_ick",
+       .max_div = 7,
+       .reg = 0xb40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk gfx_l3_fck = {
+       .name = "gfx_l3_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &gfx_l3_fck_data,
+};
+
+static struct ti_clk_gate gfx_cg1_ck_data = {
+       .parent = "gfx_l3_fck",
+       .bit_shift = 1,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_cg1_ck = {
+       .name = "gfx_cg1_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_cg1_ck_data,
+};
+
+static struct ti_clk_gate mailboxes_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 7,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mailboxes_ick = {
+       .name = "mailboxes_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mailboxes_ick_data,
+};
+
+static struct ti_clk_gate sha11_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 1,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sha11_ick = {
+       .name = "sha11_ick",
+       .type = TI_CLK_GATE,
+       .data = &sha11_ick_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_am35xx_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 0,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk hsotgusb_ick_am35xx = {
+       .name = "hsotgusb_ick_am35xx",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_am35xx_data,
+};
+
+static struct ti_clk_gate mmchs3_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 30,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs3_fck = {
+       .name = "mmchs3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs3_fck_data,
+};
+
+static struct ti_clk_divider pclk_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 8,
+       .max_div = 7,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk pclk_fck = {
+       .name = "pclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &pclk_fck_data,
+};
+
+static const char *dpll4_ck_omap36xx_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll4_ck_omap36xx_data = {
+       .num_parents = ARRAY_SIZE(dpll4_ck_omap36xx_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd44,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll4_ck_omap36xx_parents,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x2,
+       .auto_recal_bit = 0x13,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x6,
+       .max_multiplier = 0xfff,
+       .enable_mask = 0x70000,
+       .mult_mask = 0xfff00,
+       .recal_st_bit = 0x6,
+       .autoidle_mask = 0x38,
+       .sddiv_mask = 0xff000000,
+       .dco_mask = 0xe00000,
+       .flags = CLKF_PER | CLKF_J_TYPE,
+};
+
+static struct ti_clk dpll4_ck_omap36xx = {
+       .name = "dpll4_ck",
+       .type = TI_CLK_DPLL,
+       .data = &dpll4_ck_omap36xx_data,
+       .patch = &dpll4_ck,
+};
+
+static struct ti_clk_gate uart3_fck_data = {
+       .parent = "per_48m_fck",
+       .bit_shift = 11,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart3_fck = {
+       .name = "uart3_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart3_fck_data,
+};
+
+static struct ti_clk_fixed_factor wkup_32k_fck_data = {
+       .parent = "omap_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wkup_32k_fck = {
+       .name = "wkup_32k_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wkup_32k_fck_data,
+};
+
+static struct ti_clk_gate sys_clkout1_data = {
+       .parent = "osc_sys_ck",
+       .bit_shift = 7,
+       .reg = 0xd70,
+       .module = TI_CLKM_PRM,
+};
+
+static struct ti_clk sys_clkout1 = {
+       .name = "sys_clkout1",
+       .type = TI_CLK_GATE,
+       .data = &sys_clkout1_data,
+};
+
+static struct ti_clk_fixed_factor gpmc_fck_data = {
+       .parent = "core_l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gpmc_fck = {
+       .name = "gpmc_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gpmc_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d20_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 20,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d20_ck = {
+       .name = "dpll5_m2_d20_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d20_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m5x2_ck_omap36xx_data = {
+       .parent = "dpll4_m5x2_mul_ck",
+       .bit_shift = 0x1e,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m5x2_ck_omap36xx = {
+       .name = "dpll4_m5x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m5x2_ck_omap36xx_data,
+       .patch = &dpll4_m5x2_ck,
+};
+
+static struct ti_clk_gate ssi_ssr_gate_fck_3430es2_data = {
+       .parent = "corex2_fck",
+       .bit_shift = 0,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static struct ti_clk_gate uart1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 13,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart1_ick = {
+       .name = "uart1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart1_ick_data,
+};
+
+static struct ti_clk_gate iva2_ck_data = {
+       .parent = "dpll2_m2_ck",
+       .bit_shift = 0,
+       .reg = 0x0,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk iva2_ck = {
+       .name = "iva2_ck",
+       .clkdm_name = "iva2_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &iva2_ck_data,
+};
+
+static struct ti_clk_gate pka_ick_data = {
+       .parent = "security_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk pka_ick = {
+       .name = "pka_ick",
+       .type = TI_CLK_GATE,
+       .data = &pka_ick_data,
+};
+
+static struct ti_clk_gate gpt12_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 1,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt12_ick = {
+       .name = "gpt12_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt12_ick_data,
+};
+
+static const char *mcbsp5_mux_fck_parents[] = {
+       "core_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp5_mux_fck_data = {
+       .bit_shift = 4,
+       .num_parents = ARRAY_SIZE(mcbsp5_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp5_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp5_fck_data = {
+       .mux = &mcbsp5_mux_fck_data,
+       .gate = &mcbsp5_gate_fck_data,
+};
+
+static struct ti_clk mcbsp5_fck = {
+       .name = "mcbsp5_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp5_fck_data,
+};
+
+static struct ti_clk_gate usbhost_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .bit_shift = 0,
+       .reg = 0x1400,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS,
+};
+
+static struct ti_clk usbhost_48m_fck = {
+       .name = "usbhost_48m_fck",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_48m_fck_data,
+};
+
+static struct ti_clk_gate des1_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 0,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des1_ick = {
+       .name = "des1_ick",
+       .type = TI_CLK_GATE,
+       .data = &des1_ick_data,
+};
+
+static struct ti_clk_gate sgx_gate_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 1,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor core_d4_ck_data = {
+       .parent = "core_ck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk core_d4_ck = {
+       .name = "core_d4_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_192m_alwon_fck_data = {
+       .parent = "dpll4_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk omap_192m_alwon_fck = {
+       .name = "omap_192m_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_192m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d2_ck_data = {
+       .parent = "core_ck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk core_d2_ck = {
+       .name = "core_d2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d3_fck_data = {
+       .parent = "corex2_fck",
+       .div = 3,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_d3_fck = {
+       .name = "corex2_d3_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_d3_fck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d5_fck_data = {
+       .parent = "corex2_fck",
+       .div = 5,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_d5_fck = {
+       .name = "corex2_d5_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_d5_fck_data,
+};
+
+static const char *sgx_mux_fck_parents[] = {
+       "core_d3_ck",
+       "core_d4_ck",
+       "core_d6_ck",
+       "cm_96m_fck",
+       "omap_192m_alwon_fck",
+       "core_d2_ck",
+       "corex2_d3_fck",
+       "corex2_d5_fck",
+};
+
+static struct ti_clk_mux sgx_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(sgx_mux_fck_parents),
+       .reg = 0xb40,
+       .module = TI_CLKM_CM,
+       .parents = sgx_mux_fck_parents,
+};
+
+static struct ti_clk_composite sgx_fck_data = {
+       .mux = &sgx_mux_fck_data,
+       .gate = &sgx_gate_fck_data,
+};
+
+static struct ti_clk sgx_fck = {
+       .name = "sgx_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &sgx_fck_data,
+};
+
+static struct ti_clk_gate mcspi1_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 18,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi1_fck = {
+       .name = "mcspi1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi1_fck_data,
+};
+
+static struct ti_clk_gate mmchs2_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 25,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs2_fck = {
+       .name = "mmchs2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs2_fck_data,
+};
+
+static struct ti_clk_gate mcspi2_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 19,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi2_fck = {
+       .name = "mcspi2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi2_fck_data,
+};
+
+static struct ti_clk_gate vpfe_fck_data = {
+       .parent = "pclk_ck",
+       .bit_shift = 10,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk vpfe_fck = {
+       .name = "vpfe_fck",
+       .type = TI_CLK_GATE,
+       .data = &vpfe_fck_data,
+};
+
+static struct ti_clk_gate gpt4_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 5,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate mcbsp1_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 9,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt5_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 6,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt5_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt5_mux_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(gpt5_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt5_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt5_fck_data = {
+       .mux = &gpt5_mux_fck_data,
+       .gate = &gpt5_gate_fck_data,
+};
+
+static struct ti_clk gpt5_fck = {
+       .name = "gpt5_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt5_fck_data,
+};
+
+static struct ti_clk_gate ts_fck_data = {
+       .parent = "omap_32k_fck",
+       .bit_shift = 1,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk ts_fck = {
+       .name = "ts_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ts_fck_data,
+};
+
+static struct ti_clk_fixed_factor wdt1_fck_data = {
+       .parent = "secure_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wdt1_fck = {
+       .name = "wdt1_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wdt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_omap36xx_data = {
+       .parent = "dpll4_m6x2_mul_ck",
+       .bit_shift = 0x1f,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck_omap36xx = {
+       .name = "dpll4_m6x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m6x2_ck_omap36xx_data,
+       .patch = &dpll4_m6x2_ck,
+};
+
+static const char *gpt4_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt4_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(gpt4_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt4_mux_fck_parents,
+};
+
+static struct ti_clk_gate usbhost_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0x1410,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbhost_ick = {
+       .name = "usbhost_ick",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_ick_data,
+};
+
+static struct ti_clk_gate mcbsp2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 0,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp2_ick = {
+       .name = "mcbsp2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp2_ick_data,
+};
+
+static struct ti_clk_gate omapctrl_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 6,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk omapctrl_ick = {
+       .name = "omapctrl_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &omapctrl_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d4_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d4_fck = {
+       .name = "omap_96m_d4_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d4_fck_data,
+};
+
+static struct ti_clk_gate gpt6_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 7,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt6_ick = {
+       .name = "gpt6_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt6_ick_data,
+};
+
+static struct ti_clk_gate dpll3_m3x2_ck_omap36xx_data = {
+       .parent = "dpll3_m3x2_mul_ck",
+       .bit_shift = 0xc,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll3_m3x2_ck_omap36xx = {
+       .name = "dpll3_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll3_m3x2_ck_omap36xx_data,
+       .patch = &dpll3_m3x2_ck,
+};
+
+static struct ti_clk_gate i2c3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 17,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c3_ick = {
+       .name = "i2c3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c3_ick_data,
+};
+
+static struct ti_clk_gate gpio6_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 17,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio6_ick = {
+       .name = "gpio6_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio6_ick_data,
+};
+
+static struct ti_clk_gate mspro_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 23,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mspro_ick = {
+       .name = "mspro_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mspro_ick_data,
+};
+
+static struct ti_clk_composite mcbsp1_fck_data = {
+       .mux = &mcbsp1_mux_fck_data,
+       .gate = &mcbsp1_gate_fck_data,
+};
+
+static struct ti_clk mcbsp1_fck = {
+       .name = "mcbsp1_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp1_fck_data,
+};
+
+static struct ti_clk_gate gpt3_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 4,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed rmii_ck_data = {
+       .frequency = 50000000,
+};
+
+static struct ti_clk rmii_ck = {
+       .name = "rmii_ck",
+       .type = TI_CLK_FIXED,
+       .data = &rmii_ck_data,
+};
+
+static struct ti_clk_gate gpt6_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 7,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite gpt6_fck_data = {
+       .mux = &gpt6_mux_fck_data,
+       .gate = &gpt6_gate_fck_data,
+};
+
+static struct ti_clk gpt6_fck = {
+       .name = "gpt6_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt6_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d4_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d4_ck = {
+       .name = "dpll5_m2_d4_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor sys_d2_ck_data = {
+       .parent = "sys_ck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk sys_d2_ck = {
+       .name = "sys_d2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &sys_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d2_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d2_fck = {
+       .name = "omap_96m_d2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d2_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d8_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 8,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d8_ck = {
+       .name = "dpll5_m2_d8_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d8_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d16_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 16,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d16_ck = {
+       .name = "dpll5_m2_d16_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d16_ck_data,
+};
+
+static const char *usim_mux_fck_parents[] = {
+       "sys_ck",
+       "sys_d2_ck",
+       "omap_96m_d2_fck",
+       "omap_96m_d4_fck",
+       "omap_96m_d8_fck",
+       "omap_96m_d10_fck",
+       "dpll5_m2_d4_ck",
+       "dpll5_m2_d8_ck",
+       "dpll5_m2_d16_ck",
+       "dpll5_m2_d20_ck",
+};
+
+static struct ti_clk_mux usim_mux_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(usim_mux_fck_parents),
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .parents = usim_mux_fck_parents,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk_composite usim_fck_data = {
+       .mux = &usim_mux_fck_data,
+       .gate = &usim_gate_fck_data,
+};
+
+static struct ti_clk usim_fck = {
+       .name = "usim_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &usim_fck_data,
+};
+
+static int ssi_ssr_div_fck_3430es2_divs[] = {
+       0,
+       1,
+       2,
+       3,
+       4,
+       0,
+       6,
+       0,
+       8,
+};
+
+static struct ti_clk_divider ssi_ssr_div_fck_3430es2_data = {
+       .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es2_divs),
+       .parent = "corex2_fck",
+       .bit_shift = 8,
+       .dividers = ssi_ssr_div_fck_3430es2_divs,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite ssi_ssr_fck_3430es2_data = {
+       .gate = &ssi_ssr_gate_fck_3430es2_data,
+       .divider = &ssi_ssr_div_fck_3430es2_data,
+};
+
+static struct ti_clk ssi_ssr_fck_3430es2 = {
+       .name = "ssi_ssr_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &ssi_ssr_fck_3430es2_data,
+};
+
+static struct ti_clk_gate dss1_alwon_fck_3430es1_data = {
+       .parent = "dpll4_m4x2_ck",
+       .bit_shift = 0,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dss1_alwon_fck_3430es1 = {
+       .name = "dss1_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss1_alwon_fck_3430es1_data,
+};
+
+static struct ti_clk_gate gpt3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 4,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt3_ick = {
+       .name = "gpt3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt3_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_12m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk omap_12m_fck = {
+       .name = "omap_12m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_12m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_12m_fck_data = {
+       .parent = "omap_12m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_12m_fck = {
+       .name = "core_12m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_12m_fck_data,
+};
+
+static struct ti_clk_gate hdq_fck_data = {
+       .parent = "core_12m_fck",
+       .bit_shift = 22,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk hdq_fck = {
+       .name = "hdq_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hdq_fck_data,
+};
+
+static struct ti_clk_gate usbtll_fck_data = {
+       .parent = "dpll5_m2_ck",
+       .bit_shift = 2,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk usbtll_fck = {
+       .name = "usbtll_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbtll_fck_data,
+};
+
+static struct ti_clk_gate hsotgusb_fck_am35xx_data = {
+       .parent = "sys_ck",
+       .bit_shift = 8,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk hsotgusb_fck_am35xx = {
+       .name = "hsotgusb_fck_am35xx",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_fck_am35xx_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_3430es2_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSOTGUSB | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk hsotgusb_ick_3430es2 = {
+       .name = "hsotgusb_ick_3430es2",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_3430es2_data,
+};
+
+static struct ti_clk_gate gfx_l3_ck_data = {
+       .parent = "l3_ick",
+       .bit_shift = 0,
+       .reg = 0xb10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_l3_ck = {
+       .name = "gfx_l3_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_l3_ck_data,
+};
+
+static struct ti_clk_fixed_factor gfx_l3_ick_data = {
+       .parent = "gfx_l3_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gfx_l3_ick = {
+       .name = "gfx_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gfx_l3_ick_data,
+};
+
+static struct ti_clk_gate mcbsp1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 9,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp1_ick = {
+       .name = "mcbsp1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp1_ick_data,
+};
+
+static struct ti_clk_fixed_factor gpt12_fck_data = {
+       .parent = "secure_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gpt12_fck = {
+       .name = "gpt12_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gpt12_fck_data,
+};
+
+static struct ti_clk_gate gfx_cg2_ck_data = {
+       .parent = "gfx_l3_fck",
+       .bit_shift = 2,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_cg2_ck = {
+       .name = "gfx_cg2_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_cg2_ck_data,
+};
+
+static struct ti_clk_gate i2c2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 16,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c2_ick = {
+       .name = "i2c2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c2_ick_data,
+};
+
+static struct ti_clk_gate gpio4_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 15,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio4_dbck = {
+       .name = "gpio4_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio4_dbck_data,
+};
+
+static struct ti_clk_gate i2c3_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 17,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c3_fck = {
+       .name = "i2c3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c3_fck_data,
+};
+
+static struct ti_clk_composite gpt3_fck_data = {
+       .mux = &gpt3_mux_fck_data,
+       .gate = &gpt3_gate_fck_data,
+};
+
+static struct ti_clk gpt3_fck = {
+       .name = "gpt3_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt3_fck_data,
+};
+
+static struct ti_clk_gate i2c1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 15,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c1_ick = {
+       .name = "i2c1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c1_ick_data,
+};
+
+static struct ti_clk_gate omap_32ksync_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 2,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk omap_32ksync_ick = {
+       .name = "omap_32ksync_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &omap_32ksync_ick_data,
+};
+
+static struct ti_clk_gate aes2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 28,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk aes2_ick = {
+       .name = "aes2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &aes2_ick_data,
+};
+
+static const char *gpt8_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt8_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(gpt8_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt8_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt8_fck_data = {
+       .mux = &gpt8_mux_fck_data,
+       .gate = &gpt8_gate_fck_data,
+};
+
+static struct ti_clk gpt8_fck = {
+       .name = "gpt8_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt8_fck_data,
+};
+
+static struct ti_clk_gate mcbsp4_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 2,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite mcbsp4_fck_data = {
+       .mux = &mcbsp4_mux_fck_data,
+       .gate = &mcbsp4_gate_fck_data,
+};
+
+static struct ti_clk mcbsp4_fck = {
+       .name = "mcbsp4_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp4_fck_data,
+};
+
+static struct ti_clk_gate gpio2_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 13,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio2_dbck = {
+       .name = "gpio2_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio2_dbck_data,
+};
+
+static struct ti_clk_gate usbtll_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 2,
+       .reg = 0xa18,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbtll_ick = {
+       .name = "usbtll_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbtll_ick_data,
+};
+
+static struct ti_clk_gate mcspi4_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 21,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi4_ick = {
+       .name = "mcspi4_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi4_ick_data,
+};
+
+static struct ti_clk_gate dss_96m_fck_data = {
+       .parent = "omap_96m_fck",
+       .bit_shift = 2,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss_96m_fck = {
+       .name = "dss_96m_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_96m_fck_data,
+};
+
+static struct ti_clk_divider rm_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 1,
+       .max_div = 3,
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk rm_ick = {
+       .name = "rm_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &rm_ick_data,
+};
+
+static struct ti_clk_gate hdq_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 22,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk hdq_ick = {
+       .name = "hdq_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hdq_ick_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_x2_ck_data = {
+       .parent = "dpll3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_x2_ck = {
+       .name = "dpll3_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_x2_ck_data,
+};
+
+static struct ti_clk_gate mad2d_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 3,
+       .reg = 0xa18,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mad2d_ick = {
+       .name = "mad2d_ick",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mad2d_ick_data,
+};
+
+static struct ti_clk_gate fshostusb_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 5,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk fshostusb_fck = {
+       .name = "fshostusb_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &fshostusb_fck_data,
+};
+
+static struct ti_clk_gate sr1_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 6,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sr1_fck = {
+       .name = "sr1_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sr1_fck_data,
+};
+
+static struct ti_clk_gate des2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 26,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des2_ick = {
+       .name = "des2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &des2_ick_data,
+};
+
+static struct ti_clk_gate sdrc_ick_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 1,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sdrc_ick = {
+       .name = "sdrc_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sdrc_ick_data,
+};
+
+static struct ti_clk_composite gpt4_fck_data = {
+       .mux = &gpt4_mux_fck_data,
+       .gate = &gpt4_gate_fck_data,
+};
+
+static struct ti_clk gpt4_fck = {
+       .name = "gpt4_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt4_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m3x2_ck_omap36xx_data = {
+       .parent = "dpll4_m3x2_mul_ck",
+       .bit_shift = 0x1c,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m3x2_ck_omap36xx = {
+       .name = "dpll4_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m3x2_ck_omap36xx_data,
+       .patch = &dpll4_m3x2_ck,
+};
+
+static struct ti_clk_gate cpefuse_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 0,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk cpefuse_fck = {
+       .name = "cpefuse_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &cpefuse_fck_data,
+};
+
+static struct ti_clk_gate mcspi3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 20,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi3_ick = {
+       .name = "mcspi3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi3_ick_data,
+};
+
+static struct ti_clk_fixed_factor ssi_sst_fck_3430es2_data = {
+       .parent = "ssi_ssr_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_sst_fck_3430es2 = {
+       .name = "ssi_sst_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_sst_fck_3430es2_data,
+};
+
+static struct ti_clk_gate gpio1_dbck_data = {
+       .parent = "wkup_32k_fck",
+       .bit_shift = 3,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio1_dbck = {
+       .name = "gpio1_dbck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio1_dbck_data,
+};
+
+static struct ti_clk_gate gpt4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 5,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt4_ick = {
+       .name = "gpt4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt4_ick_data,
+};
+
+static struct ti_clk_gate gpt2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 3,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt2_ick = {
+       .name = "gpt2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt2_ick_data,
+};
+
+static struct ti_clk_gate mmchs1_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 24,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs1_fck = {
+       .name = "mmchs1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs1_fck_data,
+};
+
+static struct ti_clk_fixed dummy_apb_pclk_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk dummy_apb_pclk = {
+       .name = "dummy_apb_pclk",
+       .type = TI_CLK_FIXED,
+       .data = &dummy_apb_pclk_data,
+};
+
+static struct ti_clk_gate gpio6_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 17,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio6_dbck = {
+       .name = "gpio6_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio6_dbck_data,
+};
+
+static struct ti_clk_gate uart2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 14,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart2_ick = {
+       .name = "uart2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart2_ick_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_x2_ck_data = {
+       .parent = "dpll4_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_x2_ck = {
+       .name = "dpll4_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_x2_ck_data,
+};
+
+static struct ti_clk_gate gpt7_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 8,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt7_ick = {
+       .name = "gpt7_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt7_ick_data,
+};
+
+static struct ti_clk_gate dss_tv_fck_data = {
+       .parent = "omap_54m_fck",
+       .bit_shift = 2,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss_tv_fck = {
+       .name = "dss_tv_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_tv_fck_data,
+};
+
+static struct ti_clk_gate mcbsp5_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 10,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp5_ick = {
+       .name = "mcbsp5_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp5_ick_data,
+};
+
+static struct ti_clk_gate mcspi1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 18,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi1_ick = {
+       .name = "mcspi1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi1_ick_data,
+};
+
+static struct ti_clk_gate d2d_26m_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk d2d_26m_fck = {
+       .name = "d2d_26m_fck",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &d2d_26m_fck_data,
+};
+
+static struct ti_clk_gate wdt3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 12,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt3_ick = {
+       .name = "wdt3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt3_ick_data,
+};
+
+static struct ti_clk_divider pclkx2_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 6,
+       .max_div = 3,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk pclkx2_fck = {
+       .name = "pclkx2_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &pclkx2_fck_data,
+};
+
+static struct ti_clk_gate sha12_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 27,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sha12_ick = {
+       .name = "sha12_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sha12_ick_data,
+};
+
+static struct ti_clk_gate emac_fck_data = {
+       .parent = "rmii_ck",
+       .bit_shift = 9,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk emac_fck = {
+       .name = "emac_fck",
+       .type = TI_CLK_GATE,
+       .data = &emac_fck_data,
+};
+
+static struct ti_clk_composite gpt10_fck_data = {
+       .mux = &gpt10_mux_fck_data,
+       .gate = &gpt10_gate_fck_data,
+};
+
+static struct ti_clk gpt10_fck = {
+       .name = "gpt10_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt10_fck_data,
+};
+
+static struct ti_clk_gate wdt2_fck_data = {
+       .parent = "wkup_32k_fck",
+       .bit_shift = 5,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk wdt2_fck = {
+       .name = "wdt2_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt2_fck_data,
+};
+
+static struct ti_clk_gate cam_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xf10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk cam_ick = {
+       .name = "cam_ick",
+       .clkdm_name = "cam_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &cam_ick_data,
+};
+
+static struct ti_clk_gate ssi_ick_3430es2_data = {
+       .parent = "ssi_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SSI | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk ssi_ick_3430es2 = {
+       .name = "ssi_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ssi_ick_3430es2_data,
+};
+
+static struct ti_clk_gate gpio4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 15,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio4_ick = {
+       .name = "gpio4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio4_ick_data,
+};
+
+static struct ti_clk_gate wdt1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 4,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt1_ick = {
+       .name = "wdt1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt1_ick_data,
+};
+
+static struct ti_clk_gate rng_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 2,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk rng_ick = {
+       .name = "rng_ick",
+       .type = TI_CLK_GATE,
+       .data = &rng_ick_data,
+};
+
+static struct ti_clk_gate icr_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 29,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk icr_ick = {
+       .name = "icr_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &icr_ick_data,
+};
+
+static struct ti_clk_gate sgx_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 0,
+       .reg = 0xb10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sgx_ick = {
+       .name = "sgx_ick",
+       .clkdm_name = "sgx_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sgx_ick_data,
+};
+
+static struct ti_clk_divider sys_clkout2_data = {
+       .parent = "clkout2_src_ck",
+       .bit_shift = 3,
+       .max_div = 64,
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_POWER_OF_TWO,
+};
+
+static struct ti_clk sys_clkout2 = {
+       .name = "sys_clkout2",
+       .type = TI_CLK_DIVIDER,
+       .data = &sys_clkout2_data,
+};
+
+static struct ti_clk_alias omap34xx_omap36xx_clks[] = {
+       CLK(NULL, "security_l4_ick2", &security_l4_ick2),
+       CLK(NULL, "aes1_ick", &aes1_ick),
+       CLK("omap_rng", "ick", &rng_ick),
+       CLK("omap3-rom-rng", "ick", &rng_ick),
+       CLK(NULL, "sha11_ick", &sha11_ick),
+       CLK(NULL, "des1_ick", &des1_ick),
+       CLK(NULL, "cam_mclk", &cam_mclk),
+       CLK(NULL, "cam_ick", &cam_ick),
+       CLK(NULL, "csi2_96m_fck", &csi2_96m_fck),
+       CLK(NULL, "security_l3_ick", &security_l3_ick),
+       CLK(NULL, "pka_ick", &pka_ick),
+       CLK(NULL, "icr_ick", &icr_ick),
+       CLK(NULL, "des2_ick", &des2_ick),
+       CLK(NULL, "mspro_ick", &mspro_ick),
+       CLK(NULL, "mailboxes_ick", &mailboxes_ick),
+       CLK(NULL, "ssi_l4_ick", &ssi_l4_ick),
+       CLK(NULL, "sr1_fck", &sr1_fck),
+       CLK(NULL, "sr2_fck", &sr2_fck),
+       CLK(NULL, "sr_l4_ick", &sr_l4_ick),
+       CLK(NULL, "dpll2_fck", &dpll2_fck),
+       CLK(NULL, "dpll2_ck", &dpll2_ck),
+       CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck),
+       CLK(NULL, "iva2_ck", &iva2_ck),
+       CLK(NULL, "modem_fck", &modem_fck),
+       CLK(NULL, "sad2d_ick", &sad2d_ick),
+       CLK(NULL, "mad2d_ick", &mad2d_ick),
+       CLK(NULL, "mspro_fck", &mspro_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_omap3430es2plus_clks[] = {
+       CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2),
+       CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2),
+       CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2),
+       CLK(NULL, "ssi_ick", &ssi_ick_3430es2),
+       CLK(NULL, "sys_d2_ck", &sys_d2_ck),
+       CLK(NULL, "omap_96m_d2_fck", &omap_96m_d2_fck),
+       CLK(NULL, "omap_96m_d4_fck", &omap_96m_d4_fck),
+       CLK(NULL, "omap_96m_d8_fck", &omap_96m_d8_fck),
+       CLK(NULL, "omap_96m_d10_fck", &omap_96m_d10_fck),
+       CLK(NULL, "dpll5_m2_d4_ck", &dpll5_m2_d4_ck),
+       CLK(NULL, "dpll5_m2_d8_ck", &dpll5_m2_d8_ck),
+       CLK(NULL, "dpll5_m2_d16_ck", &dpll5_m2_d16_ck),
+       CLK(NULL, "dpll5_m2_d20_ck", &dpll5_m2_d20_ck),
+       CLK(NULL, "usim_fck", &usim_fck),
+       CLK(NULL, "usim_ick", &usim_ick),
+       { NULL },
+};
+
+static struct ti_clk_alias omap3xxx_clks[] = {
+       CLK(NULL, "apb_pclk", &dummy_apb_pclk),
+       CLK(NULL, "omap_32k_fck", &omap_32k_fck),
+       CLK(NULL, "virt_12m_ck", &virt_12m_ck),
+       CLK(NULL, "virt_13m_ck", &virt_13m_ck),
+       CLK(NULL, "virt_19200000_ck", &virt_19200000_ck),
+       CLK(NULL, "virt_26000000_ck", &virt_26000000_ck),
+       CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck),
+       CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck),
+       CLK(NULL, "osc_sys_ck", &osc_sys_ck),
+       CLK("twl", "fck", &osc_sys_ck),
+       CLK(NULL, "sys_ck", &sys_ck),
+       CLK(NULL, "timer_sys_ck", &sys_ck),
+       CLK(NULL, "dpll4_ck", &dpll4_ck),
+       CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck),
+       CLK(NULL, "dpll4_m2x2_mul_ck", &dpll4_m2x2_mul_ck),
+       CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck),
+       CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck),
+       CLK(NULL, "dpll3_ck", &dpll3_ck),
+       CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck),
+       CLK(NULL, "dpll3_m3x2_mul_ck", &dpll3_m3x2_mul_ck),
+       CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck),
+       CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck),
+       CLK(NULL, "sys_altclk", &sys_altclk),
+       CLK(NULL, "mcbsp_clks", &mcbsp_clks),
+       CLK(NULL, "sys_clkout1", &sys_clkout1),
+       CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck),
+       CLK(NULL, "core_ck", &core_ck),
+       CLK(NULL, "dpll1_fck", &dpll1_fck),
+       CLK(NULL, "dpll1_ck", &dpll1_ck),
+       CLK(NULL, "cpufreq_ck", &dpll1_ck),
+       CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck),
+       CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck),
+       CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck),
+       CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck),
+       CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck),
+       CLK(NULL, "cm_96m_fck", &cm_96m_fck),
+       CLK(NULL, "omap_96m_fck", &omap_96m_fck),
+       CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck),
+       CLK(NULL, "dpll4_m3x2_mul_ck", &dpll4_m3x2_mul_ck),
+       CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck),
+       CLK(NULL, "omap_54m_fck", &omap_54m_fck),
+       CLK(NULL, "cm_96m_d2_fck", &cm_96m_d2_fck),
+       CLK(NULL, "omap_48m_fck", &omap_48m_fck),
+       CLK(NULL, "omap_12m_fck", &omap_12m_fck),
+       CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck),
+       CLK(NULL, "dpll4_m4x2_mul_ck", &dpll4_m4x2_mul_ck),
+       CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck),
+       CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck),
+       CLK(NULL, "dpll4_m5x2_mul_ck", &dpll4_m5x2_mul_ck),
+       CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck),
+       CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck),
+       CLK(NULL, "dpll4_m6x2_mul_ck", &dpll4_m6x2_mul_ck),
+       CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck),
+       CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck),
+       CLK(NULL, "clkout2_src_ck", &clkout2_src_ck),
+       CLK(NULL, "sys_clkout2", &sys_clkout2),
+       CLK(NULL, "corex2_fck", &corex2_fck),
+       CLK(NULL, "mpu_ck", &mpu_ck),
+       CLK(NULL, "arm_fck", &arm_fck),
+       CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
+       CLK(NULL, "l3_ick", &l3_ick),
+       CLK(NULL, "l4_ick", &l4_ick),
+       CLK(NULL, "rm_ick", &rm_ick),
+       CLK(NULL, "timer_32k_ck", &omap_32k_fck),
+       CLK(NULL, "gpt10_fck", &gpt10_fck),
+       CLK(NULL, "gpt11_fck", &gpt11_fck),
+       CLK(NULL, "core_96m_fck", &core_96m_fck),
+       CLK(NULL, "mmchs2_fck", &mmchs2_fck),
+       CLK(NULL, "mmchs1_fck", &mmchs1_fck),
+       CLK(NULL, "i2c3_fck", &i2c3_fck),
+       CLK(NULL, "i2c2_fck", &i2c2_fck),
+       CLK(NULL, "i2c1_fck", &i2c1_fck),
+       CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
+       CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
+       CLK(NULL, "core_48m_fck", &core_48m_fck),
+       CLK(NULL, "mcspi4_fck", &mcspi4_fck),
+       CLK(NULL, "mcspi3_fck", &mcspi3_fck),
+       CLK(NULL, "mcspi2_fck", &mcspi2_fck),
+       CLK(NULL, "mcspi1_fck", &mcspi1_fck),
+       CLK(NULL, "uart2_fck", &uart2_fck),
+       CLK(NULL, "uart1_fck", &uart1_fck),
+       CLK(NULL, "core_12m_fck", &core_12m_fck),
+       CLK("omap_hdq.0", "fck", &hdq_fck),
+       CLK(NULL, "hdq_fck", &hdq_fck),
+       CLK(NULL, "core_l3_ick", &core_l3_ick),
+       CLK(NULL, "sdrc_ick", &sdrc_ick),
+       CLK(NULL, "gpmc_fck", &gpmc_fck),
+       CLK(NULL, "core_l4_ick", &core_l4_ick),
+       CLK("omap_hsmmc.1", "ick", &mmchs2_ick),
+       CLK("omap_hsmmc.0", "ick", &mmchs1_ick),
+       CLK(NULL, "mmchs2_ick", &mmchs2_ick),
+       CLK(NULL, "mmchs1_ick", &mmchs1_ick),
+       CLK("omap_hdq.0", "ick", &hdq_ick),
+       CLK(NULL, "hdq_ick", &hdq_ick),
+       CLK("omap2_mcspi.4", "ick", &mcspi4_ick),
+       CLK("omap2_mcspi.3", "ick", &mcspi3_ick),
+       CLK("omap2_mcspi.2", "ick", &mcspi2_ick),
+       CLK("omap2_mcspi.1", "ick", &mcspi1_ick),
+       CLK(NULL, "mcspi4_ick", &mcspi4_ick),
+       CLK(NULL, "mcspi3_ick", &mcspi3_ick),
+       CLK(NULL, "mcspi2_ick", &mcspi2_ick),
+       CLK(NULL, "mcspi1_ick", &mcspi1_ick),
+       CLK("omap_i2c.3", "ick", &i2c3_ick),
+       CLK("omap_i2c.2", "ick", &i2c2_ick),
+       CLK("omap_i2c.1", "ick", &i2c1_ick),
+       CLK(NULL, "i2c3_ick", &i2c3_ick),
+       CLK(NULL, "i2c2_ick", &i2c2_ick),
+       CLK(NULL, "i2c1_ick", &i2c1_ick),
+       CLK(NULL, "uart2_ick", &uart2_ick),
+       CLK(NULL, "uart1_ick", &uart1_ick),
+       CLK(NULL, "gpt11_ick", &gpt11_ick),
+       CLK(NULL, "gpt10_ick", &gpt10_ick),
+       CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
+       CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
+       CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
+       CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
+       CLK(NULL, "omapctrl_ick", &omapctrl_ick),
+       CLK(NULL, "dss_tv_fck", &dss_tv_fck),
+       CLK(NULL, "dss_96m_fck", &dss_96m_fck),
+       CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck),
+       CLK(NULL, "init_60m_fclk", &dummy_ck),
+       CLK(NULL, "gpt1_fck", &gpt1_fck),
+       CLK(NULL, "aes2_ick", &aes2_ick),
+       CLK(NULL, "wkup_32k_fck", &wkup_32k_fck),
+       CLK(NULL, "gpio1_dbck", &gpio1_dbck),
+       CLK(NULL, "sha12_ick", &sha12_ick),
+       CLK(NULL, "wdt2_fck", &wdt2_fck),
+       CLK(NULL, "wkup_l4_ick", &wkup_l4_ick),
+       CLK("omap_wdt", "ick", &wdt2_ick),
+       CLK(NULL, "wdt2_ick", &wdt2_ick),
+       CLK(NULL, "wdt1_ick", &wdt1_ick),
+       CLK(NULL, "gpio1_ick", &gpio1_ick),
+       CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick),
+       CLK(NULL, "gpt12_ick", &gpt12_ick),
+       CLK(NULL, "gpt1_ick", &gpt1_ick),
+       CLK(NULL, "per_96m_fck", &per_96m_fck),
+       CLK(NULL, "per_48m_fck", &per_48m_fck),
+       CLK(NULL, "uart3_fck", &uart3_fck),
+       CLK(NULL, "gpt2_fck", &gpt2_fck),
+       CLK(NULL, "gpt3_fck", &gpt3_fck),
+       CLK(NULL, "gpt4_fck", &gpt4_fck),
+       CLK(NULL, "gpt5_fck", &gpt5_fck),
+       CLK(NULL, "gpt6_fck", &gpt6_fck),
+       CLK(NULL, "gpt7_fck", &gpt7_fck),
+       CLK(NULL, "gpt8_fck", &gpt8_fck),
+       CLK(NULL, "gpt9_fck", &gpt9_fck),
+       CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck),
+       CLK(NULL, "gpio6_dbck", &gpio6_dbck),
+       CLK(NULL, "gpio5_dbck", &gpio5_dbck),
+       CLK(NULL, "gpio4_dbck", &gpio4_dbck),
+       CLK(NULL, "gpio3_dbck", &gpio3_dbck),
+       CLK(NULL, "gpio2_dbck", &gpio2_dbck),
+       CLK(NULL, "wdt3_fck", &wdt3_fck),
+       CLK(NULL, "per_l4_ick", &per_l4_ick),
+       CLK(NULL, "gpio6_ick", &gpio6_ick),
+       CLK(NULL, "gpio5_ick", &gpio5_ick),
+       CLK(NULL, "gpio4_ick", &gpio4_ick),
+       CLK(NULL, "gpio3_ick", &gpio3_ick),
+       CLK(NULL, "gpio2_ick", &gpio2_ick),
+       CLK(NULL, "wdt3_ick", &wdt3_ick),
+       CLK(NULL, "uart3_ick", &uart3_ick),
+       CLK(NULL, "uart4_ick", &uart4_ick),
+       CLK(NULL, "gpt9_ick", &gpt9_ick),
+       CLK(NULL, "gpt8_ick", &gpt8_ick),
+       CLK(NULL, "gpt7_ick", &gpt7_ick),
+       CLK(NULL, "gpt6_ick", &gpt6_ick),
+       CLK(NULL, "gpt5_ick", &gpt5_ick),
+       CLK(NULL, "gpt4_ick", &gpt4_ick),
+       CLK(NULL, "gpt3_ick", &gpt3_ick),
+       CLK(NULL, "gpt2_ick", &gpt2_ick),
+       CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
+       CLK("omap-mcbsp.3", "ick", &mcbsp3_ick),
+       CLK("omap-mcbsp.4", "ick", &mcbsp4_ick),
+       CLK(NULL, "mcbsp4_ick", &mcbsp2_ick),
+       CLK(NULL, "mcbsp3_ick", &mcbsp3_ick),
+       CLK(NULL, "mcbsp2_ick", &mcbsp4_ick),
+       CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
+       CLK(NULL, "mcbsp3_fck", &mcbsp3_fck),
+       CLK(NULL, "mcbsp4_fck", &mcbsp4_fck),
+       CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
+       CLK("etb", "emu_src_ck", &emu_src_ck),
+       CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
+       CLK(NULL, "emu_src_ck", &emu_src_ck),
+       CLK(NULL, "pclk_fck", &pclk_fck),
+       CLK(NULL, "pclkx2_fck", &pclkx2_fck),
+       CLK(NULL, "atclk_fck", &atclk_fck),
+       CLK(NULL, "traceclk_src_fck", &traceclk_src_fck),
+       CLK(NULL, "traceclk_fck", &traceclk_fck),
+       CLK(NULL, "secure_32k_fck", &secure_32k_fck),
+       CLK(NULL, "gpt12_fck", &gpt12_fck),
+       CLK(NULL, "wdt1_fck", &wdt1_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_am35xx_omap3430es2plus_clks[] = {
+       CLK(NULL, "dpll5_ck", &dpll5_ck),
+       CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck),
+       CLK(NULL, "core_d3_ck", &core_d3_ck),
+       CLK(NULL, "core_d4_ck", &core_d4_ck),
+       CLK(NULL, "core_d6_ck", &core_d6_ck),
+       CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck),
+       CLK(NULL, "core_d2_ck", &core_d2_ck),
+       CLK(NULL, "corex2_d3_fck", &corex2_d3_fck),
+       CLK(NULL, "corex2_d5_fck", &corex2_d5_fck),
+       CLK(NULL, "sgx_fck", &sgx_fck),
+       CLK(NULL, "sgx_ick", &sgx_ick),
+       CLK(NULL, "cpefuse_fck", &cpefuse_fck),
+       CLK(NULL, "ts_fck", &ts_fck),
+       CLK(NULL, "usbtll_fck", &usbtll_fck),
+       CLK(NULL, "usbtll_ick", &usbtll_ick),
+       CLK("omap_hsmmc.2", "ick", &mmchs3_ick),
+       CLK(NULL, "mmchs3_ick", &mmchs3_ick),
+       CLK(NULL, "mmchs3_fck", &mmchs3_fck),
+       CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2),
+       CLK("omapdss_dss", "ick", &dss_ick_3430es2),
+       CLK(NULL, "dss_ick", &dss_ick_3430es2),
+       CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck),
+       CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck),
+       CLK(NULL, "usbhost_ick", &usbhost_ick),
+       { NULL },
+};
+
+static struct ti_clk_alias omap3430es1_clks[] = {
+       CLK(NULL, "gfx_l3_ck", &gfx_l3_ck),
+       CLK(NULL, "gfx_l3_fck", &gfx_l3_fck),
+       CLK(NULL, "gfx_l3_ick", &gfx_l3_ick),
+       CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck),
+       CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck),
+       CLK(NULL, "d2d_26m_fck", &d2d_26m_fck),
+       CLK(NULL, "fshostusb_fck", &fshostusb_fck),
+       CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1),
+       CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1),
+       CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1),
+       CLK(NULL, "fac_ick", &fac_ick),
+       CLK(NULL, "ssi_ick", &ssi_ick_3430es1),
+       CLK(NULL, "usb_l4_ick", &usb_l4_ick),
+       CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1),
+       CLK("omapdss_dss", "ick", &dss_ick_3430es1),
+       CLK(NULL, "dss_ick", &dss_ick_3430es1),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_clks[] = {
+       CLK(NULL, "uart4_fck", &uart4_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias am35xx_clks[] = {
+       CLK(NULL, "ipss_ick", &ipss_ick),
+       CLK(NULL, "rmii_ck", &rmii_ck),
+       CLK(NULL, "pclk_ck", &pclk_ck),
+       CLK(NULL, "emac_ick", &emac_ick),
+       CLK(NULL, "emac_fck", &emac_fck),
+       CLK("davinci_emac.0", NULL, &emac_ick),
+       CLK("davinci_mdio.0", NULL, &emac_fck),
+       CLK("vpfe-capture", "master", &vpfe_ick),
+       CLK("vpfe-capture", "slave", &vpfe_fck),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx),
+       CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx),
+       CLK(NULL, "hecc_ck", &hecc_ck),
+       CLK(NULL, "uart4_ick", &uart4_ick_am35xx),
+       CLK(NULL, "uart4_fck", &uart4_fck_am35xx),
+       { NULL },
+};
+
+static struct ti_clk *omap36xx_clk_patches[] = {
+       &dpll4_m3x2_ck_omap36xx,
+       &dpll3_m3x2_ck_omap36xx,
+       &dpll4_m6x2_ck_omap36xx,
+       &dpll4_m2x2_ck_omap36xx,
+       &dpll4_m5x2_ck_omap36xx,
+       &dpll4_ck_omap36xx,
+       NULL,
+};
+
+static const char *enable_init_clks[] = {
+       "sdrc_ick",
+       "gpmc_fck",
+       "omapctrl_ick",
+};
+
+static void __init omap3_clk_legacy_common_init(void)
+{
+       omap2_clk_disable_autoidle_all();
+
+       omap2_clk_enable_init_clocks(enable_init_clks,
+                                    ARRAY_SIZE(enable_init_clks));
+
+       pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+               (clk_get_rate(osc_sys_ck.clk) / 1000000),
+               (clk_get_rate(osc_sys_ck.clk) / 100000) % 10,
+               (clk_get_rate(core_ck.clk) / 1000000),
+               (clk_get_rate(arm_fck.clk) / 1000000));
+}
+
+int __init omap3430es1_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(omap3430es1_clks);
+       r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+
+       return r;
+}
+
+int __init omap3430_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
+
+int __init omap36xx_clk_legacy_init(void)
+{
+       int r;
+
+       ti_clk_patch_legacy_clks(omap36xx_clk_patches);
+       r = ti_clk_register_legacy_clks(omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
+
+int __init am35xx_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(am35xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
index 0d1750a8aea40db16a470c697418d1dbd8b6bb45..383a06e49b09db95e465260369fe307afebb92e2 100644 (file)
@@ -327,7 +327,6 @@ enum {
        OMAP3_SOC_OMAP3430_ES1,
        OMAP3_SOC_OMAP3430_ES2_PLUS,
        OMAP3_SOC_OMAP3630,
-       OMAP3_SOC_TI81XX,
 };
 
 static int __init omap3xxx_dt_clk_init(int soc_type)
@@ -370,7 +369,7 @@ static int __init omap3xxx_dt_clk_init(int soc_type)
                (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
                (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
 
-       if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
+       if (soc_type != OMAP3_SOC_OMAP3430_ES1)
                omap3_clk_lock_dpll5();
 
        return 0;
@@ -390,8 +389,3 @@ int __init am35xx_dt_clk_init(void)
 {
        return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
 }
-
-int __init ti81xx_dt_clk_init(void)
-{
-       return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
-}
index 02517a8206bda8eda55ef32aac17f8163dc2d064..4f4c87751db521d0ce05dcd82e2a62e81408f9d9 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
index 5e183993e3ec56b926fffd252325000d02566d03..14160b2235480f1b2855a5c1f3d16865321ce18a 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/clk/ti.h>
index 62ac8f6e480c61abf1760aeec2ebc980fd716cc3..ee32f4deebf40280be8e4d4e6e454e815c53cd67 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
new file mode 100644 (file)
index 0000000..9451e65
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk dm816x_clks[] = {
+       DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
+       DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+       DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+       DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+       DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+       DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+       DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+       DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+       DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+       DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+       DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"),
+       DT_CLK(NULL, "sysclk5_ck", "sysclk5_ck"),
+       DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"),
+       DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"),
+       DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"),
+       DT_CLK(NULL, "sysclk24_ck", "sysclk24_ck"),
+       DT_CLK("4a100000.ethernet", "sysclk24_ck", "sysclk24_ck"),
+       { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+       "ddr_pll_clk1",
+       "ddr_pll_clk2",
+       "ddr_pll_clk3",
+};
+
+int __init ti81xx_dt_clk_init(void)
+{
+       ti_dt_clocks_register(dm816x_clks);
+       omap2_clk_disable_autoidle_all();
+       omap2_clk_enable_init_clocks(enable_init_clks,
+                                    ARRAY_SIZE(enable_init_clks));
+
+       return 0;
+}
index 337abe5909e1f272cdcdca85e5ae0fb3fe1f4729..e22b95646e09a8357e5adb3786f726c8e6baab10 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -183,3 +185,128 @@ void ti_dt_clk_init_retry_clks(void)
                retries--;
        }
 }
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+/**
+ * ti_clk_patch_legacy_clks - overwrite legacy clock data in place
+ * @patch: NULL-terminated array of replacement clock descriptions
+ *
+ * Each entry's ->patch member points at the struct ti_clk to be
+ * replaced; the entry's own contents are copied over it wholesale.
+ */
+void __init ti_clk_patch_legacy_clks(struct ti_clk **patch)
+{
+       while (*patch) {
+               memcpy((*patch)->patch, *patch, sizeof(**patch));
+               patch++;
+       }
+}
+
+/**
+ * ti_clk_register_clk - register a single legacy (non-DT) TI clock
+ * @setup: legacy clock description
+ *
+ * Dispatches on setup->type to the matching registration helper.  The
+ * resulting clock is cached in setup->clk, so repeated calls return the
+ * same clock.  Returns the clock or an ERR_PTR on failure; -EAGAIN from
+ * a type handler means a parent is not ready yet and the caller may
+ * retry later.
+ */
+struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
+{
+       struct clk *clk;
+       struct ti_clk_fixed *fixed;
+       struct ti_clk_fixed_factor *fixed_factor;
+       struct clk_hw *clk_hw;
+
+       /* already registered on an earlier pass? */
+       if (setup->clk)
+               return setup->clk;
+
+       switch (setup->type) {
+       case TI_CLK_FIXED:
+               fixed = setup->data;
+
+               clk = clk_register_fixed_rate(NULL, setup->name, NULL,
+                                             CLK_IS_ROOT, fixed->frequency);
+               break;
+       case TI_CLK_MUX:
+               clk = ti_clk_register_mux(setup);
+               break;
+       case TI_CLK_DIVIDER:
+               clk = ti_clk_register_divider(setup);
+               break;
+       case TI_CLK_COMPOSITE:
+               clk = ti_clk_register_composite(setup);
+               break;
+       case TI_CLK_FIXED_FACTOR:
+               fixed_factor = setup->data;
+
+               clk = clk_register_fixed_factor(NULL, setup->name,
+                                               fixed_factor->parent,
+                                               0, fixed_factor->mult,
+                                               fixed_factor->div);
+               break;
+       case TI_CLK_GATE:
+               clk = ti_clk_register_gate(setup);
+               break;
+       case TI_CLK_DPLL:
+               clk = ti_clk_register_dpll(setup);
+               break;
+       default:
+               pr_err("bad type for %s!\n", setup->name);
+               clk = ERR_PTR(-EINVAL);
+       }
+
+       if (!IS_ERR(clk)) {
+               setup->clk = clk;
+               if (setup->clkdm_name) {
+                       /*
+                        * Basic clocks have no clk_hw_omap wrapper, so
+                        * there is nowhere to attach a clockdomain.
+                        */
+                       if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+                               pr_warn("can't setup clkdm for basic clk %s\n",
+                                       setup->name);
+                       } else {
+                               clk_hw = __clk_get_hw(clk);
+                               to_clk_hw_omap(clk_hw)->clkdm_name =
+                                       setup->clkdm_name;
+                               omap2_init_clk_clkdm(clk_hw);
+                       }
+               }
+       }
+
+       return clk;
+}
+
+/**
+ * ti_clk_register_legacy_clks - register an array of legacy clocks
+ * @clks: array terminated by an entry with a NULL ->clk pointer
+ *
+ * Registers each clock and adds its clkdev lookup.  Clocks whose
+ * parents are not ready yet (-EAGAIN) are parked on the file-scope
+ * retry_list; registration is then re-attempted until a full pass over
+ * the list makes no progress.  Clocks still on the list after that are
+ * left unregistered (only hard errors fail the call).
+ *
+ * Returns 0 on success or a negative error code on the first hard
+ * registration failure.
+ */
+int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks)
+{
+       struct clk *clk;
+       bool retry;
+       struct ti_clk_alias *retry_clk;
+       struct ti_clk_alias *tmp;
+
+       while (clks->clk) {
+               clk = ti_clk_register_clk(clks->clk);
+               if (IS_ERR(clk)) {
+                       if (PTR_ERR(clk) == -EAGAIN) {
+                               list_add(&clks->link, &retry_list);
+                       } else {
+                               pr_err("register for %s failed: %ld\n",
+                                      clks->clk->name, PTR_ERR(clk));
+                               return PTR_ERR(clk);
+                       }
+               } else {
+                       clks->lk.clk = clk;
+                       clkdev_add(&clks->lk);
+               }
+               clks++;
+       }
+
+       retry = true;
+
+       /* keep retrying as long as at least one clock made progress */
+       while (!list_empty(&retry_list) && retry) {
+               retry = false;
+               list_for_each_entry_safe(retry_clk, tmp, &retry_list, link) {
+                       pr_debug("retry-init: %s\n", retry_clk->clk->name);
+                       clk = ti_clk_register_clk(retry_clk->clk);
+                       if (IS_ERR(clk)) {
+                               if (PTR_ERR(clk) == -EAGAIN) {
+                                       continue;
+                               } else {
+                                       pr_err("register for %s failed: %ld\n",
+                                              retry_clk->clk->name,
+                                              PTR_ERR(clk));
+                                       return PTR_ERR(clk);
+                               }
+                       } else {
+                               retry = true;
+                               retry_clk->lk.clk = clk;
+                               clkdev_add(&retry_clk->lk);
+                               list_del(&retry_clk->link);
+                       }
+               }
+       }
+
+       return 0;
+}
+#endif
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
new file mode 100644 (file)
index 0000000..404158d
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * TI Clock driver internal definitions
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_CLK_TI_CLOCK__
+#define __DRIVERS_CLK_TI_CLOCK__
+
+/* Legacy clock type identifiers, stored in struct ti_clk.type */
+enum {
+       TI_CLK_FIXED,
+       TI_CLK_MUX,
+       TI_CLK_DIVIDER,
+       TI_CLK_COMPOSITE,
+       TI_CLK_FIXED_FACTOR,
+       TI_CLK_GATE,
+       TI_CLK_DPLL,
+};
+
+/* Global flags (bits 0-4, valid for every clock type) */
+#define CLKF_INDEX_POWER_OF_TWO                (1 << 0)
+#define CLKF_INDEX_STARTS_AT_ONE       (1 << 1)
+#define CLKF_SET_RATE_PARENT           (1 << 2)
+#define CLKF_OMAP3                     (1 << 3)
+#define CLKF_AM35XX                    (1 << 4)
+
+/* Gate flags (bits 5+ are reused; interpretation depends on clock type) */
+#define CLKF_SET_BIT_TO_DISABLE                (1 << 5)
+#define CLKF_INTERFACE                 (1 << 6)
+#define CLKF_SSI                       (1 << 7)
+#define CLKF_DSS                       (1 << 8)
+#define CLKF_HSOTGUSB                  (1 << 9)
+#define CLKF_WAIT                      (1 << 10)
+#define CLKF_NO_WAIT                   (1 << 11)
+#define CLKF_HSDIV                     (1 << 12)
+#define CLKF_CLKDM                     (1 << 13)
+
+/* DPLL flags (bits 5+ are reused; interpretation depends on clock type) */
+#define CLKF_LOW_POWER_STOP            (1 << 5)
+#define CLKF_LOCK                      (1 << 6)
+#define CLKF_LOW_POWER_BYPASS          (1 << 7)
+#define CLKF_PER                       (1 << 8)
+#define CLKF_CORE                      (1 << 9)
+#define CLKF_J_TYPE                    (1 << 10)
+
+/* Initializer for one struct ti_clk_alias table entry */
+#define CLK(dev, con, ck)              \
+       {                               \
+               .lk = {                 \
+                       .dev_id = dev,  \
+                       .con_id = con,  \
+               },                      \
+               .clk = ck,              \
+       }
+
+/* Legacy clock description; ->type selects the layout pointed to by ->data */
+struct ti_clk {
+       const char *name;
+       const char *clkdm_name;
+       int type;
+       void *data;
+       struct ti_clk *patch;
+       struct clk *clk;
+};
+
+/* Pairs a legacy clock with its clkdev lookup; ->link parks it for retry */
+struct ti_clk_alias {
+       struct ti_clk *clk;
+       struct clk_lookup lk;
+       struct list_head link;
+};
+
+/* TI_CLK_FIXED payload: a fixed-rate clock */
+struct ti_clk_fixed {
+       u32 frequency;
+       u16 flags;
+};
+
+/* TI_CLK_MUX payload; reg/module are decoded into an address later */
+struct ti_clk_mux {
+       u8 bit_shift;
+       int num_parents;
+       u16 reg;
+       u8 module;
+       const char **parents;
+       u16 flags;
+};
+
+/* TI_CLK_DIVIDER payload; either max_div or an explicit divider table */
+struct ti_clk_divider {
+       const char *parent;
+       u8 bit_shift;
+       u16 max_div;
+       u16 reg;
+       u8 module;
+       int *dividers;
+       int num_dividers;
+       u16 flags;
+};
+
+/* TI_CLK_FIXED_FACTOR payload: rate = parent rate * mult / div */
+struct ti_clk_fixed_factor {
+       const char *parent;
+       u16 div;
+       u16 mult;
+       u16 flags;
+};
+
+/* TI_CLK_GATE payload */
+struct ti_clk_gate {
+       const char *parent;
+       u8 bit_shift;
+       u16 reg;
+       u8 module;
+       u16 flags;
+};
+
+/* TI_CLK_COMPOSITE payload: any of the components may be NULL */
+struct ti_clk_composite {
+       struct ti_clk_divider *divider;
+       struct ti_clk_mux *mux;
+       struct ti_clk_gate *gate;
+       u16 flags;
+};
+
+struct ti_clk_clkdm_gate {
+       const char *parent;
+       u16 flags;
+};
+
+/* TI_CLK_DPLL payload; register offsets are relative to ->module */
+struct ti_clk_dpll {
+       int num_parents;
+       u16 control_reg;
+       u16 idlest_reg;
+       u16 autoidle_reg;
+       u16 mult_div1_reg;
+       u8 module;
+       const char **parents;
+       u16 flags;
+       u8 modes;
+       u32 mult_mask;
+       u32 div1_mask;
+       u32 enable_mask;
+       u32 autoidle_mask;
+       u32 freqsel_mask;
+       u32 idlest_mask;
+       u32 dco_mask;
+       u32 sddiv_mask;
+       u16 max_multiplier;
+       u16 max_divider;
+       u8 min_divider;
+       u8 auto_recal_bit;
+       u8 recal_en_bit;
+       u8 recal_st_bit;
+};
+
+/* Per-type registration helpers; each returns the clk or an ERR_PTR */
+struct clk *ti_clk_register_gate(struct ti_clk *setup);
+struct clk *ti_clk_register_interface(struct ti_clk *setup);
+struct clk *ti_clk_register_mux(struct ti_clk *setup);
+struct clk *ti_clk_register_divider(struct ti_clk *setup);
+struct clk *ti_clk_register_composite(struct ti_clk *setup);
+struct clk *ti_clk_register_dpll(struct ti_clk *setup);
+
+/* Component builders used by the composite clock registration */
+struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup);
+struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup);
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
+
+void ti_clk_patch_legacy_clks(struct ti_clk **patch);
+struct clk *ti_clk_register_clk(struct ti_clk *setup);
+int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
+
+#endif
index 19d8980ba458ef4e37f69cd38ba9fe6093d3ecfb..3654f61912ebb7d099771f5f5e7fba9a5bbc4e92 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/clk/ti.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -116,8 +118,46 @@ static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
 
 #define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
 
-static void __init ti_clk_register_composite(struct clk_hw *hw,
-                                            struct device_node *node)
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+/**
+ * ti_clk_register_composite - register a legacy composite clock
+ * @setup: legacy clock description with a struct ti_clk_composite payload
+ *
+ * Builds the optional divider, gate and mux components and registers
+ * them as a single composite clock.  The parent name list is taken from
+ * the highest-priority component present (mux over gate over divider).
+ *
+ * NOTE(review): the ti_clk_build_component_*() helpers may return an
+ * ERR_PTR on allocation failure, which is forwarded to
+ * clk_register_composite() unchecked here — confirm this is intended.
+ */
+struct clk *ti_clk_register_composite(struct ti_clk *setup)
+{
+       struct ti_clk_composite *comp;
+       struct clk_hw *gate;
+       struct clk_hw *mux;
+       struct clk_hw *div;
+       int num_parents = 1;
+       const char **parent_names = NULL;
+       struct clk *clk;
+
+       comp = setup->data;
+
+       div = ti_clk_build_component_div(comp->divider);
+       gate = ti_clk_build_component_gate(comp->gate);
+       mux = ti_clk_build_component_mux(comp->mux);
+
+       if (div)
+               parent_names = &comp->divider->parent;
+
+       if (gate)
+               parent_names = &comp->gate->parent;
+
+       if (mux) {
+               num_parents = comp->mux->num_parents;
+               parent_names = comp->mux->parents;
+       }
+
+       clk = clk_register_composite(NULL, setup->name,
+                                    parent_names, num_parents, mux,
+                                    &ti_clk_mux_ops, div,
+                                    &ti_composite_divider_ops, gate,
+                                    &ti_composite_gate_ops, 0);
+
+       return clk;
+}
+#endif
+
+static void __init _register_composite(struct clk_hw *hw,
+                                      struct device_node *node)
 {
        struct clk *clk;
        struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
@@ -136,7 +176,7 @@ static void __init ti_clk_register_composite(struct clk_hw *hw,
                        pr_debug("component %s not ready for %s, retry\n",
                                 cclk->comp_nodes[i]->name, node->name);
                        if (!ti_clk_retry_init(node, hw,
-                                              ti_clk_register_composite))
+                                              _register_composite))
                                return;
 
                        goto cleanup;
@@ -216,7 +256,7 @@ static void __init of_ti_composite_clk_setup(struct device_node *node)
        for (i = 0; i < num_clks; i++)
                cclk->comp_nodes[i] = _get_component_node(node, i);
 
-       ti_clk_register_composite(&cclk->hw, node);
+       _register_composite(&cclk->hw, node);
 }
 CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
               of_ti_composite_clk_setup);
index bff2b5b8ff598b2e150496eb3ebc984c651a0ebd..6211893c0980665749aa2c08577f9f3c17da4a78 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -300,6 +301,134 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        return clk;
 }
 
+/*
+ * _get_div_table_from_setup - build a clk_div_table from legacy data
+ * @setup: legacy divider description
+ * @width: out parameter, bit width of the divider field
+ *
+ * If no explicit divider table is provided, only *width is computed
+ * from max_div and NULL is returned (a plain min/max divider).
+ * Otherwise returns a NULL-terminated, kernel-allocated table built
+ * from the non-zero entries, or ERR_PTR(-ENOMEM).  The caller owns
+ * the returned table.
+ */
+static struct clk_div_table *
+_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width)
+{
+       int valid_div = 0;
+       struct clk_div_table *table;
+       int i;
+       int div;
+       u32 val;
+       u8 flags;
+
+       if (!setup->num_dividers) {
+               /* Clk divider table not provided, determine min/max divs */
+               flags = setup->flags;
+
+               if (flags & CLKF_INDEX_STARTS_AT_ONE)
+                       val = 1;
+               else
+                       val = 0;
+
+               div = 1;
+
+               /* count register values needed to reach max_div */
+               while (div < setup->max_div) {
+                       if (flags & CLKF_INDEX_POWER_OF_TWO)
+                               div <<= 1;
+                       else
+                               div++;
+                       val++;
+               }
+
+               *width = fls(val);
+
+               return NULL;
+       }
+
+       /* zero entries mark unused register values */
+       for (i = 0; i < setup->num_dividers; i++)
+               if (setup->dividers[i])
+                       valid_div++;
+
+       /* kcalloc: overflow-checked multiply, +1 for the terminator */
+       table = kcalloc(valid_div + 1, sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return ERR_PTR(-ENOMEM);
+
+       valid_div = 0;
+       *width = 0;
+
+       for (i = 0; i < setup->num_dividers; i++)
+               if (setup->dividers[i]) {
+                       table[valid_div].div = setup->dividers[i];
+                       table[valid_div].val = i;
+                       valid_div++;
+                       /* track the highest used register value */
+                       *width = i;
+               }
+
+       *width = fls(*width);
+
+       return table;
+}
+
+/**
+ * ti_clk_build_component_div - build a divider component for a composite
+ * @setup: legacy divider description, may be NULL
+ *
+ * Returns NULL if @setup is NULL (component not present), the clk_hw of
+ * the prepared divider on success, or ERR_PTR(-ENOMEM) on allocation
+ * failure.
+ */
+struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
+{
+       struct clk_divider *div;
+       struct clk_omap_reg *reg;
+       struct clk_div_table *table;
+
+       if (!setup)
+               return NULL;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       /* encode module index + offset in place of an ioremapped address */
+       reg = (struct clk_omap_reg *)&div->reg;
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+               div->flags |= CLK_DIVIDER_ONE_BASED;
+
+       if (setup->flags & CLKF_INDEX_POWER_OF_TWO)
+               div->flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+       /*
+        * _get_div_table_from_setup() can fail with ERR_PTR(-ENOMEM);
+        * storing that into div->table unchecked would hand a poisoned
+        * pointer to the clock framework and leak div.
+        */
+       table = _get_div_table_from_setup(setup, &div->width);
+       if (IS_ERR(table)) {
+               kfree(div);
+               return ERR_CAST(table);
+       }
+       div->table = table;
+
+       div->shift = setup->bit_shift;
+
+       return &div->hw;
+}
+
+/**
+ * ti_clk_register_divider - register a legacy divider clock
+ * @setup: legacy clock description with a struct ti_clk_divider payload
+ *
+ * Encodes the module/offset pair into a clk_omap_reg overlaid on a u32
+ * and passes that as the (pseudo) register address; the OMAP register
+ * accessors decode it again.  Returns the clock or an ERR_PTR; the
+ * divider table is freed again if registration fails.
+ */
+struct clk *ti_clk_register_divider(struct ti_clk *setup)
+{
+       struct ti_clk_divider *div;
+       struct clk_omap_reg *reg_setup;
+       u32 reg;
+       u8 width;
+       u32 flags = 0;
+       u8 div_flags = 0;
+       struct clk_div_table *table;
+       struct clk *clk;
+
+       div = setup->data;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       reg_setup->index = div->module;
+       reg_setup->offset = div->reg;
+
+       if (div->flags & CLKF_INDEX_STARTS_AT_ONE)
+               div_flags |= CLK_DIVIDER_ONE_BASED;
+
+       if (div->flags & CLKF_INDEX_POWER_OF_TWO)
+               div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+       if (div->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       table = _get_div_table_from_setup(div, &width);
+       if (IS_ERR(table))
+               return (struct clk *)table;
+
+       /* reg carries the encoded module/offset, not a real address */
+       clk = _register_divider(NULL, setup->name, div->parent,
+                               flags, (void __iomem *)reg, div->bit_shift,
+                               width, div_flags, table, NULL);
+
+       if (IS_ERR(clk))
+               kfree(table);
+
+       return clk;
+}
+
 static struct clk_div_table *
 __init ti_clk_get_div_table(struct device_node *node)
 {
@@ -455,7 +584,8 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
                goto cleanup;
 
        clk = _register_divider(NULL, node->name, parent_name, flags, reg,
-                               shift, width, clk_divider_flags, table, NULL);
+                               shift, width, clk_divider_flags, table,
+                               NULL);
 
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
index 85ac0dd501dea5fff98801ba47edabbf972492a5..81dc4698dc41740e77e82411b00c67ae9aa148ba 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -130,7 +131,7 @@ static const struct clk_ops dpll_x2_ck_ops = {
 };
 
 /**
- * ti_clk_register_dpll - low level registration of a DPLL clock
+ * _register_dpll - low level registration of a DPLL clock
  * @hw: hardware clock definition for the clock
  * @node: device node for the clock
  *
@@ -138,8 +139,8 @@ static const struct clk_ops dpll_x2_ck_ops = {
  * clk-bypass is missing), the clock is added to retry list and
  * the initialization is retried on later stage.
  */
-static void __init ti_clk_register_dpll(struct clk_hw *hw,
-                                       struct device_node *node)
+static void __init _register_dpll(struct clk_hw *hw,
+                                 struct device_node *node)
 {
        struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
        struct dpll_data *dd = clk_hw->dpll_data;
@@ -151,7 +152,7 @@ static void __init ti_clk_register_dpll(struct clk_hw *hw,
        if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
                pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
                         node->name);
-               if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll))
+               if (!ti_clk_retry_init(node, hw, _register_dpll))
                        return;
 
                goto cleanup;
@@ -175,20 +176,118 @@ cleanup:
        kfree(clk_hw);
 }
 
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+/*
+ * _get_reg - encode a module index + register offset as a pseudo
+ * __iomem address.  The returned value is NOT a mapped address: the
+ * clk_omap_reg overlay is decoded again by the OMAP low-level register
+ * accessors.
+ *
+ * NOTE(review): only used in this file — could likely be static;
+ * confirm no other user before changing linkage.
+ */
+void __iomem *_get_reg(u8 module, u16 offset)
+{
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       reg_setup->index = module;
+       reg_setup->offset = offset;
+
+       return (void __iomem *)reg;
+}
+
+/**
+ * ti_clk_register_dpll - register a legacy (non-DT) DPLL clock
+ * @setup: legacy clock description with a struct ti_clk_dpll payload
+ *
+ * Returns the registered clock, ERR_PTR(-EINVAL) for bad data,
+ * ERR_PTR(-EAGAIN) if the ref/bypass parents are not available yet
+ * (caller may retry), or ERR_PTR(-ENOMEM).
+ */
+struct clk *ti_clk_register_dpll(struct ti_clk *setup)
+{
+       struct clk_hw_omap *clk_hw;
+       struct clk_init_data init = { NULL };
+       struct dpll_data *dd;
+       struct clk *clk;
+       struct ti_clk_dpll *dpll;
+       const struct clk_ops *ops = &omap3_dpll_ck_ops;
+       struct clk *clk_ref;
+       struct clk *clk_bypass;
+
+       dpll = setup->data;
+
+       /* a DPLL needs at least a reference and a bypass parent */
+       if (dpll->num_parents < 2)
+               return ERR_PTR(-EINVAL);
+
+       clk_ref = clk_get_sys(NULL, dpll->parents[0]);
+       clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
+
+       if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
+               return ERR_PTR(-EAGAIN);
+
+       dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+       clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       if (!dd || !clk_hw) {
+               clk = ERR_PTR(-ENOMEM);
+               goto cleanup;
+       }
+
+       clk_hw->dpll_data = dd;
+       clk_hw->ops = &clkhwops_omap3_dpll;
+       clk_hw->hw.init = &init;
+       clk_hw->flags = MEMMAP_ADDRESSING;
+
+       /*
+        * Select the clk_ops BEFORE copying them into init: previously
+        * init.ops was assigned first, so these CLKF_CORE/CLKF_PER
+        * selections never took effect and every DPLL got the default
+        * omap3_dpll_ck_ops.
+        */
+       if (dpll->flags & CLKF_CORE)
+               ops = &omap3_dpll_core_ck_ops;
+
+       if (dpll->flags & CLKF_PER)
+               ops = &omap3_dpll_per_ck_ops;
+
+       init.name = setup->name;
+       init.ops = ops;
+
+       init.num_parents = dpll->num_parents;
+       init.parent_names = dpll->parents;
+
+       /* register "addresses" are encoded module/offset pairs */
+       dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
+       dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
+       dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
+       dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
+
+       dd->modes = dpll->modes;
+       dd->div1_mask = dpll->div1_mask;
+       dd->idlest_mask = dpll->idlest_mask;
+       dd->mult_mask = dpll->mult_mask;
+       dd->autoidle_mask = dpll->autoidle_mask;
+       dd->enable_mask = dpll->enable_mask;
+       dd->sddiv_mask = dpll->sddiv_mask;
+       dd->dco_mask = dpll->dco_mask;
+       dd->max_divider = dpll->max_divider;
+       dd->min_divider = dpll->min_divider;
+       dd->max_multiplier = dpll->max_multiplier;
+       dd->auto_recal_bit = dpll->auto_recal_bit;
+       dd->recal_en_bit = dpll->recal_en_bit;
+       dd->recal_st_bit = dpll->recal_st_bit;
+
+       dd->clk_ref = clk_ref;
+       dd->clk_bypass = clk_bypass;
+
+       if (dpll->flags & CLKF_J_TYPE)
+               dd->flags |= DPLL_J_TYPE;
+
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (!IS_ERR(clk))
+               return clk;
+
+cleanup:
+       kfree(dd);
+       kfree(clk_hw);
+       return clk;
+}
+#endif
+
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
        defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
        defined(CONFIG_SOC_AM43XX)
 /**
- * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
+ * _register_dpll_x2 - Registers a DPLLx2 clock
  * @node: device node for this clock
  * @ops: clk_ops for this clock
  * @hw_ops: clk_hw_ops for this clock
  *
  * Initializes a DPLL x 2 clock from device tree data.
  */
-static void ti_clk_register_dpll_x2(struct device_node *node,
-                                   const struct clk_ops *ops,
-                                   const struct clk_hw_omap_ops *hw_ops)
+static void _register_dpll_x2(struct device_node *node,
+                             const struct clk_ops *ops,
+                             const struct clk_hw_omap_ops *hw_ops)
 {
        struct clk *clk;
        struct clk_init_data init = { NULL };
@@ -318,7 +417,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
        if (dpll_mode)
                dd->modes = dpll_mode;
 
-       ti_clk_register_dpll(&clk_hw->hw, node);
+       _register_dpll(&clk_hw->hw, node);
        return;
 
 cleanup:
@@ -332,7 +431,7 @@ cleanup:
        defined(CONFIG_SOC_DRA7XX)
 static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
 {
-       ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
+       _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
 }
 CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
               of_ti_omap4_dpll_x2_setup);
@@ -341,7 +440,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
 #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
 {
-       ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
+       _register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
 }
 CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
               of_ti_am3_dpll_x2_setup);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
new file mode 100644 (file)
index 0000000..6ef8963
--- /dev/null
@@ -0,0 +1,410 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <asm/div64.h>
+
+/* FAPLL Control Register PLL_CTRL */
+#define FAPLL_MAIN_LOCK                BIT(7)
+#define FAPLL_MAIN_PLLEN       BIT(3)
+#define FAPLL_MAIN_BP          BIT(2)
+#define FAPLL_MAIN_LOC_CTL     BIT(0)
+
+/* FAPLL powerdown register PWD */
+#define FAPLL_PWD_OFFSET       4
+
+#define MAX_FAPLL_OUTPUTS      7
+#define FAPLL_MAX_RETRIES      1000
+
+#define to_fapll(_hw)          container_of(_hw, struct fapll_data, hw)
+#define to_synth(_hw)          container_of(_hw, struct fapll_synth, hw)
+
+/* The bypass bit is inverted on the ddr_pll.. */
+#define fapll_is_ddr_pll(va)   (((u32)(va) & 0xffff) == 0x0440)
+
+/*
+ * The audio_pll_clk1 input is hard wired to the 27MHz bypass clock,
+ * and the audio_pll_clk1 synthesizer is hardwired to 32.768 kHz output.
+ */
+#define is_ddr_pll_clk1(va)    (((u32)(va) & 0xffff) == 0x044c)
+#define is_audio_pll_clk1(va)  (((u32)(va) & 0xffff) == 0x04a8)
+
+/* Synthesizer divider register */
+#define SYNTH_LDMDIV1          BIT(8)
+
+/* Synthesizer frequency register */
+#define SYNTH_LDFREQ           BIT(31)
+
+/* One FAPLL instance; outputs holds the PLL plus its synthesizers */
+struct fapll_data {
+       struct clk_hw hw;
+       void __iomem *base;
+       const char *name;
+       struct clk *clk_ref;
+       struct clk *clk_bypass;
+       struct clk_onecell_data outputs;
+       bool bypass_bit_inverted;       /* ddr_pll reads BP inverted */
+};
+
+/* One synthesizer output of a FAPLL; freq/div may be NULL (hardwired) */
+struct fapll_synth {
+       struct clk_hw hw;
+       struct fapll_data *fd;
+       int index;
+       void __iomem *freq;
+       void __iomem *div;
+       const char *name;
+       struct clk *clk_pll;
+};
+
+/* Report whether the PLL is in bypass; the ddr_pll has inverted polarity */
+static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
+{
+       u32 val = readl_relaxed(fd->base);
+
+       return fd->bypass_bit_inverted ? !(val & FAPLL_MAIN_BP)
+                                      : !!(val & FAPLL_MAIN_BP);
+}
+
+/* Enable the PLL by setting the PLLEN bit in PLL_CTRL */
+static int ti_fapll_enable(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       /*
+        * FAPLL_MAIN_PLLEN is already a bit mask (BIT(3)); the previous
+        * (1 << FAPLL_MAIN_PLLEN) shifted it again and toggled bit 8
+        * instead of the PLLEN bit.
+        */
+       v |= FAPLL_MAIN_PLLEN;
+       writel_relaxed(v, fd->base);
+
+       return 0;
+}
+
+/* Disable the PLL by clearing the PLLEN bit in PLL_CTRL */
+static void ti_fapll_disable(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       /* FAPLL_MAIN_PLLEN is already BIT(3); do not shift it again */
+       v &= ~FAPLL_MAIN_PLLEN;
+       writel_relaxed(v, fd->base);
+}
+
+/* Nonzero if the PLLEN bit is set in PLL_CTRL */
+static int ti_fapll_is_enabled(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       /* FAPLL_MAIN_PLLEN is already BIT(3); do not shift it again */
+       return v & FAPLL_MAIN_PLLEN;
+}
+
+/*
+ * Compute the PLL output rate: parent_rate / P * N, where the
+ * pre-divider P and multiplier N are read from PLL_CTRL.  In bypass
+ * the parent rate is passed through unchanged.
+ */
+static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
+                                         unsigned long parent_rate)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 fapll_n, fapll_p, v;
+       long long rate;
+
+       if (ti_fapll_clock_is_bypass(fd))
+               return parent_rate;
+
+       rate = parent_rate;
+
+       /* PLL pre-divider is P and multiplier is N */
+       v = readl_relaxed(fd->base);
+       fapll_p = (v >> 8) & 0xff;
+       if (fapll_p)
+               do_div(rate, fapll_p);
+       fapll_n = v >> 16;
+       if (fapll_n)
+               rate *= fapll_n;
+
+       return rate;
+}
+
+/* Parent index 1 is the bypass clock, index 0 the reference clock */
+static u8 ti_fapll_get_parent(struct clk_hw *hw)
+{
+       return ti_fapll_clock_is_bypass(to_fapll(hw)) ? 1 : 0;
+}
+
+/* clk_ops for the FAPLL itself; const: clk_init_data.ops is const */
+static const struct clk_ops ti_fapll_ops = {
+       .enable = ti_fapll_enable,
+       .disable = ti_fapll_disable,
+       .is_enabled = ti_fapll_is_enabled,
+       .recalc_rate = ti_fapll_recalc_rate,
+       .get_parent = ti_fapll_get_parent,
+};
+
+/* Power up one synthesizer; PWD bits are active-low (clear to enable) */
+static int ti_fapll_synth_enable(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       v &= ~(1 << synth->index);
+       writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+
+       return 0;
+}
+
+/* Power down one synthesizer; PWD bits are active-low (set to disable) */
+static void ti_fapll_synth_disable(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       v |= 1 << synth->index;
+       writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+}
+
+/* Nonzero when the synthesizer's (active-low) PWD bit is clear */
+static int ti_fapll_synth_is_enabled(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       return !(v & (1 << synth->index));
+}
+
+/*
+ * See dm816x TRM chapter 1.10.3 Flying Adder PLL for more info
+ */
+static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 synth_div_m;
+       long long rate;
+
+       /* The audio_pll_clk1 is hardwired to produce 32.768 kHz clock */
+       if (!synth->div)
+               return 32768;
+
+       /*
+        * PLL in bypass sets the synths in bypass mode too. The PLL rate
+        * can be also be set to 27MHz, so we can't use parent_rate to
+        * check for bypass mode.
+        */
+       if (ti_fapll_clock_is_bypass(synth->fd))
+               return parent_rate;
+
+       rate = parent_rate;
+
+       /*
+        * Synth frequency integer and fractional divider.
+        * Note that the phase output K is 8, so the result needs
+        * to be multiplied by 8.
+        */
+       if (synth->freq) {
+               u32 v, synth_int_div, synth_frac_div, synth_div_freq;
+
+               v = readl_relaxed(synth->freq);
+               synth_int_div = (v >> 24) & 0xf;
+               synth_frac_div = v & 0xffffff;
+               synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
+               rate *= 10000000;
+               do_div(rate, synth_div_freq);
+               rate *= 8;
+       }
+
+       /*
+        * Synth post-divider M.
+        * NOTE(review): a register value of 0 for M would divide by
+        * zero here — confirm the hardware guarantees a non-zero M.
+        */
+       synth_div_m = readl_relaxed(synth->div) & 0xff;
+       do_div(rate, synth_div_m);
+
+       return rate;
+}
+
+/* clk_ops for the synthesizer outputs; const: clk_init_data.ops is const */
+static const struct clk_ops ti_fapll_synt_ops = {
+       .enable = ti_fapll_synth_enable,
+       .disable = ti_fapll_synth_disable,
+       .is_enabled = ti_fapll_synth_is_enabled,
+       .recalc_rate = ti_fapll_synth_recalc_rate,
+};
+
+/*
+ * ti_fapll_synth_setup - register one synthesizer output of a FAPLL
+ * @fd: parent FAPLL
+ * @freq: FREQ register, or NULL if the synth has none
+ * @div: post-divider register, or NULL if hardwired
+ * @index: synth index used in the PWD register
+ * @name: clock name
+ * @parent: parent (PLL) clock name
+ * @pll_clk: the registered parent PLL clock
+ *
+ * Returns the registered clock or ERR_PTR(-ENOMEM).
+ * NOTE(review): init and synth are not freed if clk_register() fails —
+ * confirm whether that leak is acceptable for boot-time init code.
+ */
+static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
+                                               void __iomem *freq,
+                                               void __iomem *div,
+                                               int index,
+                                               const char *name,
+                                               const char *parent,
+                                               struct clk *pll_clk)
+{
+       struct clk_init_data *init;
+       struct fapll_synth *synth;
+
+       init = kzalloc(sizeof(*init), GFP_KERNEL);
+       if (!init)
+               return ERR_PTR(-ENOMEM);
+
+       init->ops = &ti_fapll_synt_ops;
+       init->name = name;
+       init->parent_names = &parent;
+       init->num_parents = 1;
+
+       synth = kzalloc(sizeof(*synth), GFP_KERNEL);
+       if (!synth)
+               goto free;
+
+       synth->fd = fd;
+       synth->index = index;
+       synth->freq = freq;
+       synth->div = div;
+       synth->name = name;
+       synth->hw.init = init;
+       synth->clk_pll = pll_clk;
+
+       return clk_register(NULL, &synth->hw);
+
+free:
+       kfree(synth);
+       kfree(init);
+
+       return ERR_PTR(-ENOMEM);
+}
+
+static void __init ti_fapll_setup(struct device_node *node)
+{
+       struct fapll_data *fd;
+       struct clk_init_data *init = NULL;
+       const char *parent_name[2];
+       struct clk *pll_clk;
+       int i;
+
+       fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+       if (!fd)
+               return;
+
+       fd->outputs.clks = kzalloc(sizeof(struct clk *) *
+                                  MAX_FAPLL_OUTPUTS + 1,
+                                  GFP_KERNEL);
+       if (!fd->outputs.clks)
+               goto free;
+
+       init = kzalloc(sizeof(*init), GFP_KERNEL);
+       if (!init)
+               goto free;
+
+       init->ops = &ti_fapll_ops;
+       init->name = node->name;
+
+       init->num_parents = of_clk_get_parent_count(node);
+       if (init->num_parents != 2) {
+               pr_err("%s must have two parents\n", node->name);
+               goto free;
+       }
+
+       parent_name[0] = of_clk_get_parent_name(node, 0);
+       parent_name[1] = of_clk_get_parent_name(node, 1);
+       init->parent_names = parent_name;
+
+       fd->clk_ref = of_clk_get(node, 0);
+       if (IS_ERR(fd->clk_ref)) {
+               pr_err("%s could not get clk_ref\n", node->name);
+               goto free;
+       }
+
+       fd->clk_bypass = of_clk_get(node, 1);
+       if (IS_ERR(fd->clk_bypass)) {
+               pr_err("%s could not get clk_bypass\n", node->name);
+               goto free;
+       }
+
+       fd->base = of_iomap(node, 0);
+       if (!fd->base) {
+               pr_err("%s could not get IO base\n", node->name);
+               goto free;
+       }
+
+       if (fapll_is_ddr_pll(fd->base))
+               fd->bypass_bit_inverted = true;
+
+       fd->name = node->name;
+       fd->hw.init = init;
+
+       /* Register the parent PLL */
+       pll_clk = clk_register(NULL, &fd->hw);
+       if (IS_ERR(pll_clk))
+               goto unmap;
+
+       fd->outputs.clks[0] = pll_clk;
+       fd->outputs.clk_num++;
+
+       /*
+        * Set up the child synthesizers starting at index 1 as the
+        * PLL output is at index 0. We need to check the clock-indices
+        * for numbering in case there are holes in the synth mapping,
+        * and then probe the synth register to see if it has a FREQ
+        * register available.
+        */
+       for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) {
+               const char *output_name;
+               void __iomem *freq, *div;
+               struct clk *synth_clk;
+               int output_instance;
+               u32 v;
+
+               if (of_property_read_string_index(node, "clock-output-names",
+                                                 i, &output_name))
+                       continue;
+
+               if (of_property_read_u32_index(node, "clock-indices", i,
+                                              &output_instance))
+                       output_instance = i;
+
+               freq = fd->base + (output_instance * 8);
+               div = freq + 4;
+
+               /* Check for hardwired audio_pll_clk1 */
+               if (is_audio_pll_clk1(freq)) {
+                       freq = 0;
+                       div = 0;
+               } else {
+                       /* Does the synthesizer have a FREQ register? */
+                       v = readl_relaxed(freq);
+                       if (!v)
+                               freq = 0;
+               }
+               synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
+                                                output_name, node->name,
+                                                pll_clk);
+               if (IS_ERR(synth_clk))
+                       continue;
+
+               fd->outputs.clks[output_instance] = synth_clk;
+               fd->outputs.clk_num++;
+
+               clk_register_clkdev(synth_clk, output_name, NULL);
+       }
+
+       /* Register the child synthesizers as the FAPLL outputs */
+       of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
+       /* Add clock alias for the outputs */
+
+       kfree(init);
+
+       return;
+
+unmap:
+       iounmap(fd->base);
+free:
+       if (fd->clk_bypass)
+               clk_put(fd->clk_bypass);
+       if (fd->clk_ref)
+               clk_put(fd->clk_ref);
+       kfree(fd->outputs.clks);
+       kfree(fd);
+       kfree(init);
+}
+
+CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup);
index b326d2797feb23c0ed84fe52be5b9905102f0234..d493307b73f42b0bdef4c36544e5146ce00b977e 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
 #undef pr_fmt
@@ -90,63 +92,164 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
        return ret;
 }
 
-static void __init _of_ti_gate_clk_setup(struct device_node *node,
-                                        const struct clk_ops *ops,
-                                        const struct clk_hw_omap_ops *hw_ops)
+static struct clk *_register_gate(struct device *dev, const char *name,
+                                 const char *parent_name, unsigned long flags,
+                                 void __iomem *reg, u8 bit_idx,
+                                 u8 clk_gate_flags, const struct clk_ops *ops,
+                                 const struct clk_hw_omap_ops *hw_ops)
 {
-       struct clk *clk;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *clk_hw;
-       const char *clk_name = node->name;
-       const char *parent_name;
-       u32 val;
+       struct clk *clk;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw)
-               return;
+               return ERR_PTR(-ENOMEM);
 
        clk_hw->hw.init = &init;
 
-       init.name = clk_name;
+       init.name = name;
        init.ops = ops;
 
-       if (ops != &omap_gate_clkdm_clk_ops) {
-               clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-               if (!clk_hw->enable_reg)
-                       goto cleanup;
+       clk_hw->enable_reg = reg;
+       clk_hw->enable_bit = bit_idx;
+       clk_hw->ops = hw_ops;
 
-               if (!of_property_read_u32(node, "ti,bit-shift", &val))
-                       clk_hw->enable_bit = val;
+       clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags;
+
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       init.flags = flags;
+
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (IS_ERR(clk))
+               kfree(clk_hw);
+
+       return clk;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_gate(struct ti_clk *setup)
+{
+       const struct clk_ops *ops = &omap_gate_clk_ops;
+       const struct clk_hw_omap_ops *hw_ops = NULL;
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+       u32 flags = 0;
+       u8 clk_gate_flags = 0;
+       struct ti_clk_gate *gate;
+
+       gate = setup->data;
+
+       if (gate->flags & CLKF_INTERFACE)
+               return ti_clk_register_interface(setup);
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       if (gate->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       if (gate->flags & CLKF_SET_BIT_TO_DISABLE)
+               clk_gate_flags |= INVERT_ENABLE;
+
+       if (gate->flags & CLKF_HSDIV) {
+               ops = &omap_gate_clk_hsdiv_restore_ops;
+               hw_ops = &clkhwops_wait;
        }
 
-       clk_hw->ops = hw_ops;
+       if (gate->flags & CLKF_DSS)
+               hw_ops = &clkhwops_omap3430es2_dss_usbhost_wait;
+
+       if (gate->flags & CLKF_WAIT)
+               hw_ops = &clkhwops_wait;
+
+       if (gate->flags & CLKF_CLKDM)
+               ops = &omap_gate_clkdm_clk_ops;
+
+       if (gate->flags & CLKF_AM35XX)
+               hw_ops = &clkhwops_am35xx_ipss_module_wait;
 
-       clk_hw->flags = MEMMAP_ADDRESSING;
+       reg_setup->index = gate->module;
+       reg_setup->offset = gate->reg;
+
+       return _register_gate(NULL, setup->name, gate->parent, flags,
+                             (void __iomem *)reg, gate->bit_shift,
+                             clk_gate_flags, ops, hw_ops);
+}
+
+struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup)
+{
+       struct clk_hw_omap *gate;
+       struct clk_omap_reg *reg;
+       const struct clk_hw_omap_ops *ops = &clkhwops_wait;
+
+       if (!setup)
+               return NULL;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
+               return ERR_PTR(-ENOMEM);
+
+       reg = (struct clk_omap_reg *)&gate->enable_reg;
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       gate->enable_bit = setup->bit_shift;
+
+       if (setup->flags & CLKF_NO_WAIT)
+               ops = NULL;
+
+       if (setup->flags & CLKF_INTERFACE)
+               ops = &clkhwops_iclk_wait;
+
+       gate->ops = ops;
+       gate->flags = MEMMAP_ADDRESSING;
+
+       return &gate->hw;
+}
+#endif
+
+static void __init _of_ti_gate_clk_setup(struct device_node *node,
+                                        const struct clk_ops *ops,
+                                        const struct clk_hw_omap_ops *hw_ops)
+{
+       struct clk *clk;
+       const char *parent_name;
+       void __iomem *reg = NULL;
+       u8 enable_bit = 0;
+       u32 val;
+       u32 flags = 0;
+       u8 clk_gate_flags = 0;
+
+       if (ops != &omap_gate_clkdm_clk_ops) {
+               reg = ti_clk_get_reg_addr(node, 0);
+               if (!reg)
+                       return;
+
+               if (!of_property_read_u32(node, "ti,bit-shift", &val))
+                       enable_bit = val;
+       }
 
        if (of_clk_get_parent_count(node) != 1) {
-               pr_err("%s must have 1 parent\n", clk_name);
-               goto cleanup;
+               pr_err("%s must have 1 parent\n", node->name);
+               return;
        }
 
        parent_name = of_clk_get_parent_name(node, 0);
-       init.parent_names = &parent_name;
-       init.num_parents = 1;
 
        if (of_property_read_bool(node, "ti,set-rate-parent"))
-               init.flags |= CLK_SET_RATE_PARENT;
+               flags |= CLK_SET_RATE_PARENT;
 
        if (of_property_read_bool(node, "ti,set-bit-to-disable"))
-               clk_hw->flags |= INVERT_ENABLE;
+               clk_gate_flags |= INVERT_ENABLE;
 
-       clk = clk_register(NULL, &clk_hw->hw);
+       clk = _register_gate(NULL, node->name, parent_name, flags, reg,
+                            enable_bit, clk_gate_flags, ops, hw_ops);
 
-       if (!IS_ERR(clk)) {
+       if (!IS_ERR(clk))
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               return;
-       }
-
-cleanup:
-       kfree(clk_hw);
 }
 
 static void __init
index 9c3e8c4aaa40c0b8a46048bab734286941a77e6b..265d91f071c5e34554cc71c474278997917a7a6f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -31,53 +32,102 @@ static const struct clk_ops ti_interface_clk_ops = {
        .is_enabled     = &omap2_dflt_clk_is_enabled,
 };
 
-static void __init _of_ti_interface_clk_setup(struct device_node *node,
-                                             const struct clk_hw_omap_ops *ops)
+static struct clk *_register_interface(struct device *dev, const char *name,
+                                      const char *parent_name,
+                                      void __iomem *reg, u8 bit_idx,
+                                      const struct clk_hw_omap_ops *ops)
 {
-       struct clk *clk;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *clk_hw;
-       const char *parent_name;
-       u32 val;
+       struct clk *clk;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw)
-               return;
+               return ERR_PTR(-ENOMEM);
 
        clk_hw->hw.init = &init;
        clk_hw->ops = ops;
        clk_hw->flags = MEMMAP_ADDRESSING;
+       clk_hw->enable_reg = reg;
+       clk_hw->enable_bit = bit_idx;
 
-       clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-       if (!clk_hw->enable_reg)
-               goto cleanup;
-
-       if (!of_property_read_u32(node, "ti,bit-shift", &val))
-               clk_hw->enable_bit = val;
-
-       init.name = node->name;
+       init.name = name;
        init.ops = &ti_interface_clk_ops;
        init.flags = 0;
 
-       parent_name = of_clk_get_parent_name(node, 0);
-       if (!parent_name) {
-               pr_err("%s must have a parent\n", node->name);
-               goto cleanup;
-       }
-
        init.num_parents = 1;
        init.parent_names = &parent_name;
 
        clk = clk_register(NULL, &clk_hw->hw);
 
-       if (!IS_ERR(clk)) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (IS_ERR(clk))
+               kfree(clk_hw);
+       else
                omap2_init_clk_hw_omap_clocks(clk);
+
+       return clk;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_interface(struct ti_clk *setup)
+{
+       const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait;
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+       struct ti_clk_gate *gate;
+
+       gate = setup->data;
+       reg_setup = (struct clk_omap_reg *)&reg;
+       reg_setup->index = gate->module;
+       reg_setup->offset = gate->reg;
+
+       if (gate->flags & CLKF_NO_WAIT)
+               ops = &clkhwops_iclk;
+
+       if (gate->flags & CLKF_HSOTGUSB)
+               ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait;
+
+       if (gate->flags & CLKF_DSS)
+               ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+
+       if (gate->flags & CLKF_SSI)
+               ops = &clkhwops_omap3430es2_iclk_ssi_wait;
+
+       if (gate->flags & CLKF_AM35XX)
+               ops = &clkhwops_am35xx_ipss_wait;
+
+       return _register_interface(NULL, setup->name, gate->parent,
+                                  (void __iomem *)reg, gate->bit_shift, ops);
+}
+#endif
+
+static void __init _of_ti_interface_clk_setup(struct device_node *node,
+                                             const struct clk_hw_omap_ops *ops)
+{
+       struct clk *clk;
+       const char *parent_name;
+       void __iomem *reg;
+       u8 enable_bit = 0;
+       u32 val;
+
+       reg = ti_clk_get_reg_addr(node, 0);
+       if (!reg)
+               return;
+
+       if (!of_property_read_u32(node, "ti,bit-shift", &val))
+               enable_bit = val;
+
+       parent_name = of_clk_get_parent_name(node, 0);
+       if (!parent_name) {
+               pr_err("%s must have a parent\n", node->name);
                return;
        }
 
-cleanup:
-       kfree(clk_hw);
+       clk = _register_interface(NULL, node->name, parent_name, reg,
+                                 enable_bit, ops);
+
+       if (!IS_ERR(clk))
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
 }
 
 static void __init of_ti_interface_clk_setup(struct device_node *node)
index e9d650e51287d50fd53704636241445667bdcf7c..728e253606bce51a9435e6915ee1a445dcfff606 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -144,6 +145,39 @@ static struct clk *_register_mux(struct device *dev, const char *name,
        return clk;
 }
 
+struct clk *ti_clk_register_mux(struct ti_clk *setup)
+{
+       struct ti_clk_mux *mux;
+       u32 flags;
+       u8 mux_flags = 0;
+       struct clk_omap_reg *reg_setup;
+       u32 reg;
+       u32 mask;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       mux = setup->data;
+       flags = CLK_SET_RATE_NO_REPARENT;
+
+       mask = mux->num_parents;
+       if (!(mux->flags & CLKF_INDEX_STARTS_AT_ONE))
+               mask--;
+
+       mask = (1 << fls(mask)) - 1;
+       reg_setup->index = mux->module;
+       reg_setup->offset = mux->reg;
+
+       if (mux->flags & CLKF_INDEX_STARTS_AT_ONE)
+               mux_flags |= CLK_MUX_INDEX_ONE;
+
+       if (mux->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
+                            flags, (void __iomem *)reg, mux->bit_shift, mask,
+                            mux_flags, NULL, NULL);
+}
+
 /**
  * of_mux_clk_setup - Setup function for simple mux rate clock
  * @node: DT node for the clock
@@ -194,8 +228,9 @@ static void of_mux_clk_setup(struct device_node *node)
 
        mask = (1 << fls(mask)) - 1;
 
-       clk = _register_mux(NULL, node->name, parent_names, num_parents, flags,
-                           reg, shift, mask, clk_mux_flags, NULL, NULL);
+       clk = _register_mux(NULL, node->name, parent_names, num_parents,
+                           flags, reg, shift, mask, clk_mux_flags, NULL,
+                           NULL);
 
        if (!IS_ERR(clk))
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -205,6 +240,37 @@ cleanup:
 }
 CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
 
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
+{
+       struct clk_mux *mux;
+       struct clk_omap_reg *reg;
+       int num_parents;
+
+       if (!setup)
+               return NULL;
+
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return ERR_PTR(-ENOMEM);
+
+       reg = (struct clk_omap_reg *)&mux->reg;
+
+       mux->shift = setup->bit_shift;
+
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+               mux->flags |= CLK_MUX_INDEX_ONE;
+
+       num_parents = setup->num_parents;
+
+       mux->mask = num_parents - 1;
+       mux->mask = (1 << fls(mux->mask)) - 1;
+
+       return &mux->hw;
+}
+
 static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
 {
        struct clk_mux *mux;
index bd4769a8448582284b2734b0a77cfa6484306e17..0e950769ed033185cf2ad95587958de586ad1246 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clk-private.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/err.h>
index e2d63bc47436d1ee51014a24dc21ebcc1131ca51..bf63c96acb1a2947ce7e274f2869b51ab8e95550 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clk-private.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/slab.h>
 #include <linux/io.h>
index 9037bebd69f79cd9121d101913bb34d0cffa4420..f870aad57711f6a69d248dc56f0309251ed0da74 100644 (file)
@@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np)
        clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
                        "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
                        26, 0, &armclk_lock);
+       clk_prepare_enable(clks[cpu_2x]);
 
        clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
                        4 + 2 * tmp);
index 1c2506f68122567df92d6197990ee1b015d0984f..68161f7a07d6c8bef677fdd04bb2a1b20715a664 100644 (file)
@@ -63,6 +63,11 @@ config VT8500_TIMER
 config CADENCE_TTC_TIMER
        bool
 
+config ASM9260_TIMER
+       bool
+       select CLKSRC_MMIO
+       select CLKSRC_OF
+
 config CLKSRC_NOMADIK_MTU
        bool
        depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -245,15 +250,4 @@ config CLKSRC_PXA
        help
          This enables OST0 support available on PXA and SA-11x0
          platforms.
-
-config ASM9260_TIMER
-       bool "Alphascale ASM9260 timer driver"
-       depends on GENERIC_CLOCKEVENTS
-       select CLKSRC_MMIO
-       select CLKSRC_OF
-       default y if MACH_ASM9260
-       help
-         This enables build of a clocksource and clockevent driver for
-         the 32-bit System Timer hardware available on a Alphascale ASM9260.
-
 endmenu
index 32a3d25795d3a2b9303db120107d017c13c7165f..68ab42356d0e7a7cd4d97a453465633bba1c5e33 100644 (file)
@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node)
        }
        rate = clk_get_rate(clk);
 
+       mtk_timer_global_reset(evt);
+
        if (request_irq(evt->dev.irq, mtk_timer_interrupt,
                        IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
                pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node)
 
        evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
 
-       mtk_timer_global_reset(evt);
-
        /* Configure clock source */
        mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
        clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node)
 
        /* Configure clock event */
        mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
-       mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
        clockevents_config_and_register(&evt->dev, rate, 0x3,
                                        0xffffffff);
+
+       mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+
        return;
 
 err_clk_disable:
index 941f3f344e08ab2ab552638f0e21ab41718a57ad..d9438af2bbd6b7d001bbdf524b7281a7a0c0816e 100644 (file)
@@ -163,7 +163,7 @@ static struct irqaction pxa_ost0_irq = {
        .dev_id         = &ckevt_pxa_osmr0,
 };
 
-static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
 {
        timer_writel(0, OIER);
        timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
index 6e6730f9dfd16cd068b77ed1359c16aff4bd017b..3de5f3a9a104c10e74c4cc834d7a853936fab5e4 100644 (file)
@@ -12,7 +12,7 @@ menuconfig CONNECTOR
 if CONNECTOR
 
 config PROC_EVENTS
-       boolean "Report process events to userspace"
+       bool "Report process events to userspace"
        depends on CONNECTOR=y
        default y
        ---help---
index 0f9a2c3c0e0d3eb1699a6a19ebee0532872dbccc..1b06fc4640e23c2840cda6011d7c061eedd64a49 100644 (file)
@@ -26,13 +26,21 @@ config ARM_VEXPRESS_SPC_CPUFREQ
 
 
 config ARM_EXYNOS_CPUFREQ
-       bool
+       tristate "SAMSUNG EXYNOS CPUfreq Driver"
+       depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
+       depends on THERMAL
+       help
+         This adds the CPUFreq driver for Samsung EXYNOS platforms.
+         Supported SoC versions are:
+            Exynos4210, Exynos4212, Exynos4412, and Exynos5250.
+
+         If in doubt, say N.
 
 config ARM_EXYNOS4210_CPUFREQ
        bool "SAMSUNG EXYNOS4210"
        depends on CPU_EXYNOS4210
+       depends on ARM_EXYNOS_CPUFREQ
        default y
-       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS4210
          SoC (S5PV310 or S5PC210).
@@ -42,8 +50,8 @@ config ARM_EXYNOS4210_CPUFREQ
 config ARM_EXYNOS4X12_CPUFREQ
        bool "SAMSUNG EXYNOS4x12"
        depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
+       depends on ARM_EXYNOS_CPUFREQ
        default y
-       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS4X12
          SoC (EXYNOS4212 or EXYNOS4412).
@@ -53,28 +61,14 @@ config ARM_EXYNOS4X12_CPUFREQ
 config ARM_EXYNOS5250_CPUFREQ
        bool "SAMSUNG EXYNOS5250"
        depends on SOC_EXYNOS5250
+       depends on ARM_EXYNOS_CPUFREQ
        default y
-       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS5250
          SoC.
 
          If in doubt, say N.
 
-config ARM_EXYNOS5440_CPUFREQ
-       bool "SAMSUNG EXYNOS5440"
-       depends on SOC_EXYNOS5440
-       depends on HAVE_CLK && OF
-       select PM_OPP
-       default y
-       help
-         This adds the CPUFreq driver for Samsung EXYNOS5440
-         SoC. The nature of exynos5440 clock controller is
-         different than previous exynos controllers so not using
-         the common exynos framework.
-
-         If in doubt, say N.
-
 config ARM_EXYNOS_CPU_FREQ_BOOST_SW
        bool "EXYNOS Frequency Overclocking - Software"
        depends on ARM_EXYNOS_CPUFREQ && THERMAL
@@ -90,6 +84,20 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
 
          If in doubt, say N.
 
+config ARM_EXYNOS5440_CPUFREQ
+       tristate "SAMSUNG EXYNOS5440"
+       depends on SOC_EXYNOS5440
+       depends on HAVE_CLK && OF
+       select PM_OPP
+       default y
+       help
+         This adds the CPUFreq driver for Samsung EXYNOS5440
+         SoC. The nature of exynos5440 clock controller is
+         different than previous exynos controllers so not using
+         the common exynos framework.
+
+         If in doubt, say N.
+
 config ARM_HIGHBANK_CPUFREQ
        tristate "Calxeda Highbank-based"
        depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
index 72564b701b4a7018643c77de03f58a9092ff5f71..7ea24413cee6855c65daa7954bc4843622402e75 100644 (file)
@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE
 config PPC_CORENET_CPUFREQ
        tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
        depends on PPC_E500MC && OF && COMMON_CLK
-       select CLK_PPC_CORENET
+       select CLK_QORIQ
        help
          This adds the CPUFreq driver support for Freescale e500mc,
          e5500 and e6500 series SoCs which are capable of changing
index 8b4220ac888b180ba6ef4b1cda06590c5e3ab59f..82a1821471fd870a9eba9323d63c4b925c06a97c 100644 (file)
@@ -52,10 +52,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ)             += arm_big_little_dt.o
 
 obj-$(CONFIG_ARCH_DAVINCI)             += davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)         += dbx500-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)       += exynos-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)   += exynos4210-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)   += exynos4x12-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)   += exynos5250-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)       += arm-exynos-cpufreq.o
+arm-exynos-cpufreq-y                                   := exynos-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)    += exynos4210-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)    += exynos4x12-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)    += exynos5250-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)   += exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)     += highbank-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)                += imx6q-cpufreq.o
index f99a0b0b7c06acf61baec88cd667f3003e595b9c..5e98c6b1f284b651f0b74d3673c55f997e47f081 100644 (file)
 #include <linux/cpufreq.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpu.h>
 
 #include "exynos-cpufreq.h"
 
 static struct exynos_dvfs_info *exynos_info;
+static struct thermal_cooling_device *cdev;
 static struct regulator *arm_regulator;
 static unsigned int locking_frequency;
 
@@ -156,6 +159,7 @@ static struct cpufreq_driver exynos_driver = {
 
 static int exynos_cpufreq_probe(struct platform_device *pdev)
 {
+       struct device_node *cpus, *np;
        int ret = -EINVAL;
 
        exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -198,9 +202,36 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
        /* Done here as we want to capture boot frequency */
        locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
 
-       if (!cpufreq_register_driver(&exynos_driver))
+       ret = cpufreq_register_driver(&exynos_driver);
+       if (ret)
+               goto err_cpufreq_reg;
+
+       cpus = of_find_node_by_path("/cpus");
+       if (!cpus) {
+               pr_err("failed to find cpus node\n");
+               return 0;
+       }
+
+       np = of_get_next_child(cpus, NULL);
+       if (!np) {
+               pr_err("failed to find cpus child node\n");
+               of_node_put(cpus);
                return 0;
+       }
+
+       if (of_find_property(np, "#cooling-cells", NULL)) {
+               cdev = of_cpufreq_cooling_register(np,
+                                                  cpu_present_mask);
+               if (IS_ERR(cdev))
+                       pr_err("running cpufreq without cooling device: %ld\n",
+                              PTR_ERR(cdev));
+       }
+       of_node_put(np);
+       of_node_put(cpus);
+
+       return 0;
 
+err_cpufreq_reg:
        dev_err(&pdev->dev, "failed to register cpufreq driver\n");
        regulator_put(arm_regulator);
 err_vdd_arm:
index 2fd53eaaec20bf30ef00d6a19226fba13670501a..d6d425773fa497274301eaa88f247fb8dd770e89 100644 (file)
@@ -263,7 +263,7 @@ out:
 }
 
 #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
 {
        int count, v, i, found;
        struct cpufreq_frequency_table *pos;
@@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
        .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
 };
 
-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
        struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
        struct cpufreq_frequency_table *pos;
index d00f1cee45094a6c01e004934de6e2928d4e9222..733aa5153e7451f645a65e3452ed44d6d488054a 100644 (file)
@@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
        (cfg->info->set_fvco)(cfg);
 }
 
-static inline void s3c_cpufreq_resume_clocks(void)
-{
-       cpu_cur.info->resume_clocks();
-}
-
 static inline void s3c_cpufreq_updateclk(struct clk *clk,
                                         unsigned int freq)
 {
@@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
 
        last_target = ~0;       /* invalidate last_target setting */
 
-       /* first, find out what speed we resumed at. */
-       s3c_cpufreq_resume_clocks();
-
        /* whilst we will be called later on, we try and re-set the
         * cpu frequencies as soon as possible so that we do not end
         * up resuming devices and then immediately having to re-set
@@ -454,7 +446,7 @@ static struct cpufreq_driver s3c24xx_driver = {
 };
 
 
-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
 {
        if (!info || !info->name) {
                printk(KERN_ERR "%s: failed to pass valid information\n",
index aedec09579340b2db42095e3acebcbc4776543c8..59372077ec7c1a1b7d64e5ac67880b5e625526b5 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/notifier.h>
 #include <linux/clockchips.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -158,70 +159,83 @@ static int powernv_add_idle_states(void)
        struct device_node *power_mgt;
        int nr_idle_states = 1; /* Snooze */
        int dt_idle_states;
-       const __be32 *idle_state_flags;
-       const __be32 *idle_state_latency;
-       u32 len_flags, flags, latency_ns;
-       int i;
+       u32 *latency_ns, *residency_ns, *flags;
+       int i, rc;
 
        /* Currently we have snooze statically defined */
 
        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
        if (!power_mgt) {
                pr_warn("opal: PowerMgmt Node not found\n");
-               return nr_idle_states;
+               goto out;
        }
 
-       idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
-       if (!idle_state_flags) {
-               pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
-               return nr_idle_states;
+       /* Read values of any property to determine the num of idle states */
+       dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
+       if (dt_idle_states < 0) {
+               pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+               goto out;
        }
 
-       idle_state_latency = of_get_property(power_mgt,
-                       "ibm,cpu-idle-state-latencies-ns", NULL);
-       if (!idle_state_latency) {
-               pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
-               return nr_idle_states;
+       flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+       if (of_property_read_u32_array(power_mgt,
+                       "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+               pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
+               goto out_free_flags;
        }
 
-       dt_idle_states = len_flags / sizeof(u32);
+       latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
+       rc = of_property_read_u32_array(power_mgt,
+               "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
+       if (rc) {
+               pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+               goto out_free_latency;
+       }
 
-       for (i = 0; i < dt_idle_states; i++) {
+       residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
+       rc = of_property_read_u32_array(power_mgt,
+               "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
 
-               flags = be32_to_cpu(idle_state_flags[i]);
+       for (i = 0; i < dt_idle_states; i++) {
 
-               /* Cpuidle accepts exit_latency in us and we estimate
-                * target residency to be 10x exit_latency
+               /*
+                * Cpuidle accepts exit_latency and target_residency in us.
+                * Use default target_residency values if f/w does not expose it.
                 */
-               latency_ns = be32_to_cpu(idle_state_latency[i]);
-               if (flags & OPAL_PM_NAP_ENABLED) {
+               if (flags[i] & OPAL_PM_NAP_ENABLED) {
                        /* Add NAP state */
                        strcpy(powernv_states[nr_idle_states].name, "Nap");
                        strcpy(powernv_states[nr_idle_states].desc, "Nap");
                        powernv_states[nr_idle_states].flags = 0;
-                       powernv_states[nr_idle_states].exit_latency =
-                                       ((unsigned int)latency_ns) / 1000;
-                       powernv_states[nr_idle_states].target_residency =
-                                       ((unsigned int)latency_ns / 100);
+                       powernv_states[nr_idle_states].target_residency = 100;
                        powernv_states[nr_idle_states].enter = &nap_loop;
-                       nr_idle_states++;
-               }
-
-               if (flags & OPAL_PM_SLEEP_ENABLED ||
-                       flags & OPAL_PM_SLEEP_ENABLED_ER1) {
+               } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+                       flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
                        /* Add FASTSLEEP state */
                        strcpy(powernv_states[nr_idle_states].name, "FastSleep");
                        strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
                        powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
-                       powernv_states[nr_idle_states].exit_latency =
-                                       ((unsigned int)latency_ns) / 1000;
-                       powernv_states[nr_idle_states].target_residency =
-                                       ((unsigned int)latency_ns / 100);
+                       powernv_states[nr_idle_states].target_residency = 300000;
                        powernv_states[nr_idle_states].enter = &fastsleep_loop;
-                       nr_idle_states++;
                }
+
+               powernv_states[nr_idle_states].exit_latency =
+                               ((unsigned int)latency_ns[i]) / 1000;
+
+               if (!rc) {
+                       powernv_states[nr_idle_states].target_residency =
+                               ((unsigned int)residency_ns[i]) / 1000;
+               }
+
+               nr_idle_states++;
        }
 
+       kfree(residency_ns);
+out_free_latency:
+       kfree(latency_ns);
+out_free_flags:
+       kfree(flags);
+out:
        return nr_idle_states;
 }
 
index d594ae962ed28305a0a610698fbfbede4c80c8ea..fded0a5cfcd72ce5d8539ce5626b11e2bbee38d7 100644 (file)
@@ -606,12 +606,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
        dev_dbg(ctx->device->dev, "[%s]: ", __func__);
 
        chan = ctx->device->dma.chan_mem2cryp;
-       dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+       dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
                     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
 
        chan = ctx->device->dma.chan_cryp2mem;
-       dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+       dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
                     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
 }
index 70a20871e998cce60cdffe9add195d393ffcf326..187a8fd7eee7f5ece139b67cc44cef9b483773ef 100644 (file)
@@ -202,7 +202,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
        struct dma_chan *chan;
 
        chan = ctx->device->dma.chan_mem2hash;
-       dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+       dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
                     ctx->device->dma.sg_len, DMA_TO_DEVICE);
 }
index faf30a4e642b32d02ccdca7abdb14321f0149b16..a874b6ec6650f33275c8a59ddb1d00a78c4271e9 100644 (file)
@@ -416,6 +416,15 @@ config NBPFAXI_DMA
        help
          Support for "Type-AXI" NBPF DMA IPs from Renesas
 
+config IMG_MDC_DMA
+       tristate "IMG MDC support"
+       depends on MIPS || COMPILE_TEST
+       depends on MFD_SYSCON
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Enable support for the IMG multi-threaded DMA controller (MDC).
+
 config DMA_ENGINE
        bool
 
index 2022b5451377d5d0e87f290669dddf1ded3d0ff5..f915f61ec5747193757e6f2b6da61ebd0eea4d83 100644 (file)
@@ -19,7 +19,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE_BASE) += sh/
+obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -50,3 +50,4 @@ obj-y += xilinx/
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
index 1364d00881dd5659d7c083b2f354f95be5749cb8..4a5fd245014e6666721c111b85515b2e1b05f74c 100644 (file)
@@ -1386,32 +1386,6 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
        return pl08x_cctl(cctl);
 }
 
-static int dma_set_runtime_config(struct dma_chan *chan,
-                                 struct dma_slave_config *config)
-{
-       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-       struct pl08x_driver_data *pl08x = plchan->host;
-
-       if (!plchan->slave)
-               return -EINVAL;
-
-       /* Reject definitely invalid configurations */
-       if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
-           config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
-               return -EINVAL;
-
-       if (config->device_fc && pl08x->vd->pl080s) {
-               dev_err(&pl08x->adev->dev,
-                       "%s: PL080S does not support peripheral flow control\n",
-                       __func__);
-               return -EINVAL;
-       }
-
-       plchan->cfg = *config;
-
-       return 0;
-}
-
 /*
  * Slave transactions callback to the slave device to allow
  * synchronization of slave DMA signals with the DMAC enable
@@ -1693,20 +1667,71 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
        return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
-static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                        unsigned long arg)
+static int pl08x_config(struct dma_chan *chan,
+                       struct dma_slave_config *config)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
+
+       if (!plchan->slave)
+               return -EINVAL;
+
+       /* Reject definitely invalid configurations */
+       if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+           config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+               return -EINVAL;
+
+       if (config->device_fc && pl08x->vd->pl080s) {
+               dev_err(&pl08x->adev->dev,
+                       "%s: PL080S does not support peripheral flow control\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       plchan->cfg = *config;
+
+       return 0;
+}
+
+static int pl08x_terminate_all(struct dma_chan *chan)
 {
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        unsigned long flags;
-       int ret = 0;
 
-       /* Controls applicable to inactive channels */
-       if (cmd == DMA_SLAVE_CONFIG) {
-               return dma_set_runtime_config(chan,
-                                             (struct dma_slave_config *)arg);
+       spin_lock_irqsave(&plchan->vc.lock, flags);
+       if (!plchan->phychan && !plchan->at) {
+               spin_unlock_irqrestore(&plchan->vc.lock, flags);
+               return 0;
        }
 
+       plchan->state = PL08X_CHAN_IDLE;
+
+       if (plchan->phychan) {
+               /*
+                * Mark physical channel as free and free any slave
+                * signal
+                */
+               pl08x_phy_free(plchan);
+       }
+       /* Dequeue jobs and free LLIs */
+       if (plchan->at) {
+               pl08x_desc_free(&plchan->at->vd);
+               plchan->at = NULL;
+       }
+       /* Dequeue jobs not yet fired as well */
+       pl08x_free_txd_list(pl08x, plchan);
+
+       spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+       return 0;
+}
+
+static int pl08x_pause(struct dma_chan *chan)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       unsigned long flags;
+
        /*
         * Anything succeeds on channels with no physical allocation and
         * no queued transfers.
@@ -1717,42 +1742,35 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                return 0;
        }
 
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               plchan->state = PL08X_CHAN_IDLE;
+       pl08x_pause_phy_chan(plchan->phychan);
+       plchan->state = PL08X_CHAN_PAUSED;
 
-               if (plchan->phychan) {
-                       /*
-                        * Mark physical channel as free and free any slave
-                        * signal
-                        */
-                       pl08x_phy_free(plchan);
-               }
-               /* Dequeue jobs and free LLIs */
-               if (plchan->at) {
-                       pl08x_desc_free(&plchan->at->vd);
-                       plchan->at = NULL;
-               }
-               /* Dequeue jobs not yet fired as well */
-               pl08x_free_txd_list(pl08x, plchan);
-               break;
-       case DMA_PAUSE:
-               pl08x_pause_phy_chan(plchan->phychan);
-               plchan->state = PL08X_CHAN_PAUSED;
-               break;
-       case DMA_RESUME:
-               pl08x_resume_phy_chan(plchan->phychan);
-               plchan->state = PL08X_CHAN_RUNNING;
-               break;
-       default:
-               /* Unknown command */
-               ret = -ENXIO;
-               break;
+       spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+       return 0;
+}
+
+static int pl08x_resume(struct dma_chan *chan)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       unsigned long flags;
+
+       /*
+        * Anything succeeds on channels with no physical allocation and
+        * no queued transfers.
+        */
+       spin_lock_irqsave(&plchan->vc.lock, flags);
+       if (!plchan->phychan && !plchan->at) {
+               spin_unlock_irqrestore(&plchan->vc.lock, flags);
+               return 0;
        }
 
+       pl08x_resume_phy_chan(plchan->phychan);
+       plchan->state = PL08X_CHAN_RUNNING;
+
        spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
-       return ret;
+       return 0;
 }
 
 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2048,7 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
        pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
        pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
-       pl08x->memcpy.device_control = pl08x_control;
+       pl08x->memcpy.device_config = pl08x_config;
+       pl08x->memcpy.device_pause = pl08x_pause;
+       pl08x->memcpy.device_resume = pl08x_resume;
+       pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
 
        /* Initialize slave engine */
        dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2061,7 +2082,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        pl08x->slave.device_issue_pending = pl08x_issue_pending;
        pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
        pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
-       pl08x->slave.device_control = pl08x_control;
+       pl08x->slave.device_config = pl08x_config;
+       pl08x->slave.device_pause = pl08x_pause;
+       pl08x->slave.device_resume = pl08x_resume;
+       pl08x->slave.device_terminate_all = pl08x_terminate_all;
 
        /* Get the platform data */
        pl08x->pd = dev_get_platdata(&adev->dev);
index ca9dd261328357d079466b9d0659e43d57a0da4c..1e1a4c5675426048dcef03616c15d8e744db2c6e 100644 (file)
 #define        ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
 #define        ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
                                |ATC_DIF(AT_DMA_MEM_IF))
+#define ATC_DMA_BUSWIDTHS\
+       (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
 /*
  * Initial number of descriptors to allocate for each channel. This could
@@ -972,11 +977,13 @@ err_out:
        return NULL;
 }
 
-static int set_runtime_config(struct dma_chan *chan,
-                             struct dma_slave_config *sconfig)
+static int atc_config(struct dma_chan *chan,
+                     struct dma_slave_config *sconfig)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
 
+       dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
        /* Check if it is chan is configured for slave transfers */
        if (!chan->private)
                return -EINVAL;
@@ -989,9 +996,28 @@ static int set_runtime_config(struct dma_chan *chan,
        return 0;
 }
 
+static int atc_pause(struct dma_chan *chan)
+{
+       struct at_dma_chan      *atchan = to_at_dma_chan(chan);
+       struct at_dma           *atdma = to_at_dma(chan->device);
+       int                     chan_id = atchan->chan_common.chan_id;
+       unsigned long           flags;
 
-static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                      unsigned long arg)
+       LIST_HEAD(list);
+
+       dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+       spin_lock_irqsave(&atchan->lock, flags);
+
+       dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+       set_bit(ATC_IS_PAUSED, &atchan->status);
+
+       spin_unlock_irqrestore(&atchan->lock, flags);
+
+       return 0;
+}
+
+static int atc_resume(struct dma_chan *chan)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
@@ -1000,60 +1026,61 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
        LIST_HEAD(list);
 
-       dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
+       dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-       if (cmd == DMA_PAUSE) {
-               spin_lock_irqsave(&atchan->lock, flags);
+       if (!atc_chan_is_paused(atchan))
+               return 0;
 
-               dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
-               set_bit(ATC_IS_PAUSED, &atchan->status);
+       spin_lock_irqsave(&atchan->lock, flags);
 
-               spin_unlock_irqrestore(&atchan->lock, flags);
-       } else if (cmd == DMA_RESUME) {
-               if (!atc_chan_is_paused(atchan))
-                       return 0;
+       dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+       clear_bit(ATC_IS_PAUSED, &atchan->status);
 
-               spin_lock_irqsave(&atchan->lock, flags);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
-               dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
-               clear_bit(ATC_IS_PAUSED, &atchan->status);
+       return 0;
+}
 
-               spin_unlock_irqrestore(&atchan->lock, flags);
-       } else if (cmd == DMA_TERMINATE_ALL) {
-               struct at_desc  *desc, *_desc;
-               /*
-                * This is only called when something went wrong elsewhere, so
-                * we don't really care about the data. Just disable the
-                * channel. We still have to poll the channel enable bit due
-                * to AHB/HSB limitations.
-                */
-               spin_lock_irqsave(&atchan->lock, flags);
+static int atc_terminate_all(struct dma_chan *chan)
+{
+       struct at_dma_chan      *atchan = to_at_dma_chan(chan);
+       struct at_dma           *atdma = to_at_dma(chan->device);
+       int                     chan_id = atchan->chan_common.chan_id;
+       struct at_desc          *desc, *_desc;
+       unsigned long           flags;
 
-               /* disabling channel: must also remove suspend state */
-               dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+       LIST_HEAD(list);
 
-               /* confirm that this channel is disabled */
-               while (dma_readl(atdma, CHSR) & atchan->mask)
-                       cpu_relax();
+       dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-               /* active_list entries will end up before queued entries */
-               list_splice_init(&atchan->queue, &list);
-               list_splice_init(&atchan->active_list, &list);
+       /*
+        * This is only called when something went wrong elsewhere, so
+        * we don't really care about the data. Just disable the
+        * channel. We still have to poll the channel enable bit due
+        * to AHB/HSB limitations.
+        */
+       spin_lock_irqsave(&atchan->lock, flags);
 
-               /* Flush all pending and queued descriptors */
-               list_for_each_entry_safe(desc, _desc, &list, desc_node)
-                       atc_chain_complete(atchan, desc);
+       /* disabling channel: must also remove suspend state */
+       dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
 
-               clear_bit(ATC_IS_PAUSED, &atchan->status);
-               /* if channel dedicated to cyclic operations, free it */
-               clear_bit(ATC_IS_CYCLIC, &atchan->status);
+       /* confirm that this channel is disabled */
+       while (dma_readl(atdma, CHSR) & atchan->mask)
+               cpu_relax();
 
-               spin_unlock_irqrestore(&atchan->lock, flags);
-       } else if (cmd == DMA_SLAVE_CONFIG) {
-               return set_runtime_config(chan, (struct dma_slave_config *)arg);
-       } else {
-               return -ENXIO;
-       }
+       /* active_list entries will end up before queued entries */
+       list_splice_init(&atchan->queue, &list);
+       list_splice_init(&atchan->active_list, &list);
+
+       /* Flush all pending and queued descriptors */
+       list_for_each_entry_safe(desc, _desc, &list, desc_node)
+               atc_chain_complete(atchan, desc);
+
+       clear_bit(ATC_IS_PAUSED, &atchan->status);
+       /* if channel dedicated to cyclic operations, free it */
+       clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return 0;
 }
@@ -1505,7 +1532,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
                /* controller can do slave DMA: can trigger cyclic transfers */
                dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
                atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-               atdma->dma_common.device_control = atc_control;
+               atdma->dma_common.device_config = atc_config;
+               atdma->dma_common.device_pause = atc_pause;
+               atdma->dma_common.device_resume = atc_resume;
+               atdma->dma_common.device_terminate_all = atc_terminate_all;
+               atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
+               atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
+               atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+               atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        }
 
        dma_writel(atdma, EN, AT_DMA_ENABLE);
@@ -1622,7 +1656,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
        if (!atc_chan_is_paused(atchan)) {
                dev_warn(chan2dev(chan),
                "cyclic channel not paused, should be done by channel user\n");
-               atc_control(chan, DMA_PAUSE, 0);
+               atc_pause(chan);
        }
 
        /* now preserve additional data for cyclic operations */
index 2787aba60c6bdee8ed0489cb932338d6a81528ac..d6bba6c636c2b9ab4f05331084e07f59b797f1aa 100644 (file)
@@ -232,7 +232,8 @@ enum atc_status {
  * @save_dscr: for cyclic operations, preserve next descriptor address in
  *             the cyclic list on suspend/resume cycle
  * @remain_desc: to save remain desc length
- * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
+ * @dma_sconfig: configuration for slave transfers, passed via
+ * .device_config
  * @lock: serializes enqueue/dequeue operations to descriptors lists
  * @active_list: list of descriptors dmaengine is being running on
  * @queue: list of descriptors ready to be submitted to engine
index b60d77a22df673c25cf72259ad3be6f7416b19d6..09e2825a547a2098cc28a0e3e20079c33c52fe09 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/dmapool.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of_dma.h>
 
 #define AT_XDMAC_MAX_CHAN      0x20
 
+#define AT_XDMAC_DMA_BUSWIDTHS\
+       (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
+       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
 enum atc_status {
        AT_XDMAC_CHAN_IS_CYCLIC = 0,
        AT_XDMAC_CHAN_IS_PAUSED,
@@ -184,15 +192,15 @@ struct at_xdmac_chan {
        struct dma_chan                 chan;
        void __iomem                    *ch_regs;
        u32                             mask;           /* Channel Mask */
-       u32                             cfg[3];         /* Channel Configuration Register */
-       #define AT_XDMAC_CUR_CFG        0               /* Current channel conf */
-       #define AT_XDMAC_DEV_TO_MEM_CFG 1               /* Predifined dev to mem channel conf */
-       #define AT_XDMAC_MEM_TO_DEV_CFG 2               /* Predifined mem to dev channel conf */
+       u32                             cfg[2];         /* Channel Configuration Register */
+       #define AT_XDMAC_DEV_TO_MEM_CFG 0               /* Predifined dev to mem channel conf */
+       #define AT_XDMAC_MEM_TO_DEV_CFG 1               /* Predifined mem to dev channel conf */
        u8                              perid;          /* Peripheral ID */
        u8                              perif;          /* Peripheral Interface */
        u8                              memif;          /* Memory Interface */
        u32                             per_src_addr;
        u32                             per_dst_addr;
+       u32                             save_cc;
        u32                             save_cim;
        u32                             save_cnda;
        u32                             save_cndc;
@@ -344,20 +352,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
        at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
 
        /*
-        * When doing memory to memory transfer we need to use the next
+        * When doing non cyclic transfer we need to use the next
         * descriptor view 2 since some fields of the configuration register
         * depend on transfer size and src/dest addresses.
         */
-       if (is_slave_direction(first->direction)) {
+       if (at_xdmac_chan_is_cyclic(atchan)) {
                reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
-               if (first->direction == DMA_MEM_TO_DEV)
-                       atchan->cfg[AT_XDMAC_CUR_CFG] =
-                               atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
-               else
-                       atchan->cfg[AT_XDMAC_CUR_CFG] =
-                               atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
-               at_xdmac_chan_write(atchan, AT_XDMAC_CC,
-                                   atchan->cfg[AT_XDMAC_CUR_CFG]);
+               at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
        } else {
                /*
                 * No need to write AT_XDMAC_CC reg, it will be done when the
@@ -561,7 +562,6 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        struct at_xdmac_desc    *first = NULL, *prev = NULL;
        struct scatterlist      *sg;
        int                     i;
-       u32                     cfg;
        unsigned int            xfer_size = 0;
 
        if (!sgl)
@@ -583,7 +583,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        /* Prepare descriptors. */
        for_each_sg(sgl, sg, sg_len, i) {
                struct at_xdmac_desc    *desc = NULL;
-               u32                     len, mem;
+               u32                     len, mem, dwidth, fixed_dwidth;
 
                len = sg_dma_len(sg);
                mem = sg_dma_address(sg);
@@ -608,17 +608,21 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                if (direction == DMA_DEV_TO_MEM) {
                        desc->lld.mbr_sa = atchan->per_src_addr;
                        desc->lld.mbr_da = mem;
-                       cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
                } else {
                        desc->lld.mbr_sa = mem;
                        desc->lld.mbr_da = atchan->per_dst_addr;
-                       cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
                }
-               desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1               /* next descriptor view */
-                       | AT_XDMAC_MBR_UBC_NDEN                         /* next descriptor dst parameter update */
-                       | AT_XDMAC_MBR_UBC_NSEN                         /* next descriptor src parameter update */
-                       | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)  /* descriptor fetch */
-                       | len / (1 << at_xdmac_get_dwidth(cfg));        /* microblock length */
+               dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+               fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+                              ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+                              : AT_XDMAC_CC_DWIDTH_BYTE;
+               desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2                       /* next descriptor view */
+                       | AT_XDMAC_MBR_UBC_NDEN                                 /* next descriptor dst parameter update */
+                       | AT_XDMAC_MBR_UBC_NSEN                                 /* next descriptor src parameter update */
+                       | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)          /* descriptor fetch */
+                       | (len >> fixed_dwidth);                                /* microblock length */
                dev_dbg(chan2dev(chan),
                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                         __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
@@ -882,7 +886,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        enum dma_status         ret;
        int                     residue;
        u32                     cur_nda, mask, value;
-       u8                      dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);
+       u8                      dwidth = 0;
 
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
@@ -912,7 +916,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
         */
        mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
        value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
-       if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) {
+       if ((desc->lld.mbr_cfg & mask) == value) {
                at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
                while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
                        cpu_relax();
@@ -926,6 +930,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
         */
        descs_list = &desc->descs_list;
        list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+               dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
                residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
                if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
                        break;
@@ -1107,58 +1112,80 @@ static void at_xdmac_issue_pending(struct dma_chan *chan)
        return;
 }
 
-static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                           unsigned long arg)
+static int at_xdmac_device_config(struct dma_chan *chan,
+                                 struct dma_slave_config *config)
+{
+       struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
+       int ret;
+
+       dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+       spin_lock_bh(&atchan->lock);
+       ret = at_xdmac_set_slave_config(chan, config);
+       spin_unlock_bh(&atchan->lock);
+
+       return ret;
+}
+
+static int at_xdmac_device_pause(struct dma_chan *chan)
 {
-       struct at_xdmac_desc    *desc, *_desc;
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
-       int                     ret = 0;
 
-       dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
+       dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+       if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
+               return 0;
 
        spin_lock_bh(&atchan->lock);
+       at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+       while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
+              & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
+               cpu_relax();
+       spin_unlock_bh(&atchan->lock);
 
-       switch (cmd) {
-       case DMA_PAUSE:
-               at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
-               set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-               break;
+       return 0;
+}
 
-       case DMA_RESUME:
-               if (!at_xdmac_chan_is_paused(atchan))
-                       break;
+static int at_xdmac_device_resume(struct dma_chan *chan)
+{
+       struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
+       struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
 
-               at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
-               clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-               break;
+       dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-       case DMA_TERMINATE_ALL:
-               at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
-               while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
-                       cpu_relax();
+       spin_lock_bh(&atchan->lock);
+       if (!at_xdmac_chan_is_paused(atchan))
+               return 0;
+
+       at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+       clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+       spin_unlock_bh(&atchan->lock);
 
-               /* Cancel all pending transfers. */
-               list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
-                       at_xdmac_remove_xfer(atchan, desc);
+       return 0;
+}
+
+static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+{
+       struct at_xdmac_desc    *desc, *_desc;
+       struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
+       struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
 
-               clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-               break;
+       dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-       case DMA_SLAVE_CONFIG:
-               ret = at_xdmac_set_slave_config(chan,
-                               (struct dma_slave_config *)arg);
-               break;
+       spin_lock_bh(&atchan->lock);
+       at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+       while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+               cpu_relax();
 
-       default:
-               dev_err(chan2dev(chan),
-                       "unmanaged or unknown dma control cmd: %d\n", cmd);
-               ret = -ENXIO;
-       }
+       /* Cancel all pending transfers. */
+       list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+               at_xdmac_remove_xfer(atchan, desc);
 
+       clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
        spin_unlock_bh(&atchan->lock);
 
-       return ret;
+       return 0;
 }
 
 static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
@@ -1217,27 +1244,6 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
        return;
 }
 
-#define AT_XDMAC_DMA_BUSWIDTHS\
-       (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
-       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
-       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
-       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
-       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
-                                     struct dma_slave_caps *caps)
-{
-
-       caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-       caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       caps->cmd_pause = true;
-       caps->cmd_terminate = true;
-       caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int atmel_xdmac_prepare(struct device *dev)
 {
@@ -1268,9 +1274,10 @@ static int atmel_xdmac_suspend(struct device *dev)
        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
                struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
 
+               atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
                if (at_xdmac_chan_is_cyclic(atchan)) {
                        if (!at_xdmac_chan_is_paused(atchan))
-                               at_xdmac_control(chan, DMA_PAUSE, 0);
+                               at_xdmac_device_pause(chan);
                        atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
                        atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
                        atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
@@ -1290,7 +1297,6 @@ static int atmel_xdmac_resume(struct device *dev)
        struct at_xdmac_chan    *atchan;
        struct dma_chan         *chan, *_chan;
        int                     i;
-       u32                     cfg;
 
        clk_prepare_enable(atxdmac->clk);
 
@@ -1305,8 +1311,7 @@ static int atmel_xdmac_resume(struct device *dev)
        at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
                atchan = to_at_xdmac_chan(chan);
-               cfg = atchan->cfg[AT_XDMAC_CUR_CFG];
-               at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
+               at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
                if (at_xdmac_chan_is_cyclic(atchan)) {
                        at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
                        at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
@@ -1407,8 +1412,14 @@ static int at_xdmac_probe(struct platform_device *pdev)
        atxdmac->dma.device_prep_dma_cyclic             = at_xdmac_prep_dma_cyclic;
        atxdmac->dma.device_prep_dma_memcpy             = at_xdmac_prep_dma_memcpy;
        atxdmac->dma.device_prep_slave_sg               = at_xdmac_prep_slave_sg;
-       atxdmac->dma.device_control                     = at_xdmac_control;
-       atxdmac->dma.device_slave_caps                  = at_xdmac_device_slave_caps;
+       atxdmac->dma.device_config                      = at_xdmac_device_config;
+       atxdmac->dma.device_pause                       = at_xdmac_device_pause;
+       atxdmac->dma.device_resume                      = at_xdmac_device_resume;
+       atxdmac->dma.device_terminate_all               = at_xdmac_device_terminate_all;
+       atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+       atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+       atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
        /* Disable all chans and interrupts. */
        at_xdmac_off(atxdmac);
@@ -1507,7 +1518,6 @@ static struct platform_driver at_xdmac_driver = {
        .remove         = at_xdmac_remove,
        .driver = {
                .name           = "at_xdmac",
-               .owner          = THIS_MODULE,
                .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
                .pm             = &atmel_xdmac_dev_pm_ops,
        }
index 918b7b3f766f03b19e15792a760eaf544342a182..0723096fb50ac125dbb471126ba396085307ff2e 100644 (file)
@@ -436,9 +436,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
        return vchan_tx_prep(&c->vc, &d->vd, flags);
 }
 
-static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
-               struct dma_slave_config *cfg)
+static int bcm2835_dma_slave_config(struct dma_chan *chan,
+                                   struct dma_slave_config *cfg)
 {
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
        if ((cfg->direction == DMA_DEV_TO_MEM &&
             cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
            (cfg->direction == DMA_MEM_TO_DEV &&
@@ -452,8 +454,9 @@ static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
        return 0;
 }
 
-static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 {
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
        unsigned long flags;
        int timeout = 10000;
@@ -495,24 +498,6 @@ static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
        return 0;
 }
 
-static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-       unsigned long arg)
-{
-       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-
-       switch (cmd) {
-       case DMA_SLAVE_CONFIG:
-               return bcm2835_dma_slave_config(c,
-                               (struct dma_slave_config *)arg);
-
-       case DMA_TERMINATE_ALL:
-               return bcm2835_dma_terminate_all(c);
-
-       default:
-               return -ENXIO;
-       }
-}
-
 static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
 {
        struct bcm2835_chan *c;
@@ -565,18 +550,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
        return chan;
 }
 
-static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
-       struct dma_slave_caps *caps)
-{
-       caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-       caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       caps->cmd_pause = false;
-       caps->cmd_terminate = true;
-
-       return 0;
-}
-
 static int bcm2835_dma_probe(struct platform_device *pdev)
 {
        struct bcm2835_dmadev *od;
@@ -615,9 +588,12 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
        od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
        od->ddev.device_tx_status = bcm2835_dma_tx_status;
        od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
-       od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
        od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
-       od->ddev.device_control = bcm2835_dma_control;
+       od->ddev.device_config = bcm2835_dma_slave_config;
+       od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+       od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        spin_lock_init(&od->lock);
index e88588d8ecd34b82dc23098ee73ea93a00e416fc..fd22dd36985f2b13fb4b1ccc9cc94c84751bd11b 100644 (file)
@@ -1690,7 +1690,7 @@ static u32 coh901318_get_bytes_left(struct dma_chan *chan)
  * Pauses a transfer without losing data. Enables power save.
  * Use this function in conjunction with coh901318_resume.
  */
-static void coh901318_pause(struct dma_chan *chan)
+static int coh901318_pause(struct dma_chan *chan)
 {
        u32 val;
        unsigned long flags;
@@ -1730,12 +1730,13 @@ static void coh901318_pause(struct dma_chan *chan)
        enable_powersave(cohc);
 
        spin_unlock_irqrestore(&cohc->lock, flags);
+       return 0;
 }
 
 /* Resumes a transfer that has been stopped via 300_dma_stop(..).
    Power save is handled.
 */
-static void coh901318_resume(struct dma_chan *chan)
+static int coh901318_resume(struct dma_chan *chan)
 {
        u32 val;
        unsigned long flags;
@@ -1760,6 +1761,7 @@ static void coh901318_resume(struct dma_chan *chan)
        }
 
        spin_unlock_irqrestore(&cohc->lock, flags);
+       return 0;
 }
 
 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2114,6 +2116,57 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int coh901318_terminate_all(struct dma_chan *chan)
+{
+       unsigned long flags;
+       struct coh901318_chan *cohc = to_coh901318_chan(chan);
+       struct coh901318_desc *cohd;
+       void __iomem *virtbase = cohc->base->virtbase;
+
+       /* The remainder of this function terminates the transfer */
+       coh901318_pause(chan);
+       spin_lock_irqsave(&cohc->lock, flags);
+
+       /* Clear any pending BE or TC interrupt */
+       if (cohc->id < 32) {
+               writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
+               writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
+       } else {
+               writel(1 << (cohc->id - 32), virtbase +
+                      COH901318_BE_INT_CLEAR2);
+               writel(1 << (cohc->id - 32), virtbase +
+                      COH901318_TC_INT_CLEAR2);
+       }
+
+       enable_powersave(cohc);
+
+       while ((cohd = coh901318_first_active_get(cohc))) {
+               /* release the lli allocation*/
+               coh901318_lli_free(&cohc->base->pool, &cohd->lli);
+
+               /* return desc to free-list */
+               coh901318_desc_remove(cohd);
+               coh901318_desc_free(cohc, cohd);
+       }
+
+       while ((cohd = coh901318_first_queued(cohc))) {
+               /* release the lli allocation*/
+               coh901318_lli_free(&cohc->base->pool, &cohd->lli);
+
+               /* return desc to free-list */
+               coh901318_desc_remove(cohd);
+               coh901318_desc_free(cohc, cohd);
+       }
+
+
+       cohc->nbr_active_done = 0;
+       cohc->busy = 0;
+
+       spin_unlock_irqrestore(&cohc->lock, flags);
+
+       return 0;
+}
+
 static int coh901318_alloc_chan_resources(struct dma_chan *chan)
 {
        struct coh901318_chan   *cohc = to_coh901318_chan(chan);
@@ -2156,7 +2209,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
 
        spin_unlock_irqrestore(&cohc->lock, flags);
 
-       dmaengine_terminate_all(chan);
+       coh901318_terminate_all(chan);
 }
 
 
@@ -2461,8 +2514,8 @@ static const struct burst_table burst_sizes[] = {
        },
 };
 
-static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
-                       struct dma_slave_config *config)
+static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+                                          struct dma_slave_config *config)
 {
        struct coh901318_chan *cohc = to_coh901318_chan(chan);
        dma_addr_t addr;
@@ -2482,7 +2535,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
                maxburst = config->dst_maxburst;
        } else {
                dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
-               return;
+               return -EINVAL;
        }
 
        dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
@@ -2528,7 +2581,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
        default:
                dev_err(COHC_2_DEV(cohc),
                        "bad runtimeconfig: alien address width\n");
-               return;
+               return -EINVAL;
        }
 
        ctrl |= burst_sizes[i].reg;
@@ -2538,84 +2591,12 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 
        cohc->addr = addr;
        cohc->ctrl = ctrl;
-}
-
-static int
-coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                 unsigned long arg)
-{
-       unsigned long flags;
-       struct coh901318_chan *cohc = to_coh901318_chan(chan);
-       struct coh901318_desc *cohd;
-       void __iomem *virtbase = cohc->base->virtbase;
-
-       if (cmd == DMA_SLAVE_CONFIG) {
-               struct dma_slave_config *config =
-                       (struct dma_slave_config *) arg;
-
-               coh901318_dma_set_runtimeconfig(chan, config);
-               return 0;
-         }
-
-       if (cmd == DMA_PAUSE) {
-               coh901318_pause(chan);
-               return 0;
-       }
-
-       if (cmd == DMA_RESUME) {
-               coh901318_resume(chan);
-               return 0;
-       }
-
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENXIO;
-
-       /* The remainder of this function terminates the transfer */
-       coh901318_pause(chan);
-       spin_lock_irqsave(&cohc->lock, flags);
-
-       /* Clear any pending BE or TC interrupt */
-       if (cohc->id < 32) {
-               writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
-               writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
-       } else {
-               writel(1 << (cohc->id - 32), virtbase +
-                      COH901318_BE_INT_CLEAR2);
-               writel(1 << (cohc->id - 32), virtbase +
-                      COH901318_TC_INT_CLEAR2);
-       }
-
-       enable_powersave(cohc);
-
-       while ((cohd = coh901318_first_active_get(cohc))) {
-               /* release the lli allocation*/
-               coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
-               /* return desc to free-list */
-               coh901318_desc_remove(cohd);
-               coh901318_desc_free(cohc, cohd);
-       }
-
-       while ((cohd = coh901318_first_queued(cohc))) {
-               /* release the lli allocation*/
-               coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
-               /* return desc to free-list */
-               coh901318_desc_remove(cohd);
-               coh901318_desc_free(cohc, cohd);
-       }
-
-
-       cohc->nbr_active_done = 0;
-       cohc->busy = 0;
-
-       spin_unlock_irqrestore(&cohc->lock, flags);
 
        return 0;
 }
 
-void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
-                        struct coh901318_base *base)
+static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
+                               struct coh901318_base *base)
 {
        int chans_i;
        int i = 0;
@@ -2717,7 +2698,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
        base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
        base->dma_slave.device_tx_status = coh901318_tx_status;
        base->dma_slave.device_issue_pending = coh901318_issue_pending;
-       base->dma_slave.device_control = coh901318_control;
+       base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
+       base->dma_slave.device_pause = coh901318_pause;
+       base->dma_slave.device_resume = coh901318_resume;
+       base->dma_slave.device_terminate_all = coh901318_terminate_all;
        base->dma_slave.dev = &pdev->dev;
 
        err = dma_async_device_register(&base->dma_slave);
@@ -2737,7 +2721,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
        base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
        base->dma_memcpy.device_tx_status = coh901318_tx_status;
        base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
-       base->dma_memcpy.device_control = coh901318_control;
+       base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
+       base->dma_memcpy.device_pause = coh901318_pause;
+       base->dma_memcpy.device_resume = coh901318_resume;
+       base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
        base->dma_memcpy.dev = &pdev->dev;
        /*
         * This controller can only access address at even 32bit boundaries,
index b743adf56465644670ed77f4a4153cce9015491a..512cb8e2805e797ef12760d2e445652186410137 100644 (file)
@@ -525,12 +525,6 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
        return &c->txd;
 }
 
-static int cpp41_cfg_chan(struct cppi41_channel *c,
-               struct dma_slave_config *cfg)
-{
-       return 0;
-}
-
 static void cppi41_compute_td_desc(struct cppi41_desc *d)
 {
        d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
@@ -647,28 +641,6 @@ static int cppi41_stop_chan(struct dma_chan *chan)
        return 0;
 }
 
-static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-       unsigned long arg)
-{
-       struct cppi41_channel *c = to_cpp41_chan(chan);
-       int ret;
-
-       switch (cmd) {
-       case DMA_SLAVE_CONFIG:
-               ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
-               break;
-
-       case DMA_TERMINATE_ALL:
-               ret = cppi41_stop_chan(chan);
-               break;
-
-       default:
-               ret = -ENXIO;
-               break;
-       }
-       return ret;
-}
-
 static void cleanup_chans(struct cppi41_dd *cdd)
 {
        while (!list_empty(&cdd->ddev.channels)) {
@@ -953,7 +925,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
        cdd->ddev.device_tx_status = cppi41_dma_tx_status;
        cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
        cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
-       cdd->ddev.device_control = cppi41_dma_control;
+       cdd->ddev.device_terminate_all = cppi41_stop_chan;
        cdd->ddev.dev = dev;
        INIT_LIST_HEAD(&cdd->ddev.channels);
        cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
index bdeafeefa5f61b994341348cc6c438b7698ed035..4527a3ebeac446f58a4c3b3b8722d6caf16b1b63 100644 (file)
@@ -210,7 +210,7 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
 }
 
 static int jz4740_dma_slave_config(struct dma_chan *c,
-       const struct dma_slave_config *config)
+                                  struct dma_slave_config *config)
 {
        struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
        struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -290,21 +290,6 @@ static int jz4740_dma_terminate_all(struct dma_chan *c)
        return 0;
 }
 
-static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-       unsigned long arg)
-{
-       struct dma_slave_config *config = (struct dma_slave_config *)arg;
-
-       switch (cmd) {
-       case DMA_SLAVE_CONFIG:
-               return jz4740_dma_slave_config(chan, config);
-       case DMA_TERMINATE_ALL:
-               return jz4740_dma_terminate_all(chan);
-       default:
-               return -ENOSYS;
-       }
-}
-
 static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
 {
        struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -561,7 +546,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
        dd->device_issue_pending = jz4740_dma_issue_pending;
        dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
        dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
-       dd->device_control = jz4740_dma_control;
+       dd->device_config = jz4740_dma_slave_config;
+       dd->device_terminate_all = jz4740_dma_terminate_all;
        dd->dev = &pdev->dev;
        INIT_LIST_HEAD(&dd->channels);
 
index e057935e3023194a108d4ab69c53796a5bc4605b..f15712f2fec6c06949c23f845eeb8bbb776b8907 100644 (file)
@@ -222,31 +222,35 @@ static void balance_ref_count(struct dma_chan *chan)
  */
 static int dma_chan_get(struct dma_chan *chan)
 {
-       int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);
+       int ret;
 
+       /* The channel is already in use, update client count */
        if (chan->client_count) {
                __module_get(owner);
-               err = 0;
-       } else if (try_module_get(owner))
-               err = 0;
+               goto out;
+       }
 
-       if (err == 0)
-               chan->client_count++;
+       if (!try_module_get(owner))
+               return -ENODEV;
 
        /* allocate upon first client reference */
-       if (chan->client_count == 1 && err == 0) {
-               int desc_cnt = chan->device->device_alloc_chan_resources(chan);
-
-               if (desc_cnt < 0) {
-                       err = desc_cnt;
-                       chan->client_count = 0;
-                       module_put(owner);
-               } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
-                       balance_ref_count(chan);
+       if (chan->device->device_alloc_chan_resources) {
+               ret = chan->device->device_alloc_chan_resources(chan);
+               if (ret < 0)
+                       goto err_out;
        }
 
-       return err;
+       if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+               balance_ref_count(chan);
+
+out:
+       chan->client_count++;
+       return 0;
+
+err_out:
+       module_put(owner);
+       return ret;
 }
 
 /**
@@ -257,11 +261,15 @@ static int dma_chan_get(struct dma_chan *chan)
  */
 static void dma_chan_put(struct dma_chan *chan)
 {
+       /* This channel is not in use, bail out */
        if (!chan->client_count)
-               return; /* this channel failed alloc_chan_resources */
+               return;
+
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
-       if (chan->client_count == 0)
+
+       /* This channel is not in use anymore, free it */
+       if (!chan->client_count && chan->device->device_free_chan_resources)
                chan->device->device_free_chan_resources(chan);
 }
 
@@ -471,6 +479,39 @@ static void dma_channel_rebalance(void)
                }
 }
 
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+       struct dma_device *device;
+
+       if (!chan || !caps)
+               return -EINVAL;
+
+       device = chan->device;
+
+       /* check if the channel supports slave transactions */
+       if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
+               return -ENXIO;
+
+       /*
+        * Check whether it reports it uses the generic slave
+        * capabilities, if not, that means it doesn't support any
+        * kind of slave capabilities reporting.
+        */
+       if (!device->directions)
+               return -ENXIO;
+
+       caps->src_addr_widths = device->src_addr_widths;
+       caps->dst_addr_widths = device->dst_addr_widths;
+       caps->directions = device->directions;
+       caps->residue_granularity = device->residue_granularity;
+
+       caps->cmd_pause = !!device->device_pause;
+       caps->cmd_terminate = !!device->device_terminate_all;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_caps);
+
 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
                                          struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
@@ -811,17 +852,16 @@ int dma_async_device_register(struct dma_device *device)
                !device->device_prep_dma_sg);
        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
                !device->device_prep_dma_cyclic);
-       BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-               !device->device_control);
        BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
                !device->device_prep_interleaved_dma);
 
-       BUG_ON(!device->device_alloc_chan_resources);
-       BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_tx_status);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);
 
+       WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions,
+            "this driver doesn't support generic slave capabilities reporting\n");
+
        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
index a8d7809e2f4c639aeb1c9dbbf6f83f36765c93b4..220ee49633e49e88c041c8796a88a83370087e0f 100644 (file)
@@ -349,14 +349,14 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
                       unsigned long data)
 {
        pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
-                  current->comm, n, err, src_off, dst_off, len, data);
+                current->comm, n, err, src_off, dst_off, len, data);
 }
 
-#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
-       if (verbose) \
-               result(err, n, src_off, dst_off, len, data); \
-       else \
-               dbg_result(err, n, src_off, dst_off, len, data); \
+#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
+       if (verbose)                                            \
+               result(err, n, src_off, dst_off, len, data);    \
+       else                                                    \
+               dbg_result(err, n, src_off, dst_off, len, data);\
 })
 
 static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
@@ -405,7 +405,6 @@ static int dmatest_func(void *data)
        struct dmatest_params   *params;
        struct dma_chan         *chan;
        struct dma_device       *dev;
-       unsigned int            src_off, dst_off, len;
        unsigned int            error_count;
        unsigned int            failed_tests = 0;
        unsigned int            total_tests = 0;
@@ -484,6 +483,7 @@ static int dmatest_func(void *data)
                struct dmaengine_unmap_data *um;
                dma_addr_t srcs[src_cnt];
                dma_addr_t *dsts;
+               unsigned int src_off, dst_off, len;
                u8 align = 0;
 
                total_tests++;
@@ -502,15 +502,21 @@ static int dmatest_func(void *data)
                        break;
                }
 
-               if (params->noverify) {
+               if (params->noverify)
                        len = params->buf_size;
+               else
+                       len = dmatest_random() % params->buf_size + 1;
+
+               len = (len >> align) << align;
+               if (!len)
+                       len = 1 << align;
+
+               total_len += len;
+
+               if (params->noverify) {
                        src_off = 0;
                        dst_off = 0;
                } else {
-                       len = dmatest_random() % params->buf_size + 1;
-                       len = (len >> align) << align;
-                       if (!len)
-                               len = 1 << align;
                        src_off = dmatest_random() % (params->buf_size - len + 1);
                        dst_off = dmatest_random() % (params->buf_size - len + 1);
 
@@ -523,11 +529,6 @@ static int dmatest_func(void *data)
                                          params->buf_size);
                }
 
-               len = (len >> align) << align;
-               if (!len)
-                       len = 1 << align;
-               total_len += len;
-
                um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
                                              GFP_KERNEL);
                if (!um) {
index 5c062548957c3183fba608e13354d204c0a4b40c..455b7a4f1e87fe890019eb6acd459e8d7c000667 100644 (file)
  */
 #define NR_DESCS_PER_CHANNEL   64
 
+/* The set of bus widths supported by the DMA controller */
+#define DW_DMA_BUSWIDTHS                         \
+       BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)       | \
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)          | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)         | \
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
 /*----------------------------------------------------------------------*/
 
 static struct device *chan2dev(struct dma_chan *chan)
@@ -955,8 +962,7 @@ static inline void convert_burst(u32 *maxburst)
                *maxburst = 0;
 }
 
-static int
-set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
@@ -973,16 +979,25 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
        return 0;
 }
 
-static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+static int dwc_pause(struct dma_chan *chan)
 {
-       u32 cfglo = channel_readl(dwc, CFG_LO);
-       unsigned int count = 20;        /* timeout iterations */
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       unsigned long           flags;
+       unsigned int            count = 20;     /* timeout iterations */
+       u32                     cfglo;
+
+       spin_lock_irqsave(&dwc->lock, flags);
 
+       cfglo = channel_readl(dwc, CFG_LO);
        channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
        while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
                udelay(2);
 
        dwc->paused = true;
+
+       spin_unlock_irqrestore(&dwc->lock, flags);
+
+       return 0;
 }
 
 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
@@ -994,53 +1009,48 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
        dwc->paused = false;
 }
 
-static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                      unsigned long arg)
+static int dwc_resume(struct dma_chan *chan)
 {
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
-       struct dw_dma           *dw = to_dw_dma(chan->device);
-       struct dw_desc          *desc, *_desc;
        unsigned long           flags;
-       LIST_HEAD(list);
 
-       if (cmd == DMA_PAUSE) {
-               spin_lock_irqsave(&dwc->lock, flags);
+       if (!dwc->paused)
+               return 0;
 
-               dwc_chan_pause(dwc);
+       spin_lock_irqsave(&dwc->lock, flags);
 
-               spin_unlock_irqrestore(&dwc->lock, flags);
-       } else if (cmd == DMA_RESUME) {
-               if (!dwc->paused)
-                       return 0;
+       dwc_chan_resume(dwc);
 
-               spin_lock_irqsave(&dwc->lock, flags);
+       spin_unlock_irqrestore(&dwc->lock, flags);
 
-               dwc_chan_resume(dwc);
+       return 0;
+}
 
-               spin_unlock_irqrestore(&dwc->lock, flags);
-       } else if (cmd == DMA_TERMINATE_ALL) {
-               spin_lock_irqsave(&dwc->lock, flags);
+static int dwc_terminate_all(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(chan->device);
+       struct dw_desc          *desc, *_desc;
+       unsigned long           flags;
+       LIST_HEAD(list);
 
-               clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+       spin_lock_irqsave(&dwc->lock, flags);
 
-               dwc_chan_disable(dw, dwc);
+       clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+
+       dwc_chan_disable(dw, dwc);
 
-               dwc_chan_resume(dwc);
+       dwc_chan_resume(dwc);
 
-               /* active_list entries will end up before queued entries */
-               list_splice_init(&dwc->queue, &list);
-               list_splice_init(&dwc->active_list, &list);
+       /* active_list entries will end up before queued entries */
+       list_splice_init(&dwc->queue, &list);
+       list_splice_init(&dwc->active_list, &list);
 
-               spin_unlock_irqrestore(&dwc->lock, flags);
+       spin_unlock_irqrestore(&dwc->lock, flags);
 
-               /* Flush all pending and queued descriptors */
-               list_for_each_entry_safe(desc, _desc, &list, desc_node)
-                       dwc_descriptor_complete(dwc, desc, false);
-       } else if (cmd == DMA_SLAVE_CONFIG) {
-               return set_runtime_config(chan, (struct dma_slave_config *)arg);
-       } else {
-               return -ENXIO;
-       }
+       /* Flush all pending and queued descriptors */
+       list_for_each_entry_safe(desc, _desc, &list, desc_node)
+               dwc_descriptor_complete(dwc, desc, false);
 
        return 0;
 }
@@ -1551,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
                }
        } else {
                dw->nr_masters = pdata->nr_masters;
-               memcpy(dw->data_width, pdata->data_width, 4);
+               for (i = 0; i < dw->nr_masters; i++)
+                       dw->data_width[i] = pdata->data_width[i];
        }
 
        /* Calculate all channel mask before DMA setup */
@@ -1656,13 +1667,23 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        dw->dma.device_free_chan_resources = dwc_free_chan_resources;
 
        dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
-
        dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
-       dw->dma.device_control = dwc_control;
+
+       dw->dma.device_config = dwc_config;
+       dw->dma.device_pause = dwc_pause;
+       dw->dma.device_resume = dwc_resume;
+       dw->dma.device_terminate_all = dwc_terminate_all;
 
        dw->dma.device_tx_status = dwc_tx_status;
        dw->dma.device_issue_pending = dwc_issue_pending;
 
+       /* DMA capabilities */
+       dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
+       dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
+       dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+                            BIT(DMA_MEM_TO_MEM);
+       dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
        err = dma_async_device_register(&dw->dma);
        if (err)
                goto err_dma_register;
index 32ea1aca7a0ea27dc28ddd58c20281f482f53c45..6565a361e7e51c7e80bbbc6d920880b47aa71af8 100644 (file)
@@ -100,7 +100,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct dw_dma_platform_data *pdata;
-       u32 tmp, arr[4];
+       u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
 
        if (!np) {
                dev_err(&pdev->dev, "Missing DT data\n");
@@ -127,7 +127,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
                pdata->block_size = tmp;
 
        if (!of_property_read_u32(np, "dma-masters", &tmp)) {
-               if (tmp > 4)
+               if (tmp > DW_DMA_MAX_NR_MASTERS)
                        return NULL;
 
                pdata->nr_masters = tmp;
index 848e232f7cc755991eda6e4022771d7cba7e1c20..241ff2b1402bf95572d053c09616414ba5eb2044 100644 (file)
@@ -252,7 +252,7 @@ struct dw_dma_chan {
        u8                      src_master;
        u8                      dst_master;
 
-       /* configuration passed via DMA_SLAVE_CONFIG */
+       /* configuration passed via .device_config */
        struct dma_slave_config dma_sconfig;
 };
 
@@ -285,7 +285,7 @@ struct dw_dma {
 
        /* hardware configuration */
        unsigned char           nr_masters;
-       unsigned char           data_width[4];
+       unsigned char           data_width[DW_DMA_MAX_NR_MASTERS];
 };
 
 static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
index b969206439b7232442c0b4da6e6066f06c218fa3..276157f22612dc18140b84294ab1ef49ecf46ebc 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/edma.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -244,8 +245,9 @@ static void edma_execute(struct edma_chan *echan)
        }
 }
 
-static int edma_terminate_all(struct edma_chan *echan)
+static int edma_terminate_all(struct dma_chan *chan)
 {
+       struct edma_chan *echan = to_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);
 
@@ -273,9 +275,11 @@ static int edma_terminate_all(struct edma_chan *echan)
        return 0;
 }
 
-static int edma_slave_config(struct edma_chan *echan,
+static int edma_slave_config(struct dma_chan *chan,
        struct dma_slave_config *cfg)
 {
+       struct edma_chan *echan = to_edma_chan(chan);
+
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;
@@ -285,8 +289,10 @@ static int edma_slave_config(struct edma_chan *echan,
        return 0;
 }
 
-static int edma_dma_pause(struct edma_chan *echan)
+static int edma_dma_pause(struct dma_chan *chan)
 {
+       struct edma_chan *echan = to_edma_chan(chan);
+
        /* Pause/Resume only allowed with cyclic mode */
        if (!echan->edesc || !echan->edesc->cyclic)
                return -EINVAL;
@@ -295,8 +301,10 @@ static int edma_dma_pause(struct edma_chan *echan)
        return 0;
 }
 
-static int edma_dma_resume(struct edma_chan *echan)
+static int edma_dma_resume(struct dma_chan *chan)
 {
+       struct edma_chan *echan = to_edma_chan(chan);
+
        /* Pause/Resume only allowed with cyclic mode */
        if (!echan->edesc->cyclic)
                return -EINVAL;
@@ -305,36 +313,6 @@ static int edma_dma_resume(struct edma_chan *echan)
        return 0;
 }
 
-static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                       unsigned long arg)
-{
-       int ret = 0;
-       struct dma_slave_config *config;
-       struct edma_chan *echan = to_edma_chan(chan);
-
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               edma_terminate_all(echan);
-               break;
-       case DMA_SLAVE_CONFIG:
-               config = (struct dma_slave_config *)arg;
-               ret = edma_slave_config(echan, config);
-               break;
-       case DMA_PAUSE:
-               ret = edma_dma_pause(echan);
-               break;
-
-       case DMA_RESUME:
-               ret = edma_dma_resume(echan);
-               break;
-
-       default:
-               ret = -ENOSYS;
-       }
-
-       return ret;
-}
-
 /*
  * A PaRAM set configuration abstraction used by other modes
  * @chan: Channel who's PaRAM set we're configuring
@@ -557,7 +535,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
 
-struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        size_t len, unsigned long tx_flags)
 {
@@ -994,19 +972,6 @@ static void __init edma_chan_init(struct edma_cc *ecc,
                                 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
-static int edma_dma_device_slave_caps(struct dma_chan *dchan,
-                                     struct dma_slave_caps *caps)
-{
-       caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
-       caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
-       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       caps->cmd_pause = true;
-       caps->cmd_terminate = true;
-       caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-       return 0;
-}
-
 static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
                          struct device *dev)
 {
@@ -1017,8 +982,16 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
        dma->device_free_chan_resources = edma_free_chan_resources;
        dma->device_issue_pending = edma_issue_pending;
        dma->device_tx_status = edma_tx_status;
-       dma->device_control = edma_control;
-       dma->device_slave_caps = edma_dma_device_slave_caps;
+       dma->device_config = edma_slave_config;
+       dma->device_pause = edma_dma_pause;
+       dma->device_resume = edma_dma_resume;
+       dma->device_terminate_all = edma_terminate_all;
+
+       dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+       dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+       dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
        dma->dev = dev;
 
        /*
index 7650470196c46c6b0d26d26184d613d11f040712..24e5290faa320108fb9a48a754c39ac606ed6927 100644 (file)
@@ -144,7 +144,7 @@ struct ep93xx_dma_desc {
  * @queue: pending descriptors which are handled next
  * @free_list: list of free descriptors which can be used
  * @runtime_addr: physical address currently used as dest/src (M2M only). This
- *                is set via %DMA_SLAVE_CONFIG before slave operation is
+ *                is set via .device_config before slave operation is
  *                prepared
  * @runtime_ctrl: M2M runtime values for the control register.
  *
@@ -1164,13 +1164,14 @@ fail:
 
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
- * @edmac: channel
+ * @chan: channel
  *
  * Stops all DMA transactions. All descriptors are put back to the
  * @edmac->free_list and callbacks are _not_ called.
  */
-static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+static int ep93xx_dma_terminate_all(struct dma_chan *chan)
 {
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *_d;
        unsigned long flags;
        LIST_HEAD(list);
@@ -1194,9 +1195,10 @@ static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
        return 0;
 }
 
-static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+static int ep93xx_dma_slave_config(struct dma_chan *chan,
                                   struct dma_slave_config *config)
 {
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        enum dma_slave_buswidth width;
        unsigned long flags;
        u32 addr, ctrl;
@@ -1241,36 +1243,6 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
        return 0;
 }
 
-/**
- * ep93xx_dma_control - manipulate all pending operations on a channel
- * @chan: channel
- * @cmd: control command to perform
- * @arg: optional argument
- *
- * Controls the channel. Function returns %0 in case of success or negative
- * error in case of failure.
- */
-static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                             unsigned long arg)
-{
-       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
-       struct dma_slave_config *config;
-
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               return ep93xx_dma_terminate_all(edmac);
-
-       case DMA_SLAVE_CONFIG:
-               config = (struct dma_slave_config *)arg;
-               return ep93xx_dma_slave_config(edmac, config);
-
-       default:
-               break;
-       }
-
-       return -ENOSYS;
-}
-
 /**
  * ep93xx_dma_tx_status - check if a transaction is completed
  * @chan: channel
@@ -1352,7 +1324,8 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
        dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
-       dma_dev->device_control = ep93xx_dma_control;
+       dma_dev->device_config = ep93xx_dma_slave_config;
+       dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
        dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
        dma_dev->device_tx_status = ep93xx_dma_tx_status;
 
index e9ebb89e17119544616c4430acecb01cab79dc22..09e2842d15ecfffbe02f3fd726f972194dd122a2 100644 (file)
@@ -289,62 +289,69 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
        kfree(fsl_desc);
 }
 
-static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-               unsigned long arg)
+static int fsl_edma_terminate_all(struct dma_chan *chan)
 {
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-       struct dma_slave_config *cfg = (void *)arg;
        unsigned long flags;
        LIST_HEAD(head);
 
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+       fsl_edma_disable_request(fsl_chan);
+       fsl_chan->edesc = NULL;
+       vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+       return 0;
+}
+
+static int fsl_edma_pause(struct dma_chan *chan)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+       if (fsl_chan->edesc) {
                fsl_edma_disable_request(fsl_chan);
-               fsl_chan->edesc = NULL;
-               vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-               spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-               vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-               return 0;
-
-       case DMA_SLAVE_CONFIG:
-               fsl_chan->fsc.dir = cfg->direction;
-               if (cfg->direction == DMA_DEV_TO_MEM) {
-                       fsl_chan->fsc.dev_addr = cfg->src_addr;
-                       fsl_chan->fsc.addr_width = cfg->src_addr_width;
-                       fsl_chan->fsc.burst = cfg->src_maxburst;
-                       fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
-               } else if (cfg->direction == DMA_MEM_TO_DEV) {
-                       fsl_chan->fsc.dev_addr = cfg->dst_addr;
-                       fsl_chan->fsc.addr_width = cfg->dst_addr_width;
-                       fsl_chan->fsc.burst = cfg->dst_maxburst;
-                       fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
-               } else {
-                       return -EINVAL;
-               }
-               return 0;
+               fsl_chan->status = DMA_PAUSED;
+       }
+       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+       return 0;
+}
 
-       case DMA_PAUSE:
-               spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-               if (fsl_chan->edesc) {
-                       fsl_edma_disable_request(fsl_chan);
-                       fsl_chan->status = DMA_PAUSED;
-               }
-               spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-               return 0;
-
-       case DMA_RESUME:
-               spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-               if (fsl_chan->edesc) {
-                       fsl_edma_enable_request(fsl_chan);
-                       fsl_chan->status = DMA_IN_PROGRESS;
-               }
-               spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-               return 0;
+static int fsl_edma_resume(struct dma_chan *chan)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+       unsigned long flags;
 
-       default:
-               return -ENXIO;
+       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+       if (fsl_chan->edesc) {
+               fsl_edma_enable_request(fsl_chan);
+               fsl_chan->status = DMA_IN_PROGRESS;
+       }
+       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+       return 0;
+}
+
+static int fsl_edma_slave_config(struct dma_chan *chan,
+                                struct dma_slave_config *cfg)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+       fsl_chan->fsc.dir = cfg->direction;
+       if (cfg->direction == DMA_DEV_TO_MEM) {
+               fsl_chan->fsc.dev_addr = cfg->src_addr;
+               fsl_chan->fsc.addr_width = cfg->src_addr_width;
+               fsl_chan->fsc.burst = cfg->src_maxburst;
+               fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
+       } else if (cfg->direction == DMA_MEM_TO_DEV) {
+               fsl_chan->fsc.dev_addr = cfg->dst_addr;
+               fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+               fsl_chan->fsc.burst = cfg->dst_maxburst;
+               fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
+       } else {
+                       return -EINVAL;
        }
+       return 0;
 }
 
 static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
@@ -780,18 +787,6 @@ static void fsl_edma_free_chan_resources(struct dma_chan *chan)
        fsl_chan->tcd_pool = NULL;
 }
 
-static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
-               struct dma_slave_caps *caps)
-{
-       caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
-       caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
-       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       caps->cmd_pause = true;
-       caps->cmd_terminate = true;
-
-       return 0;
-}
-
 static int
 fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
@@ -917,9 +912,15 @@ static int fsl_edma_probe(struct platform_device *pdev)
        fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
        fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
        fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
-       fsl_edma->dma_dev.device_control = fsl_edma_control;
+       fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
+       fsl_edma->dma_dev.device_pause = fsl_edma_pause;
+       fsl_edma->dma_dev.device_resume = fsl_edma_resume;
+       fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
        fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
-       fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
+
+       fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
+       fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+       fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 
        platform_set_drvdata(pdev, fsl_edma);
 
index 38821cdf862b6c7473ced93c1a1f48f5db4fc42a..300f821f1890ecdbfba062ceb8c03c780efaaff6 100644 (file)
@@ -941,84 +941,56 @@ fail:
        return NULL;
 }
 
-/**
- * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: DMAEngine flags
- * @context: transaction context (ignored)
- *
- * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
- * DMA_SLAVE API, this gets the device-specific information from the
- * chan->private variable.
- */
-static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
-       struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_transfer_direction direction, unsigned long flags,
-       void *context)
+static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
-       /*
-        * This operation is not supported on the Freescale DMA controller
-        *
-        * However, we need to provide the function pointer to allow the
-        * device_control() method to work.
-        */
-       return NULL;
-}
-
-static int fsl_dma_device_control(struct dma_chan *dchan,
-                                 enum dma_ctrl_cmd cmd, unsigned long arg)
-{
-       struct dma_slave_config *config;
        struct fsldma_chan *chan;
-       int size;
 
        if (!dchan)
                return -EINVAL;
 
        chan = to_fsl_chan(dchan);
 
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               spin_lock_bh(&chan->desc_lock);
-
-               /* Halt the DMA engine */
-               dma_halt(chan);
+       spin_lock_bh(&chan->desc_lock);
 
-               /* Remove and free all of the descriptors in the LD queue */
-               fsldma_free_desc_list(chan, &chan->ld_pending);
-               fsldma_free_desc_list(chan, &chan->ld_running);
-               fsldma_free_desc_list(chan, &chan->ld_completed);
-               chan->idle = true;
+       /* Halt the DMA engine */
+       dma_halt(chan);
 
-               spin_unlock_bh(&chan->desc_lock);
-               return 0;
+       /* Remove and free all of the descriptors in the LD queue */
+       fsldma_free_desc_list(chan, &chan->ld_pending);
+       fsldma_free_desc_list(chan, &chan->ld_running);
+       fsldma_free_desc_list(chan, &chan->ld_completed);
+       chan->idle = true;
 
-       case DMA_SLAVE_CONFIG:
-               config = (struct dma_slave_config *)arg;
+       spin_unlock_bh(&chan->desc_lock);
+       return 0;
+}
 
-               /* make sure the channel supports setting burst size */
-               if (!chan->set_request_count)
-                       return -ENXIO;
+static int fsl_dma_device_config(struct dma_chan *dchan,
+                                struct dma_slave_config *config)
+{
+       struct fsldma_chan *chan;
+       int size;
 
-               /* we set the controller burst size depending on direction */
-               if (config->direction == DMA_MEM_TO_DEV)
-                       size = config->dst_addr_width * config->dst_maxburst;
-               else
-                       size = config->src_addr_width * config->src_maxburst;
+       if (!dchan)
+               return -EINVAL;
 
-               chan->set_request_count(chan, size);
-               return 0;
+       chan = to_fsl_chan(dchan);
 
-       default:
+       /* make sure the channel supports setting burst size */
+       if (!chan->set_request_count)
                return -ENXIO;
-       }
 
+       /* we set the controller burst size depending on direction */
+       if (config->direction == DMA_MEM_TO_DEV)
+               size = config->dst_addr_width * config->dst_maxburst;
+       else
+               size = config->src_addr_width * config->src_maxburst;
+
+       chan->set_request_count(chan, size);
        return 0;
 }
 
+
 /**
  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
  * @chan : Freescale DMA channel
@@ -1395,10 +1367,15 @@ static int fsldma_of_probe(struct platform_device *op)
        fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
        fdev->common.device_tx_status = fsl_tx_status;
        fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
-       fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
-       fdev->common.device_control = fsl_dma_device_control;
+       fdev->common.device_config = fsl_dma_device_config;
+       fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
        fdev->common.dev = &op->dev;
 
+       fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
+       fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
+       fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
        dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
 
        platform_set_drvdata(op, fdev);
index 239c20c84382ce4c724858f5af5ed5c92a5c1416..31bffccdcc75f950be6dbabc91019a2dfeae4c0f 100644 (file)
 #define FSL_DMA_DGSR_EOSI      0x02
 #define FSL_DMA_DGSR_EOLSI     0x01
 
+#define FSL_DMA_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
 typedef u64 __bitwise v64;
 typedef u32 __bitwise v32;
 
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
new file mode 100644 (file)
index 0000000..ed045a9
--- /dev/null
@@ -0,0 +1,1011 @@
+/*
+ * IMG Multi-threaded DMA Controller (MDC)
+ *
+ * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define MDC_MAX_DMA_CHANNELS                   32
+
+#define MDC_GENERAL_CONFIG                     0x000
+#define MDC_GENERAL_CONFIG_LIST_IEN            BIT(31)
+#define MDC_GENERAL_CONFIG_IEN                 BIT(29)
+#define MDC_GENERAL_CONFIG_LEVEL_INT           BIT(28)
+#define MDC_GENERAL_CONFIG_INC_W               BIT(12)
+#define MDC_GENERAL_CONFIG_INC_R               BIT(8)
+#define MDC_GENERAL_CONFIG_PHYSICAL_W          BIT(7)
+#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT       4
+#define MDC_GENERAL_CONFIG_WIDTH_W_MASK                0x7
+#define MDC_GENERAL_CONFIG_PHYSICAL_R          BIT(3)
+#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT       0
+#define MDC_GENERAL_CONFIG_WIDTH_R_MASK                0x7
+
+#define MDC_READ_PORT_CONFIG                   0x004
+#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT     28
+#define MDC_READ_PORT_CONFIG_STHREAD_MASK      0xf
+#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT     24
+#define MDC_READ_PORT_CONFIG_RTHREAD_MASK      0xf
+#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT     16
+#define MDC_READ_PORT_CONFIG_WTHREAD_MASK      0xf
+#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT  4
+#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK   0xff
+#define MDC_READ_PORT_CONFIG_DREQ_ENABLE       BIT(1)
+
+#define MDC_READ_ADDRESS                       0x008
+
+#define MDC_WRITE_ADDRESS                      0x00c
+
+#define MDC_TRANSFER_SIZE                      0x010
+#define MDC_TRANSFER_SIZE_MASK                 0xffffff
+
+#define MDC_LIST_NODE_ADDRESS                  0x014
+
+#define MDC_CMDS_PROCESSED                     0x018
+#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT        16
+#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK 0x3f
+#define MDC_CMDS_PROCESSED_INT_ACTIVE          BIT(8)
+#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT     0
+#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK      0x3f
+
+#define MDC_CONTROL_AND_STATUS                 0x01c
+#define MDC_CONTROL_AND_STATUS_CANCEL          BIT(20)
+#define MDC_CONTROL_AND_STATUS_LIST_EN         BIT(4)
+#define MDC_CONTROL_AND_STATUS_EN              BIT(0)
+
+#define MDC_ACTIVE_TRANSFER_SIZE               0x030
+
+#define MDC_GLOBAL_CONFIG_A                            0x900
+#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT      16
+#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK       0xff
+#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT         8
+#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK          0xff
+#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT                0
+#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK         0xff
+
+/*
+ * One hardware linked-list descriptor.  The first eight u32 fields are in
+ * the same order as the channel registers at offsets 0x000-0x01c
+ * (MDC_GENERAL_CONFIG through MDC_CONTROL_AND_STATUS); node_addr holds the
+ * bus address of the next descriptor in the chain (0 terminates the list).
+ */
+struct mdc_hw_list_desc {
+       u32 gen_conf;
+       u32 readport_conf;
+       u32 read_addr;
+       u32 write_addr;
+       u32 xfer_size;          /* transfer length in bytes, minus one */
+       u32 node_addr;          /* bus address of next descriptor, or 0 */
+       u32 cmds_done;
+       u32 ctrl_status;
+       /*
+        * Not part of the list descriptor, but instead used by the CPU to
+        * traverse the list.
+        */
+       struct mdc_hw_list_desc *next_desc;
+};
+
+/* Software transfer descriptor wrapping a chain of hardware descriptors. */
+struct mdc_tx_desc {
+       struct mdc_chan *chan;
+       struct virt_dma_desc vd;
+       dma_addr_t list_phys;           /* bus address of first hw descriptor */
+       struct mdc_hw_list_desc *list;  /* CPU pointer to first hw descriptor */
+       bool cyclic;                    /* true for cyclic (ring) transfers */
+       bool cmd_loaded;                /* first "list loaded" IRQ seen */
+       unsigned int list_len;          /* number of hw descriptors in the list */
+       unsigned int list_period_len;   /* hw descriptors per period (cyclic) */
+       size_t list_xfer_size;          /* total transfer size in bytes */
+       unsigned int list_cmds_done;    /* commands completed so far */
+};
+
+/* Per-channel state. */
+struct mdc_chan {
+       struct mdc_dma *mdma;           /* owning controller */
+       struct virt_dma_chan vc;
+       struct dma_slave_config config; /* latched by mdc_slave_config() */
+       struct mdc_tx_desc *desc;       /* currently executing descriptor */
+       int irq;
+       unsigned int periph;            /* peripheral number, from DT xlate */
+       unsigned int thread;            /* thread ID, from DT xlate */
+       unsigned int chan_nr;
+};
+
+/* Per-SoC hooks for routing a peripheral's DMA request to a channel. */
+struct mdc_dma_soc_data {
+       void (*enable_chan)(struct mdc_chan *mchan);
+       void (*disable_chan)(struct mdc_chan *mchan);
+};
+
+/* Per-controller state. */
+struct mdc_dma {
+       struct dma_device dma_dev;
+       void __iomem *regs;
+       struct clk *clk;
+       struct dma_pool *desc_pool;     /* pool of mdc_hw_list_desc nodes */
+       struct regmap *periph_regs;     /* "img,cr-periph" syscon */
+       spinlock_t lock;
+       unsigned int nr_threads;
+       unsigned int nr_channels;
+       unsigned int bus_width;         /* system bus width in bytes */
+       unsigned int max_burst_mult;    /* img,max-burst-multiplier DT prop */
+       unsigned int max_xfer_size;     /* per-descriptor byte limit */
+       const struct mdc_dma_soc_data *soc;
+       struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
+};
+
+/* Read a 32-bit MDC register at byte offset @reg. */
+static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
+{
+       return readl(mdma->regs + reg);
+}
+
+/* Write a 32-bit MDC register at byte offset @reg. */
+static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
+{
+       writel(val, mdma->regs + reg);
+}
+
+/* Read a per-channel register; each channel's register bank is 0x40 bytes. */
+static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
+{
+       return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
+}
+
+/* Write a per-channel register; each channel's register bank is 0x40 bytes. */
+static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
+{
+       mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
+}
+
+/* Convert a dma_chan to its containing mdc_chan (via the virt-dma channel). */
+static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
+{
+       return container_of(to_virt_chan(c), struct mdc_chan, vc);
+}
+
+/* Convert an async_tx descriptor to its containing mdc_tx_desc. */
+static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
+{
+       struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);
+
+       return container_of(vdesc, struct mdc_tx_desc, vd);
+}
+
+/* Device pointer for dev_*() logging. */
+static inline struct device *mdma2dev(struct mdc_dma *mdma)
+{
+       return mdma->dma_dev.dev;
+}
+
+/*
+ * Encode a bus width in bytes as log2(bytes) for the WIDTH_R/WIDTH_W
+ * register fields (widths are powers of two).
+ */
+static inline unsigned int to_mdc_width(unsigned int bytes)
+{
+       return ffs(bytes) - 1;
+}
+
+/* Set the read-port access width (in bytes) in a descriptor's gen_conf. */
+static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
+                                     unsigned int bytes)
+{
+       ldesc->gen_conf |= to_mdc_width(bytes) <<
+               MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
+}
+
+/* Set the write-port access width (in bytes) in a descriptor's gen_conf. */
+static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
+                                      unsigned int bytes)
+{
+       ldesc->gen_conf |= to_mdc_width(bytes) <<
+               MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
+}
+
+/*
+ * mdc_list_desc_config() - fill in one hardware list descriptor
+ * @mchan: channel the descriptor will run on
+ * @ldesc: descriptor to initialize
+ * @dir: DMA_MEM_TO_DEV, DMA_DEV_TO_MEM, or anything else for mem-to-mem
+ * @src: source bus address
+ * @dst: destination bus address
+ * @len: transfer length in bytes (at most mdma->max_xfer_size)
+ */
+static void mdc_list_desc_config(struct mdc_chan *mchan,
+                                struct mdc_hw_list_desc *ldesc,
+                                enum dma_transfer_direction dir,
+                                dma_addr_t src, dma_addr_t dst, size_t len)
+{
+       struct mdc_dma *mdma = mchan->mdma;
+       unsigned int max_burst, burst_size;
+
+       ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
+               MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
+               MDC_GENERAL_CONFIG_PHYSICAL_R;
+       ldesc->readport_conf =
+               (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
+               (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
+               (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
+       ldesc->read_addr = src;
+       ldesc->write_addr = dst;
+       /* The hardware transfer-size field holds length minus one. */
+       ldesc->xfer_size = len - 1;
+       ldesc->node_addr = 0;
+       ldesc->cmds_done = 0;
+       ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
+               MDC_CONTROL_AND_STATUS_EN;
+       ldesc->next_desc = NULL;
+
+       /* Use a smaller maximum burst when either address is unaligned. */
+       if (IS_ALIGNED(dst, mdma->bus_width) &&
+           IS_ALIGNED(src, mdma->bus_width))
+               max_burst = mdma->bus_width * mdma->max_burst_mult;
+       else
+               max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);
+
+       if (dir == DMA_MEM_TO_DEV) {
+               /* Increment only the memory (read) side; device paces via DREQ. */
+               ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
+               ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
+               mdc_set_read_width(ldesc, mdma->bus_width);
+               mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
+               burst_size = min(max_burst, mchan->config.dst_maxburst *
+                                mchan->config.dst_addr_width);
+       } else if (dir == DMA_DEV_TO_MEM) {
+               /* Increment only the memory (write) side; device paces via DREQ. */
+               ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
+               ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
+               mdc_set_read_width(ldesc, mchan->config.src_addr_width);
+               mdc_set_write_width(ldesc, mdma->bus_width);
+               burst_size = min(max_burst, mchan->config.src_maxburst *
+                                mchan->config.src_addr_width);
+       } else {
+               /* Memory-to-memory: increment both sides, full bus width. */
+               ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
+                       MDC_GENERAL_CONFIG_INC_W;
+               mdc_set_read_width(ldesc, mdma->bus_width);
+               mdc_set_write_width(ldesc, mdma->bus_width);
+               burst_size = max_burst;
+       }
+       /* BURST_SIZE field also holds size minus one. */
+       ldesc->readport_conf |= (burst_size - 1) <<
+               MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
+}
+
+/*
+ * Walk the hardware descriptor chain and return each node to the DMA pool.
+ * The next bus address is taken from node_addr before the node is freed.
+ */
+static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
+{
+       struct mdc_dma *mdma = mdesc->chan->mdma;
+       struct mdc_hw_list_desc *curr, *next;
+       dma_addr_t curr_phys, next_phys;
+
+       curr = mdesc->list;
+       curr_phys = mdesc->list_phys;
+       while (curr) {
+               next = curr->next_desc;
+               next_phys = curr->node_addr;
+               dma_pool_free(mdma->desc_pool, curr, curr_phys);
+               curr = next;
+               curr_phys = next_phys;
+       }
+}
+
+/* virt-dma desc_free callback: free the hw list, then the sw descriptor. */
+static void mdc_desc_free(struct virt_dma_desc *vd)
+{
+       struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);
+
+       mdc_list_desc_free(mdesc);
+       kfree(mdesc);
+}
+
+/*
+ * mdc_prep_dma_memcpy() - prepare a memory-to-memory transfer
+ *
+ * The copy is split into a chain of hardware descriptors of at most
+ * mdma->max_xfer_size bytes each.  Returns NULL on zero length or
+ * allocation failure.
+ */
+static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
+       struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
+       unsigned long flags)
+{
+       struct mdc_chan *mchan = to_mdc_chan(chan);
+       struct mdc_dma *mdma = mchan->mdma;
+       struct mdc_tx_desc *mdesc;
+       struct mdc_hw_list_desc *curr, *prev = NULL;
+       dma_addr_t curr_phys, prev_phys;
+
+       if (!len)
+               return NULL;
+
+       mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
+       if (!mdesc)
+               return NULL;
+       mdesc->chan = mchan;
+       mdesc->list_xfer_size = len;
+
+       while (len > 0) {
+               size_t xfer_size;
+
+               curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
+               if (!curr)
+                       goto free_desc;
+
+               /* Link onto the previous node, or start the list. */
+               if (prev) {
+                       prev->node_addr = curr_phys;
+                       prev->next_desc = curr;
+               } else {
+                       mdesc->list_phys = curr_phys;
+                       mdesc->list = curr;
+               }
+
+               xfer_size = min_t(size_t, mdma->max_xfer_size, len);
+
+               mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
+                                    xfer_size);
+
+               prev = curr;
+               /* NOTE(review): prev_phys is written but never read here. */
+               prev_phys = curr_phys;
+
+               mdesc->list_len++;
+               src += xfer_size;
+               dest += xfer_size;
+               len -= xfer_size;
+       }
+
+       return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
+
+free_desc:
+       /* Frees any partially-built hw list and the sw descriptor. */
+       mdc_desc_free(&mdesc->vd);
+
+       return NULL;
+}
+
+/*
+ * Validate the configured slave bus width for direction @dir: it must be
+ * 1, 2, 4 or 8 bytes and no wider than the controller's system bus.
+ * Returns 0 if acceptable, -EINVAL otherwise.
+ */
+static int mdc_check_slave_width(struct mdc_chan *mchan,
+                                enum dma_transfer_direction dir)
+{
+       enum dma_slave_buswidth width;
+
+       if (dir == DMA_MEM_TO_DEV)
+               width = mchan->config.dst_addr_width;
+       else
+               width = mchan->config.src_addr_width;
+
+       switch (width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+       case DMA_SLAVE_BUSWIDTH_8_BYTES:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (width > mchan->mdma->bus_width)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * mdc_prep_dma_cyclic() - prepare a cyclic (ring) transfer
+ *
+ * Builds one hardware descriptor chain covering @buf_len bytes, with
+ * each @period_len chunk split into max_xfer_size pieces, then links the
+ * last node back to the first so the hardware loops forever.
+ * Returns NULL on invalid parameters or allocation failure.
+ */
+static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
+       struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction dir,
+       unsigned long flags)
+{
+       struct mdc_chan *mchan = to_mdc_chan(chan);
+       struct mdc_dma *mdma = mchan->mdma;
+       struct mdc_tx_desc *mdesc;
+       struct mdc_hw_list_desc *curr, *prev = NULL;
+       dma_addr_t curr_phys, prev_phys;
+
+       if (!buf_len && !period_len)
+               return NULL;
+
+       if (!is_slave_direction(dir))
+               return NULL;
+
+       if (mdc_check_slave_width(mchan, dir) < 0)
+               return NULL;
+
+       mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
+       if (!mdesc)
+               return NULL;
+       mdesc->chan = mchan;
+       mdesc->cyclic = true;
+       mdesc->list_xfer_size = buf_len;
+       /* Number of hw descriptors that make up one period. */
+       mdesc->list_period_len = DIV_ROUND_UP(period_len,
+                                             mdma->max_xfer_size);
+
+       while (buf_len > 0) {
+               size_t remainder = min(period_len, buf_len);
+
+               while (remainder > 0) {
+                       size_t xfer_size;
+
+                       curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
+                                             &curr_phys);
+                       if (!curr)
+                               goto free_desc;
+
+                       /* Link onto the previous node, or start the list. */
+                       if (!prev) {
+                               mdesc->list_phys = curr_phys;
+                               mdesc->list = curr;
+                       } else {
+                               prev->node_addr = curr_phys;
+                               prev->next_desc = curr;
+                       }
+
+                       xfer_size = min_t(size_t, mdma->max_xfer_size,
+                                         remainder);
+
+                       if (dir == DMA_MEM_TO_DEV) {
+                               mdc_list_desc_config(mchan, curr, dir,
+                                                    buf_addr,
+                                                    mchan->config.dst_addr,
+                                                    xfer_size);
+                       } else {
+                               mdc_list_desc_config(mchan, curr, dir,
+                                                    mchan->config.src_addr,
+                                                    buf_addr,
+                                                    xfer_size);
+                       }
+
+                       prev = curr;
+                       prev_phys = curr_phys;
+
+                       mdesc->list_len++;
+                       buf_addr += xfer_size;
+                       buf_len -= xfer_size;
+                       remainder -= xfer_size;
+               }
+       }
+       /*
+        * Close the ring: the last node points back at the first.
+        * NOTE(review): if buf_len == 0 but period_len != 0 the loop body
+        * never runs and prev is NULL here - confirm callers can't do that.
+        */
+       prev->node_addr = mdesc->list_phys;
+
+       return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
+
+free_desc:
+       mdc_desc_free(&mdesc->vd);
+
+       return NULL;
+}
+
+/*
+ * mdc_prep_slave_sg() - prepare a slave scatter/gather transfer
+ *
+ * Each scatterlist entry is split into hardware descriptors of at most
+ * mdma->max_xfer_size bytes.  Returns NULL on invalid parameters or
+ * allocation failure.
+ */
+static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl,
+       unsigned int sg_len, enum dma_transfer_direction dir,
+       unsigned long flags, void *context)
+{
+       struct mdc_chan *mchan = to_mdc_chan(chan);
+       struct mdc_dma *mdma = mchan->mdma;
+       struct mdc_tx_desc *mdesc;
+       struct scatterlist *sg;
+       struct mdc_hw_list_desc *curr, *prev = NULL;
+       dma_addr_t curr_phys, prev_phys;
+       unsigned int i;
+
+       if (!sgl)
+               return NULL;
+
+       if (!is_slave_direction(dir))
+               return NULL;
+
+       if (mdc_check_slave_width(mchan, dir) < 0)
+               return NULL;
+
+       mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
+       if (!mdesc)
+               return NULL;
+       mdesc->chan = mchan;
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               dma_addr_t buf = sg_dma_address(sg);
+               size_t buf_len = sg_dma_len(sg);
+
+               while (buf_len > 0) {
+                       size_t xfer_size;
+
+                       curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
+                                             &curr_phys);
+                       if (!curr)
+                               goto free_desc;
+
+                       /* Link onto the previous node, or start the list. */
+                       if (!prev) {
+                               mdesc->list_phys = curr_phys;
+                               mdesc->list = curr;
+                       } else {
+                               prev->node_addr = curr_phys;
+                               prev->next_desc = curr;
+                       }
+
+                       xfer_size = min_t(size_t, mdma->max_xfer_size,
+                                         buf_len);
+
+                       if (dir == DMA_MEM_TO_DEV) {
+                               mdc_list_desc_config(mchan, curr, dir, buf,
+                                                    mchan->config.dst_addr,
+                                                    xfer_size);
+                       } else {
+                               mdc_list_desc_config(mchan, curr, dir,
+                                                    mchan->config.src_addr,
+                                                    buf, xfer_size);
+                       }
+
+                       prev = curr;
+                       prev_phys = curr_phys;
+
+                       mdesc->list_len++;
+                       mdesc->list_xfer_size += xfer_size;
+                       buf += xfer_size;
+                       buf_len -= xfer_size;
+               }
+       }
+
+       return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
+
+free_desc:
+       mdc_desc_free(&mdesc->vd);
+
+       return NULL;
+}
+
+/*
+ * Start the next queued descriptor on @mchan, if any.
+ * Called with mchan->vc.lock held (see mdc_issue_pending / mdc_chan_irq).
+ */
+static void mdc_issue_desc(struct mdc_chan *mchan)
+{
+       struct mdc_dma *mdma = mchan->mdma;
+       struct virt_dma_desc *vd;
+       struct mdc_tx_desc *mdesc;
+       u32 val;
+
+       vd = vchan_next_desc(&mchan->vc);
+       if (!vd)
+               return;
+
+       list_del(&vd->node);
+
+       mdesc = to_mdc_desc(&vd->tx);
+       mchan->desc = mdesc;
+
+       dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
+               mchan->chan_nr);
+
+       /* Route the peripheral request to this channel (SoC-specific). */
+       mdma->soc->enable_chan(mchan);
+
+       /* Program interrupt/addressing mode and thread routing. */
+       val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
+       val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
+               MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
+               MDC_GENERAL_CONFIG_PHYSICAL_R;
+       mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
+       val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
+               (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
+               (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
+       mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
+       /* Point at the first hw descriptor and kick off list processing. */
+       mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
+       val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
+       val |= MDC_CONTROL_AND_STATUS_LIST_EN;
+       mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
+}
+
+/*
+ * dmaengine device_issue_pending callback: start the queue if the channel
+ * is currently idle (no active descriptor).
+ */
+static void mdc_issue_pending(struct dma_chan *chan)
+{
+       struct mdc_chan *mchan = to_mdc_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mchan->vc.lock, flags);
+       if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
+               mdc_issue_desc(mchan);
+       spin_unlock_irqrestore(&mchan->vc.lock, flags);
+}
+
+/*
+ * mdc_tx_status() - report transfer status and residue for @cookie
+ *
+ * For a descriptor still queued, the full transfer size is outstanding.
+ * For the active descriptor, the residue is computed from the hardware
+ * CMDS_PROCESSED counters plus the in-progress command's
+ * ACTIVE_TRANSFER_SIZE register.
+ */
+static enum dma_status mdc_tx_status(struct dma_chan *chan,
+       dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct mdc_chan *mchan = to_mdc_chan(chan);
+       struct mdc_tx_desc *mdesc;
+       struct virt_dma_desc *vd;
+       unsigned long flags;
+       size_t bytes = 0;
+       int ret;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE)
+               return ret;
+
+       if (!txstate)
+               return ret;
+
+       spin_lock_irqsave(&mchan->vc.lock, flags);
+       vd = vchan_find_desc(&mchan->vc, cookie);
+       if (vd) {
+               /* Still queued: nothing transferred yet. */
+               mdesc = to_mdc_desc(&vd->tx);
+               bytes = mdesc->list_xfer_size;
+       } else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
+               struct mdc_hw_list_desc *ldesc;
+               u32 val1, val2, done, processed, residue;
+               int i, cmds;
+
+               mdesc = mchan->desc;
+
+               /*
+                * Determine the number of commands that haven't been
+                * processed (handled by the IRQ handler) yet.  Re-read
+                * until CMDS_PROCESSED is stable so the residue and the
+                * counters form a consistent snapshot.
+                */
+               do {
+                       val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
+                               ~MDC_CMDS_PROCESSED_INT_ACTIVE;
+                       residue = mdc_chan_readl(mchan,
+                                                MDC_ACTIVE_TRANSFER_SIZE);
+                       val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
+                               ~MDC_CMDS_PROCESSED_INT_ACTIVE;
+               } while (val1 != val2);
+
+               done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+                       MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+               processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
+                       MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
+               cmds = (done - processed) %
+                       (MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);
+
+               /*
+                * If the command loaded event hasn't been processed yet, then
+                * the difference above includes an extra command.
+                */
+               if (!mdesc->cmd_loaded)
+                       cmds--;
+               else
+                       cmds += mdesc->list_cmds_done;
+
+               /* Subtract the fully-completed commands... */
+               bytes = mdesc->list_xfer_size;
+               ldesc = mdesc->list;
+               for (i = 0; i < cmds; i++) {
+                       bytes -= ldesc->xfer_size + 1;
+                       ldesc = ldesc->next_desc;
+               }
+               /* ...then account for the partially-complete current command. */
+               if (ldesc) {
+                       if (residue != MDC_TRANSFER_SIZE_MASK)
+                               bytes -= ldesc->xfer_size - residue;
+                       else
+                               bytes -= ldesc->xfer_size + 1;
+               }
+       }
+       spin_unlock_irqrestore(&mchan->vc.lock, flags);
+
+       dma_set_residue(txstate, bytes);
+
+       return ret;
+}
+
+/*
+ * dmaengine device_terminate_all callback: cancel the active transfer and
+ * free the active plus all queued descriptors (outside the channel lock).
+ */
+static int mdc_terminate_all(struct dma_chan *chan)
+{
+       struct mdc_chan *mchan = to_mdc_chan(chan);
+       struct mdc_tx_desc *mdesc;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&mchan->vc.lock, flags);
+
+       mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
+                       MDC_CONTROL_AND_STATUS);
+
+       mdesc = mchan->desc;
+       mchan->desc = NULL;
+       vchan_get_all_descriptors(&mchan->vc, &head);
+
+       spin_unlock_irqrestore(&mchan->vc.lock, flags);
+
+       if (mdesc)
+               mdc_desc_free(&mdesc->vd);
+       vchan_dma_desc_free_list(&mchan->vc, &head);
+
+       return 0;
+}
+
+/*
+ * dmaengine device_config callback: latch the slave configuration under
+ * the channel lock.  It is read when descriptors are prepared.
+ */
+static int mdc_slave_config(struct dma_chan *chan,
+                           struct dma_slave_config *config)
+{
+       struct mdc_chan *mchan = to_mdc_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mchan->vc.lock, flags);
+       mchan->config = *config;
+       spin_unlock_irqrestore(&mchan->vc.lock, flags);
+
+       return 0;
+}
+
+/* No per-channel resources need to be allocated up front. */
+static int mdc_alloc_chan_resources(struct dma_chan *chan)
+{
+       return 0;
+}
+
+/* Stop any active transfer and unroute the peripheral from this channel. */
+static void mdc_free_chan_resources(struct dma_chan *chan)
+{
+       struct mdc_chan *mchan = to_mdc_chan(chan);
+       struct mdc_dma *mdma = mchan->mdma;
+
+       mdc_terminate_all(chan);
+
+       mdma->soc->disable_chan(mchan);
+}
+
+/*
+ * Per-channel interrupt handler: acknowledge completed commands in
+ * CMDS_PROCESSED and advance the active descriptor's bookkeeping,
+ * issuing the next descriptor when the current one finishes.
+ */
+static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
+{
+       struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
+       struct mdc_tx_desc *mdesc;
+       u32 val, processed, done1, done2;
+       unsigned int i;
+
+       spin_lock(&mchan->vc.lock);
+
+       val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+       processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
+               MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
+       /*
+        * CMDS_DONE may have incremented between reading CMDS_PROCESSED
+        * and clearing INT_ACTIVE.  Re-read CMDS_PROCESSED to ensure we
+        * didn't miss a command completion.
+        */
+       do {
+               val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+               done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+                       MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+               /* Ack: set CMDS_PROCESSED to CMDS_DONE and clear INT_ACTIVE. */
+               val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
+                         MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
+                        MDC_CMDS_PROCESSED_INT_ACTIVE);
+               val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
+               mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
+               val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+               done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+                       MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+       } while (done1 != done2);
+
+       dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
+
+       mdesc = mchan->desc;
+       if (!mdesc) {
+               dev_warn(mdma2dev(mchan->mdma),
+                        "IRQ with no active descriptor on channel %d\n",
+                        mchan->chan_nr);
+               goto out;
+       }
+
+       /* Walk each newly-completed command (counters wrap modulo mask+1). */
+       for (i = processed; i != done1;
+            i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
+               /*
+                * The first interrupt in a transfer indicates that the
+                * command list has been loaded, not that a command has
+                * been completed.
+                */
+               if (!mdesc->cmd_loaded) {
+                       mdesc->cmd_loaded = true;
+                       continue;
+               }
+
+               mdesc->list_cmds_done++;
+               if (mdesc->cyclic) {
+                       /* Fire the period callback each list_period_len cmds. */
+                       mdesc->list_cmds_done %= mdesc->list_len;
+                       if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
+                               vchan_cyclic_callback(&mdesc->vd);
+               } else if (mdesc->list_cmds_done == mdesc->list_len) {
+                       /* Descriptor finished: complete it and start the next. */
+                       mchan->desc = NULL;
+                       vchan_cookie_complete(&mdesc->vd);
+                       mdc_issue_desc(mchan);
+                       break;
+               }
+       }
+out:
+       spin_unlock(&mchan->vc.lock);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Device-tree translation (#dma-cells = 3): args are
+ *   [0] peripheral number, [1] bitmask of usable channels, [2] thread ID.
+ * Returns the first free channel allowed by the mask, or NULL.
+ */
+static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
+                                    struct of_dma *ofdma)
+{
+       struct mdc_dma *mdma = ofdma->of_dma_data;
+       struct dma_chan *chan;
+
+       if (dma_spec->args_count != 3)
+               return NULL;
+
+       list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
+               struct mdc_chan *mchan = to_mdc_chan(chan);
+
+               if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
+                       continue;
+               if (dma_get_slave_channel(chan)) {
+                       mchan->periph = dma_spec->args[0];
+                       mchan->thread = dma_spec->args[2];
+                       return chan;
+               }
+       }
+
+       return NULL;
+}
+
+#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)      (0x120 + 0x4 * ((ch) / 4))
+#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4))
+#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK     0x3f
+
+/*
+ * Route the channel's peripheral DMA request to this channel via the
+ * Pistachio CR_PERIPH syscon (one 6-bit field per channel, four per word).
+ */
+static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
+{
+       struct mdc_dma *mdma = mchan->mdma;
+
+       regmap_update_bits(mdma->periph_regs,
+                          PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
+                          PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
+                          PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
+                          mchan->periph <<
+                          PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
+}
+
+/* Clear the peripheral routing field for this channel. */
+static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
+{
+       struct mdc_dma *mdma = mchan->mdma;
+
+       regmap_update_bits(mdma->periph_regs,
+                          PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
+                          PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
+                          PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
+                          0);
+}
+
+/* SoC hooks for IMG Pistachio: channel routing through the CR_PERIPH syscon. */
+static const struct mdc_dma_soc_data pistachio_mdc_data = {
+       .enable_chan = pistachio_mdc_enable_chan,
+       .disable_chan = pistachio_mdc_disable_chan,
+};
+
+static const struct of_device_id mdc_dma_of_match[] = {
+       { .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
+       { },
+};
+MODULE_DEVICE_TABLE(of, mdc_dma_of_match);
+
+static int mdc_dma_probe(struct platform_device *pdev)
+{
+       struct mdc_dma *mdma;
+       struct resource *res;
+       const struct of_device_id *match;
+       unsigned int i;
+       u32 val;
+       int ret;
+
+       mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
+       if (!mdma)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, mdma);
+
+       match = of_match_device(mdc_dma_of_match, &pdev->dev);
+       mdma->soc = match->data;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mdma->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mdma->regs))
+               return PTR_ERR(mdma->regs);
+
+       mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                           "img,cr-periph");
+       if (IS_ERR(mdma->periph_regs))
+               return PTR_ERR(mdma->periph_regs);
+
+       mdma->clk = devm_clk_get(&pdev->dev, "sys");
+       if (IS_ERR(mdma->clk))
+               return PTR_ERR(mdma->clk);
+
+       ret = clk_prepare_enable(mdma->clk);
+       if (ret)
+               return ret;
+
+       dma_cap_zero(mdma->dma_dev.cap_mask);
+       dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
+       dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
+       dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);
+
+       val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
+       mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
+               MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
+       mdma->nr_threads =
+               1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
+                     MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
+       mdma->bus_width =
+               (1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
+                      MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
+       /*
+        * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
+        * are supported, this makes it possible for the value reported in
+        * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
+        * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
+        * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining.  To eliminate this
+        * ambiguity, restrict transfer sizes to one bus-width less than the
+        * actual maximum.
+        */
+       mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;
+
+       of_property_read_u32(pdev->dev.of_node, "dma-channels",
+                            &mdma->nr_channels);
+       ret = of_property_read_u32(pdev->dev.of_node,
+                                  "img,max-burst-multiplier",
+                                  &mdma->max_burst_mult);
+       if (ret)
+               goto disable_clk;
+
+       mdma->dma_dev.dev = &pdev->dev;
+       mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
+       mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
+       mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
+       mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
+       mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
+       mdma->dma_dev.device_tx_status = mdc_tx_status;
+       mdma->dma_dev.device_issue_pending = mdc_issue_pending;
+       mdma->dma_dev.device_terminate_all = mdc_terminate_all;
+       mdma->dma_dev.device_config = mdc_slave_config;
+
+       mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       for (i = 1; i <= mdma->bus_width; i <<= 1) {
+               mdma->dma_dev.src_addr_widths |= BIT(i);
+               mdma->dma_dev.dst_addr_widths |= BIT(i);
+       }
+
+       INIT_LIST_HEAD(&mdma->dma_dev.channels);
+       for (i = 0; i < mdma->nr_channels; i++) {
+               struct mdc_chan *mchan = &mdma->channels[i];
+
+               mchan->mdma = mdma;
+               mchan->chan_nr = i;
+               mchan->irq = platform_get_irq(pdev, i);
+               if (mchan->irq < 0) {
+                       ret = mchan->irq;
+                       goto disable_clk;
+               }
+               ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
+                                      IRQ_TYPE_LEVEL_HIGH,
+                                      dev_name(&pdev->dev), mchan);
+               if (ret < 0)
+                       goto disable_clk;
+
+               mchan->vc.desc_free = mdc_desc_free;
+               vchan_init(&mchan->vc, &mdma->dma_dev);
+       }
+
+       mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
+                                          sizeof(struct mdc_hw_list_desc),
+                                          4, 0);
+       if (!mdma->desc_pool) {
+               ret = -ENOMEM;
+               goto disable_clk;
+       }
+
+       ret = dma_async_device_register(&mdma->dma_dev);
+       if (ret)
+               goto disable_clk;
+
+       ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
+       if (ret)
+               goto unregister;
+
+       dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
+                mdma->nr_channels, mdma->nr_threads);
+
+       return 0;
+
+unregister:
+       dma_async_device_unregister(&mdma->dma_dev);
+disable_clk:
+       clk_disable_unprepare(mdma->clk);
+       return ret;
+}
+
+static int mdc_dma_remove(struct platform_device *pdev)
+{
+       struct mdc_dma *mdma = platform_get_drvdata(pdev);
+       struct mdc_chan *mchan, *next;
+
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&mdma->dma_dev);
+
+       list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
+                                vc.chan.device_node) {
+               list_del(&mchan->vc.chan.device_node);
+
+               synchronize_irq(mchan->irq);
+               devm_free_irq(&pdev->dev, mchan->irq, mchan);
+
+               tasklet_kill(&mchan->vc.task);
+       }
+
+       clk_disable_unprepare(mdma->clk);
+
+       return 0;
+}
+
+static struct platform_driver mdc_dma_driver = {
+       .driver = {
+               .name = "img-mdc-dma",
+               .of_match_table = of_match_ptr(mdc_dma_of_match),
+       },
+       .probe = mdc_dma_probe,
+       .remove = mdc_dma_remove,
+};
+module_platform_driver(mdc_dma_driver);
+
+MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_LICENSE("GPL v2");
index 10bbc0a675b07c7c31c9359409babd4809a9ff4e..eed405976ea92075a063437c1b349be99116ba1c 100644 (file)
@@ -230,11 +230,6 @@ static inline int is_imx1_dma(struct imxdma_engine *imxdma)
        return imxdma->devtype == IMX1_DMA;
 }
 
-static inline int is_imx21_dma(struct imxdma_engine *imxdma)
-{
-       return imxdma->devtype == IMX21_DMA;
-}
-
 static inline int is_imx27_dma(struct imxdma_engine *imxdma)
 {
        return imxdma->devtype == IMX27_DMA;
@@ -669,69 +664,67 @@ out:
 
 }
 
-static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-               unsigned long arg)
+static int imxdma_terminate_all(struct dma_chan *chan)
 {
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
-       struct dma_slave_config *dmaengine_cfg = (void *)arg;
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        unsigned long flags;
-       unsigned int mode = 0;
-
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               imxdma_disable_hw(imxdmac);
-
-               spin_lock_irqsave(&imxdma->lock, flags);
-               list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
-               list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
-               spin_unlock_irqrestore(&imxdma->lock, flags);
-               return 0;
-       case DMA_SLAVE_CONFIG:
-               if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-                       imxdmac->per_address = dmaengine_cfg->src_addr;
-                       imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
-                       imxdmac->word_size = dmaengine_cfg->src_addr_width;
-               } else {
-                       imxdmac->per_address = dmaengine_cfg->dst_addr;
-                       imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
-                       imxdmac->word_size = dmaengine_cfg->dst_addr_width;
-               }
 
-               switch (imxdmac->word_size) {
-               case DMA_SLAVE_BUSWIDTH_1_BYTE:
-                       mode = IMX_DMA_MEMSIZE_8;
-                       break;
-               case DMA_SLAVE_BUSWIDTH_2_BYTES:
-                       mode = IMX_DMA_MEMSIZE_16;
-                       break;
-               default:
-               case DMA_SLAVE_BUSWIDTH_4_BYTES:
-                       mode = IMX_DMA_MEMSIZE_32;
-                       break;
-               }
+       imxdma_disable_hw(imxdmac);
 
-               imxdmac->hw_chaining = 0;
+       spin_lock_irqsave(&imxdma->lock, flags);
+       list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+       list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
+       spin_unlock_irqrestore(&imxdma->lock, flags);
+       return 0;
+}
 
-               imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
-                       ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
-                       CCR_REN;
-               imxdmac->ccr_to_device =
-                       (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
-                       ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
-               imx_dmav1_writel(imxdma, imxdmac->dma_request,
-                                DMA_RSSR(imxdmac->channel));
+static int imxdma_config(struct dma_chan *chan,
+                        struct dma_slave_config *dmaengine_cfg)
+{
+       struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+       struct imxdma_engine *imxdma = imxdmac->imxdma;
+       unsigned int mode = 0;
 
-               /* Set burst length */
-               imx_dmav1_writel(imxdma, imxdmac->watermark_level *
-                               imxdmac->word_size, DMA_BLR(imxdmac->channel));
+       if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+               imxdmac->per_address = dmaengine_cfg->src_addr;
+               imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
+               imxdmac->word_size = dmaengine_cfg->src_addr_width;
+       } else {
+               imxdmac->per_address = dmaengine_cfg->dst_addr;
+               imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+               imxdmac->word_size = dmaengine_cfg->dst_addr_width;
+       }
 
-               return 0;
+       switch (imxdmac->word_size) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               mode = IMX_DMA_MEMSIZE_8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               mode = IMX_DMA_MEMSIZE_16;
+               break;
        default:
-               return -ENOSYS;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               mode = IMX_DMA_MEMSIZE_32;
+               break;
        }
 
-       return -EINVAL;
+       imxdmac->hw_chaining = 0;
+
+       imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
+               ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
+               CCR_REN;
+       imxdmac->ccr_to_device =
+               (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
+               ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
+       imx_dmav1_writel(imxdma, imxdmac->dma_request,
+                        DMA_RSSR(imxdmac->channel));
+
+       /* Set burst length */
+       imx_dmav1_writel(imxdma, imxdmac->watermark_level *
+                        imxdmac->word_size, DMA_BLR(imxdmac->channel));
+
+       return 0;
 }
 
 static enum dma_status imxdma_tx_status(struct dma_chan *chan,
@@ -1184,7 +1177,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
        imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
        imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
        imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
-       imxdma->dma_device.device_control = imxdma_control;
+       imxdma->dma_device.device_config = imxdma_config;
+       imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
        imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
 
        platform_set_drvdata(pdev, imxdma);
index d0df198f62e9369148bceed097c260fdbd790a8b..18c0a131e4e41d21ec9ffab272f26258fb483011 100644 (file)
@@ -830,20 +830,29 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        return ret;
 }
 
-static void sdma_disable_channel(struct sdma_channel *sdmac)
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct sdma_channel, chan);
+}
+
+static int sdma_disable_channel(struct dma_chan *chan)
 {
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
 
        writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
        sdmac->status = DMA_ERROR;
+
+       return 0;
 }
 
-static int sdma_config_channel(struct sdma_channel *sdmac)
+static int sdma_config_channel(struct dma_chan *chan)
 {
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
        int ret;
 
-       sdma_disable_channel(sdmac);
+       sdma_disable_channel(chan);
 
        sdmac->event_mask[0] = 0;
        sdmac->event_mask[1] = 0;
@@ -935,11 +944,6 @@ out:
        return ret;
 }
 
-static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
-{
-       return container_of(chan, struct sdma_channel, chan);
-}
-
 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
        unsigned long flags;
@@ -1004,7 +1008,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
 
-       sdma_disable_channel(sdmac);
+       sdma_disable_channel(chan);
 
        if (sdmac->event_id0)
                sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1203,35 +1207,24 @@ err_out:
        return NULL;
 }
 
-static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-               unsigned long arg)
+static int sdma_config(struct dma_chan *chan,
+                      struct dma_slave_config *dmaengine_cfg)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
-       struct dma_slave_config *dmaengine_cfg = (void *)arg;
-
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               sdma_disable_channel(sdmac);
-               return 0;
-       case DMA_SLAVE_CONFIG:
-               if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-                       sdmac->per_address = dmaengine_cfg->src_addr;
-                       sdmac->watermark_level = dmaengine_cfg->src_maxburst *
-                                               dmaengine_cfg->src_addr_width;
-                       sdmac->word_size = dmaengine_cfg->src_addr_width;
-               } else {
-                       sdmac->per_address = dmaengine_cfg->dst_addr;
-                       sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
-                                               dmaengine_cfg->dst_addr_width;
-                       sdmac->word_size = dmaengine_cfg->dst_addr_width;
-               }
-               sdmac->direction = dmaengine_cfg->direction;
-               return sdma_config_channel(sdmac);
-       default:
-               return -ENOSYS;
-       }
 
-       return -EINVAL;
+       if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+               sdmac->per_address = dmaengine_cfg->src_addr;
+               sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+                       dmaengine_cfg->src_addr_width;
+               sdmac->word_size = dmaengine_cfg->src_addr_width;
+       } else {
+               sdmac->per_address = dmaengine_cfg->dst_addr;
+               sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+                       dmaengine_cfg->dst_addr_width;
+               sdmac->word_size = dmaengine_cfg->dst_addr_width;
+       }
+       sdmac->direction = dmaengine_cfg->direction;
+       return sdma_config_channel(chan);
 }
 
 static enum dma_status sdma_tx_status(struct dma_chan *chan,
@@ -1303,15 +1296,15 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
        if (header->ram_code_start + header->ram_code_size > fw->size)
                goto err_firmware;
        switch (header->version_major) {
-               case 1:
-                       sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
-                       break;
-               case 2:
-                       sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
-                       break;
-               default:
-                       dev_err(sdma->dev, "unknown firmware version\n");
-                       goto err_firmware;
+       case 1:
+               sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+               break;
+       case 2:
+               sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+               break;
+       default:
+               dev_err(sdma->dev, "unknown firmware version\n");
+               goto err_firmware;
        }
 
        addr = (void *)header + header->script_addrs_start;
@@ -1479,7 +1472,7 @@ static int sdma_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
+       sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
        if (!sdma)
                return -ENOMEM;
 
@@ -1488,48 +1481,34 @@ static int sdma_probe(struct platform_device *pdev)
        sdma->dev = &pdev->dev;
        sdma->drvdata = drvdata;
 
-       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if (!iores || irq < 0) {
-               ret = -EINVAL;
-               goto err_irq;
-       }
+       if (irq < 0)
+               return irq;
 
-       if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
-               ret = -EBUSY;
-               goto err_request_region;
-       }
+       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
+       if (IS_ERR(sdma->regs))
+               return PTR_ERR(sdma->regs);
 
        sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
-       if (IS_ERR(sdma->clk_ipg)) {
-               ret = PTR_ERR(sdma->clk_ipg);
-               goto err_clk;
-       }
+       if (IS_ERR(sdma->clk_ipg))
+               return PTR_ERR(sdma->clk_ipg);
 
        sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
-       if (IS_ERR(sdma->clk_ahb)) {
-               ret = PTR_ERR(sdma->clk_ahb);
-               goto err_clk;
-       }
+       if (IS_ERR(sdma->clk_ahb))
+               return PTR_ERR(sdma->clk_ahb);
 
        clk_prepare(sdma->clk_ipg);
        clk_prepare(sdma->clk_ahb);
 
-       sdma->regs = ioremap(iores->start, resource_size(iores));
-       if (!sdma->regs) {
-               ret = -ENOMEM;
-               goto err_ioremap;
-       }
-
-       ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+       ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
+                              sdma);
        if (ret)
-               goto err_request_irq;
+               return ret;
 
        sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
-       if (!sdma->script_addrs) {
-               ret = -ENOMEM;
-               goto err_alloc;
-       }
+       if (!sdma->script_addrs)
+               return -ENOMEM;
 
        /* initially no scripts available */
        saddr_arr = (s32 *)sdma->script_addrs;
@@ -1600,7 +1579,12 @@ static int sdma_probe(struct platform_device *pdev)
        sdma->dma_device.device_tx_status = sdma_tx_status;
        sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
        sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
-       sdma->dma_device.device_control = sdma_control;
+       sdma->dma_device.device_config = sdma_config;
+       sdma->dma_device.device_terminate_all = sdma_disable_channel;
+       sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        sdma->dma_device.device_issue_pending = sdma_issue_pending;
        sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
        dma_set_max_seg_size(sdma->dma_device.dev, 65535);
@@ -1629,38 +1613,22 @@ err_register:
        dma_async_device_unregister(&sdma->dma_device);
 err_init:
        kfree(sdma->script_addrs);
-err_alloc:
-       free_irq(irq, sdma);
-err_request_irq:
-       iounmap(sdma->regs);
-err_ioremap:
-err_clk:
-       release_mem_region(iores->start, resource_size(iores));
-err_request_region:
-err_irq:
-       kfree(sdma);
        return ret;
 }
 
 static int sdma_remove(struct platform_device *pdev)
 {
        struct sdma_engine *sdma = platform_get_drvdata(pdev);
-       struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       int irq = platform_get_irq(pdev, 0);
        int i;
 
        dma_async_device_unregister(&sdma->dma_device);
        kfree(sdma->script_addrs);
-       free_irq(irq, sdma);
-       iounmap(sdma->regs);
-       release_mem_region(iores->start, resource_size(iores));
        /* Kill the tasklet */
        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct sdma_channel *sdmac = &sdma->channel[i];
 
                tasklet_kill(&sdmac->tasklet);
        }
-       kfree(sdma);
 
        platform_set_drvdata(pdev, NULL);
        dev_info(&pdev->dev, "Removed...\n");
index 1aab8130efa1c75ae51906938447c91d12a66ddc..5aaead9b56f7386d9536765a08ff5d727b758aee 100644 (file)
@@ -492,10 +492,10 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
        return ret;
 }
 
-static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+static int intel_mid_dma_config(struct dma_chan *chan,
+                               struct dma_slave_config *slave)
 {
        struct intel_mid_dma_chan       *midc = to_intel_mid_dma_chan(chan);
-       struct dma_slave_config  *slave = (struct dma_slave_config *)arg;
        struct intel_mid_dma_slave *mid_slave;
 
        BUG_ON(!midc);
@@ -509,28 +509,14 @@ static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
        midc->mid_slave = mid_slave;
        return 0;
 }
-/**
- * intel_mid_dma_device_control -      DMA device control
- * @chan: chan for DMA control
- * @cmd: control cmd
- * @arg: cmd arg value
- *
- * Perform DMA control command
- */
-static int intel_mid_dma_device_control(struct dma_chan *chan,
-                       enum dma_ctrl_cmd cmd, unsigned long arg)
+
+static int intel_mid_dma_terminate_all(struct dma_chan *chan)
 {
        struct intel_mid_dma_chan       *midc = to_intel_mid_dma_chan(chan);
        struct middma_device    *mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc       *desc, *_desc;
        union intel_mid_dma_cfg_lo cfg_lo;
 
-       if (cmd == DMA_SLAVE_CONFIG)
-               return dma_slave_control(chan, arg);
-
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENXIO;
-
        spin_lock_bh(&midc->lock);
        if (midc->busy == false) {
                spin_unlock_bh(&midc->lock);
@@ -1148,7 +1134,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
        dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
        dma->common.device_issue_pending = intel_mid_dma_issue_pending;
        dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
-       dma->common.device_control = intel_mid_dma_device_control;
+       dma->common.device_config = intel_mid_dma_config;
+       dma->common.device_terminate_all = intel_mid_dma_terminate_all;
 
        /*enable dma cntrl*/
        iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
index 32eae38291e598b7ebeb4b2eebe2c5577ba1022f..77a6dcf25b98b818b9d83ecbbf0d99edc3783056 100644 (file)
@@ -214,6 +214,11 @@ static bool is_bwd_ioat(struct pci_dev *pdev)
        case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+       /* even though not Atom, BDX-DE has same DMA silicon */
+       case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+       case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+       case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+       case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
                return true;
        default:
                return false;
@@ -489,6 +494,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
        struct ioat_chan_common *chan = &ioat->base;
        struct pci_dev *pdev = to_pdev(chan);
        struct ioat_dma_descriptor *hw;
+       struct dma_async_tx_descriptor *tx;
        u64 phys_complete;
        struct ioat_ring_ent *desc;
        u32 err_handled = 0;
@@ -534,6 +540,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
                dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
                        __func__, chanerr, err_handled);
                BUG();
+       } else { /* cleanup the faulty descriptor */
+               tx = &desc->txd;
+               if (tx->cookie) {
+                       dma_cookie_complete(tx);
+                       dma_descriptor_unmap(tx);
+                       if (tx->callback) {
+                               tx->callback(tx->callback_param);
+                               tx->callback = NULL;
+                       }
+               }
        }
 
        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -1300,7 +1316,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+       if (tmo == 0 ||
+           dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test xor timed out\n");
                err = -ENODEV;
                goto dma_unmap;
@@ -1366,7 +1383,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+       if (tmo == 0 ||
+           dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
@@ -1418,7 +1436,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+       if (tmo == 0 ||
+           dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test 2nd validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
index 62f83e983d8d0de9e496c79bbe9b10d2951c980b..02177ecf09f89b899b92a18edb06512f79f0f383 100644 (file)
 #define PCI_DEVICE_ID_INTEL_IOAT_BWD2  0x0C52
 #define PCI_DEVICE_ID_INTEL_IOAT_BWD3  0x0C53
 
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE0        0x6f50
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE1        0x6f51
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2        0x6f52
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3        0x6f53
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
index 1d051cd045dbc43b5d4b53d42008660f62765ddd..5501eb072d6981c9a5c6d7a79c87777376979093 100644 (file)
@@ -111,6 +111,11 @@ static struct pci_device_id ioat_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
index c2b017ad139d3e22e9f2202c03ebc2cf54243eab..b54f62de92323ed52e53b384860eafd5f788ab28 100644 (file)
@@ -1398,76 +1398,81 @@ static void idmac_issue_pending(struct dma_chan *chan)
         */
 }
 
-static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                          unsigned long arg)
+static int idmac_pause(struct dma_chan *chan)
 {
        struct idmac_channel *ichan = to_idmac_chan(chan);
        struct idmac *idmac = to_idmac(chan->device);
        struct ipu *ipu = to_ipu(idmac);
        struct list_head *list, *tmp;
        unsigned long flags;
-       int i;
 
-       switch (cmd) {
-       case DMA_PAUSE:
-               spin_lock_irqsave(&ipu->lock, flags);
-               ipu_ic_disable_task(ipu, chan->chan_id);
+       mutex_lock(&ichan->chan_mutex);
 
-               /* Return all descriptors into "prepared" state */
-               list_for_each_safe(list, tmp, &ichan->queue)
-                       list_del_init(list);
+       spin_lock_irqsave(&ipu->lock, flags);
+       ipu_ic_disable_task(ipu, chan->chan_id);
 
-               ichan->sg[0] = NULL;
-               ichan->sg[1] = NULL;
+       /* Return all descriptors into "prepared" state */
+       list_for_each_safe(list, tmp, &ichan->queue)
+               list_del_init(list);
 
-               spin_unlock_irqrestore(&ipu->lock, flags);
+       ichan->sg[0] = NULL;
+       ichan->sg[1] = NULL;
 
-               ichan->status = IPU_CHANNEL_INITIALIZED;
-               break;
-       case DMA_TERMINATE_ALL:
-               ipu_disable_channel(idmac, ichan,
-                                   ichan->status >= IPU_CHANNEL_ENABLED);
+       spin_unlock_irqrestore(&ipu->lock, flags);
 
-               tasklet_disable(&ipu->tasklet);
+       ichan->status = IPU_CHANNEL_INITIALIZED;
 
-               /* ichan->queue is modified in ISR, have to spinlock */
-               spin_lock_irqsave(&ichan->lock, flags);
-               list_splice_init(&ichan->queue, &ichan->free_list);
+       mutex_unlock(&ichan->chan_mutex);
 
-               if (ichan->desc)
-                       for (i = 0; i < ichan->n_tx_desc; i++) {
-                               struct idmac_tx_desc *desc = ichan->desc + i;
-                               if (list_empty(&desc->list))
-                                       /* Descriptor was prepared, but not submitted */
-                                       list_add(&desc->list, &ichan->free_list);
+       return 0;
+}
 
-                               async_tx_clear_ack(&desc->txd);
-                       }
+static int __idmac_terminate_all(struct dma_chan *chan)
+{
+       struct idmac_channel *ichan = to_idmac_chan(chan);
+       struct idmac *idmac = to_idmac(chan->device);
+       struct ipu *ipu = to_ipu(idmac);
+       unsigned long flags;
+       int i;
 
-               ichan->sg[0] = NULL;
-               ichan->sg[1] = NULL;
-               spin_unlock_irqrestore(&ichan->lock, flags);
+       ipu_disable_channel(idmac, ichan,
+                           ichan->status >= IPU_CHANNEL_ENABLED);
 
-               tasklet_enable(&ipu->tasklet);
+       tasklet_disable(&ipu->tasklet);
 
-               ichan->status = IPU_CHANNEL_INITIALIZED;
-               break;
-       default:
-               return -ENOSYS;
-       }
+       /* ichan->queue is modified in ISR, have to spinlock */
+       spin_lock_irqsave(&ichan->lock, flags);
+       list_splice_init(&ichan->queue, &ichan->free_list);
+
+       if (ichan->desc)
+               for (i = 0; i < ichan->n_tx_desc; i++) {
+                       struct idmac_tx_desc *desc = ichan->desc + i;
+                       if (list_empty(&desc->list))
+                               /* Descriptor was prepared, but not submitted */
+                               list_add(&desc->list, &ichan->free_list);
+
+                       async_tx_clear_ack(&desc->txd);
+               }
+
+       ichan->sg[0] = NULL;
+       ichan->sg[1] = NULL;
+       spin_unlock_irqrestore(&ichan->lock, flags);
+
+       tasklet_enable(&ipu->tasklet);
+
+       ichan->status = IPU_CHANNEL_INITIALIZED;
 
        return 0;
 }
 
-static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                        unsigned long arg)
+static int idmac_terminate_all(struct dma_chan *chan)
 {
        struct idmac_channel *ichan = to_idmac_chan(chan);
        int ret;
 
        mutex_lock(&ichan->chan_mutex);
 
-       ret = __idmac_control(chan, cmd, arg);
+       ret = __idmac_terminate_all(chan);
 
        mutex_unlock(&ichan->chan_mutex);
 
@@ -1568,7 +1573,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 
        mutex_lock(&ichan->chan_mutex);
 
-       __idmac_control(chan, DMA_TERMINATE_ALL, 0);
+       __idmac_terminate_all(chan);
 
        if (ichan->status > IPU_CHANNEL_FREE) {
 #ifdef DEBUG
@@ -1622,7 +1627,8 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 
        /* Compulsory for DMA_SLAVE fields */
        dma->device_prep_slave_sg               = idmac_prep_slave_sg;
-       dma->device_control                     = idmac_control;
+       dma->device_pause                       = idmac_pause;
+       dma->device_terminate_all               = idmac_terminate_all;
 
        INIT_LIST_HEAD(&dma->channels);
        for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1655,7 +1661,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
        for (i = 0; i < IPU_CHANNELS_NUM; i++) {
                struct idmac_channel *ichan = ipu->channel + i;
 
-               idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
+               idmac_terminate_all(&ichan->dma_chan);
        }
 
        dma_async_device_unregister(&idmac->dma);
index a1de14ab2c511e2eec6b2f100999f54f769afbbd..6f7f43529ccb17a23be497defc32f7e9b424b0c7 100644 (file)
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
        num = 0;
 
        if (!c->ccfg) {
-               /* default is memtomem, without calling device_control */
+               /* default is memtomem, without calling device_config */
                c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
                c->ccfg |= (0xf << 20) | (0xf << 24);   /* burst = 16 */
                c->ccfg |= (0x3 << 12) | (0x3 << 16);   /* width = 64 bit */
@@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
 }
 
-static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-       unsigned long arg)
+static int k3_dma_config(struct dma_chan *chan,
+                        struct dma_slave_config *cfg)
+{
+       struct k3_dma_chan *c = to_k3_chan(chan);
+       u32 maxburst = 0, val = 0;
+       enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+       if (cfg == NULL)
+               return -EINVAL;
+       c->dir = cfg->direction;
+       if (c->dir == DMA_DEV_TO_MEM) {
+               c->ccfg = CX_CFG_DSTINCR;
+               c->dev_addr = cfg->src_addr;
+               maxburst = cfg->src_maxburst;
+               width = cfg->src_addr_width;
+       } else if (c->dir == DMA_MEM_TO_DEV) {
+               c->ccfg = CX_CFG_SRCINCR;
+               c->dev_addr = cfg->dst_addr;
+               maxburst = cfg->dst_maxburst;
+               width = cfg->dst_addr_width;
+       }
+       switch (width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+       case DMA_SLAVE_BUSWIDTH_8_BYTES:
+               val =  __ffs(width);
+               break;
+       default:
+               val = 3;
+               break;
+       }
+       c->ccfg |= (val << 12) | (val << 16);
+
+       if ((maxburst == 0) || (maxburst > 16))
+               val = 16;
+       else
+               val = maxburst - 1;
+       c->ccfg |= (val << 20) | (val << 24);
+       c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+       /* specific request line */
+       c->ccfg |= c->vc.chan.chan_id << 4;
+
+       return 0;
+}
+
+static int k3_dma_terminate_all(struct dma_chan *chan)
 {
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
-       struct dma_slave_config *cfg = (void *)arg;
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;
-       u32 maxburst = 0, val = 0;
-       enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
        LIST_HEAD(head);
 
-       switch (cmd) {
-       case DMA_SLAVE_CONFIG:
-               if (cfg == NULL)
-                       return -EINVAL;
-               c->dir = cfg->direction;
-               if (c->dir == DMA_DEV_TO_MEM) {
-                       c->ccfg = CX_CFG_DSTINCR;
-                       c->dev_addr = cfg->src_addr;
-                       maxburst = cfg->src_maxburst;
-                       width = cfg->src_addr_width;
-               } else if (c->dir == DMA_MEM_TO_DEV) {
-                       c->ccfg = CX_CFG_SRCINCR;
-                       c->dev_addr = cfg->dst_addr;
-                       maxburst = cfg->dst_maxburst;
-                       width = cfg->dst_addr_width;
-               }
-               switch (width) {
-               case DMA_SLAVE_BUSWIDTH_1_BYTE:
-               case DMA_SLAVE_BUSWIDTH_2_BYTES:
-               case DMA_SLAVE_BUSWIDTH_4_BYTES:
-               case DMA_SLAVE_BUSWIDTH_8_BYTES:
-                       val =  __ffs(width);
-                       break;
-               default:
-                       val = 3;
-                       break;
-               }
-               c->ccfg |= (val << 12) | (val << 16);
+       dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 
-               if ((maxburst == 0) || (maxburst > 16))
-                       val = 16;
-               else
-                       val = maxburst - 1;
-               c->ccfg |= (val << 20) | (val << 24);
-               c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+       /* Prevent this channel being scheduled */
+       spin_lock(&d->lock);
+       list_del_init(&c->node);
+       spin_unlock(&d->lock);
 
-               /* specific request line */
-               c->ccfg |= c->vc.chan.chan_id << 4;
-               break;
+       /* Clear the tx descriptor lists */
+       spin_lock_irqsave(&c->vc.lock, flags);
+       vchan_get_all_descriptors(&c->vc, &head);
+       if (p) {
+               /* vchan is assigned to a pchan - stop the channel */
+               k3_dma_terminate_chan(p, d);
+               c->phy = NULL;
+               p->vchan = NULL;
+               p->ds_run = p->ds_done = NULL;
+       }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+       vchan_dma_desc_free_list(&c->vc, &head);
 
-       case DMA_TERMINATE_ALL:
-               dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+       return 0;
+}
 
-               /* Prevent this channel being scheduled */
-               spin_lock(&d->lock);
-               list_del_init(&c->node);
-               spin_unlock(&d->lock);
+static int k3_dma_transfer_pause(struct dma_chan *chan)
+{
+       struct k3_dma_chan *c = to_k3_chan(chan);
+       struct k3_dma_dev *d = to_k3_dma(chan->device);
+       struct k3_dma_phy *p = c->phy;
 
-               /* Clear the tx descriptor lists */
-               spin_lock_irqsave(&c->vc.lock, flags);
-               vchan_get_all_descriptors(&c->vc, &head);
+       dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+       if (c->status == DMA_IN_PROGRESS) {
+               c->status = DMA_PAUSED;
                if (p) {
-                       /* vchan is assigned to a pchan - stop the channel */
-                       k3_dma_terminate_chan(p, d);
-                       c->phy = NULL;
-                       p->vchan = NULL;
-                       p->ds_run = p->ds_done = NULL;
+                       k3_dma_pause_dma(p, false);
+               } else {
+                       spin_lock(&d->lock);
+                       list_del_init(&c->node);
+                       spin_unlock(&d->lock);
                }
-               spin_unlock_irqrestore(&c->vc.lock, flags);
-               vchan_dma_desc_free_list(&c->vc, &head);
-               break;
+       }
 
-       case DMA_PAUSE:
-               dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
-               if (c->status == DMA_IN_PROGRESS) {
-                       c->status = DMA_PAUSED;
-                       if (p) {
-                               k3_dma_pause_dma(p, false);
-                       } else {
-                               spin_lock(&d->lock);
-                               list_del_init(&c->node);
-                               spin_unlock(&d->lock);
-                       }
-               }
-               break;
+       return 0;
+}
 
-       case DMA_RESUME:
-               dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
-               spin_lock_irqsave(&c->vc.lock, flags);
-               if (c->status == DMA_PAUSED) {
-                       c->status = DMA_IN_PROGRESS;
-                       if (p) {
-                               k3_dma_pause_dma(p, true);
-                       } else if (!list_empty(&c->vc.desc_issued)) {
-                               spin_lock(&d->lock);
-                               list_add_tail(&c->node, &d->chan_pending);
-                               spin_unlock(&d->lock);
-                       }
+static int k3_dma_transfer_resume(struct dma_chan *chan)
+{
+       struct k3_dma_chan *c = to_k3_chan(chan);
+       struct k3_dma_dev *d = to_k3_dma(chan->device);
+       struct k3_dma_phy *p = c->phy;
+       unsigned long flags;
+
+       dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+       spin_lock_irqsave(&c->vc.lock, flags);
+       if (c->status == DMA_PAUSED) {
+               c->status = DMA_IN_PROGRESS;
+               if (p) {
+                       k3_dma_pause_dma(p, true);
+               } else if (!list_empty(&c->vc.desc_issued)) {
+                       spin_lock(&d->lock);
+                       list_add_tail(&c->node, &d->chan_pending);
+                       spin_unlock(&d->lock);
                }
-               spin_unlock_irqrestore(&c->vc.lock, flags);
-               break;
-       default:
-               return -ENXIO;
        }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
        return 0;
 }
 
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op)
        d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
        d->slave.device_issue_pending = k3_dma_issue_pending;
-       d->slave.device_control = k3_dma_control;
+       d->slave.device_config = k3_dma_config;
+       d->slave.device_pause = k3_dma_transfer_pause;
+       d->slave.device_resume = k3_dma_transfer_resume;
+       d->slave.device_terminate_all = k3_dma_terminate_all;
        d->slave.copy_align = DMA_ALIGN;
 
        /* init virtual channel */
@@ -787,7 +804,7 @@ static int k3_dma_remove(struct platform_device *op)
 }
 
 #ifdef CONFIG_PM_SLEEP
-static int k3_dma_suspend(struct device *dev)
+static int k3_dma_suspend_dev(struct device *dev)
 {
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        u32 stat = 0;
@@ -803,7 +820,7 @@ static int k3_dma_suspend(struct device *dev)
        return 0;
 }
 
-static int k3_dma_resume(struct device *dev)
+static int k3_dma_resume_dev(struct device *dev)
 {
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        int ret = 0;
@@ -818,7 +835,7 @@ static int k3_dma_resume(struct device *dev)
 }
 #endif
 
-static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
 
 static struct platform_driver k3_pdma_driver = {
        .driver         = {
index 8b8952f35e6c90f5f90e8ca8bc54ba280d272662..8926f271904e45dcf1f4ed7532afe6fc3cbdb323 100644 (file)
@@ -683,68 +683,70 @@ fail:
        return NULL;
 }
 
-static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-                           unsigned long arg)
+static int mmp_pdma_config(struct dma_chan *dchan,
+                          struct dma_slave_config *cfg)
 {
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
-       struct dma_slave_config *cfg = (void *)arg;
-       unsigned long flags;
        u32 maxburst = 0, addr = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
        if (!dchan)
                return -EINVAL;
 
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               disable_chan(chan->phy);
-               mmp_pdma_free_phy(chan);
-               spin_lock_irqsave(&chan->desc_lock, flags);
-               mmp_pdma_free_desc_list(chan, &chan->chain_pending);
-               mmp_pdma_free_desc_list(chan, &chan->chain_running);
-               spin_unlock_irqrestore(&chan->desc_lock, flags);
-               chan->idle = true;
-               break;
-       case DMA_SLAVE_CONFIG:
-               if (cfg->direction == DMA_DEV_TO_MEM) {
-                       chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
-                       maxburst = cfg->src_maxburst;
-                       width = cfg->src_addr_width;
-                       addr = cfg->src_addr;
-               } else if (cfg->direction == DMA_MEM_TO_DEV) {
-                       chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
-                       maxburst = cfg->dst_maxburst;
-                       width = cfg->dst_addr_width;
-                       addr = cfg->dst_addr;
-               }
-
-               if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
-                       chan->dcmd |= DCMD_WIDTH1;
-               else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
-                       chan->dcmd |= DCMD_WIDTH2;
-               else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
-                       chan->dcmd |= DCMD_WIDTH4;
-
-               if (maxburst == 8)
-                       chan->dcmd |= DCMD_BURST8;
-               else if (maxburst == 16)
-                       chan->dcmd |= DCMD_BURST16;
-               else if (maxburst == 32)
-                       chan->dcmd |= DCMD_BURST32;
-
-               chan->dir = cfg->direction;
-               chan->dev_addr = addr;
-               /* FIXME: drivers should be ported over to use the filter
-                * function. Once that's done, the following two lines can
-                * be removed.
-                */
-               if (cfg->slave_id)
-                       chan->drcmr = cfg->slave_id;
-               break;
-       default:
-               return -ENOSYS;
+       if (cfg->direction == DMA_DEV_TO_MEM) {
+               chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
+               maxburst = cfg->src_maxburst;
+               width = cfg->src_addr_width;
+               addr = cfg->src_addr;
+       } else if (cfg->direction == DMA_MEM_TO_DEV) {
+               chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+               maxburst = cfg->dst_maxburst;
+               width = cfg->dst_addr_width;
+               addr = cfg->dst_addr;
        }
 
+       if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+               chan->dcmd |= DCMD_WIDTH1;
+       else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+               chan->dcmd |= DCMD_WIDTH2;
+       else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
+               chan->dcmd |= DCMD_WIDTH4;
+
+       if (maxburst == 8)
+               chan->dcmd |= DCMD_BURST8;
+       else if (maxburst == 16)
+               chan->dcmd |= DCMD_BURST16;
+       else if (maxburst == 32)
+               chan->dcmd |= DCMD_BURST32;
+
+       chan->dir = cfg->direction;
+       chan->dev_addr = addr;
+       /* FIXME: drivers should be ported over to use the filter
+        * function. Once that's done, the following two lines can
+        * be removed.
+        */
+       if (cfg->slave_id)
+               chan->drcmr = cfg->slave_id;
+
+       return 0;
+}
+
+static int mmp_pdma_terminate_all(struct dma_chan *dchan)
+{
+       struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+       unsigned long flags;
+
+       if (!dchan)
+               return -EINVAL;
+
+       disable_chan(chan->phy);
+       mmp_pdma_free_phy(chan);
+       spin_lock_irqsave(&chan->desc_lock, flags);
+       mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+       mmp_pdma_free_desc_list(chan, &chan->chain_running);
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
+       chan->idle = true;
+
        return 0;
 }
 
@@ -1061,7 +1063,8 @@ static int mmp_pdma_probe(struct platform_device *op)
        pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
        pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
        pdev->device.device_issue_pending = mmp_pdma_issue_pending;
-       pdev->device.device_control = mmp_pdma_control;
+       pdev->device.device_config = mmp_pdma_config;
+       pdev->device.device_terminate_all = mmp_pdma_terminate_all;
        pdev->device.copy_align = PDMA_ALIGNMENT;
 
        if (pdev->dev->coherent_dma_mask)
index bfb46957c3dcc8c97a723a29b95cb3fe5e395be9..70c2fa9963cd4d942afa0b8a99f42c4d2ede784b 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/dmaengine.h>
 #include <linux/platform_device.h>
 #include <linux/device.h>
-#include <mach/regs-icu.h>
 #include <linux/platform_data/dma-mmp_tdma.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
@@ -164,33 +163,46 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
        tdmac->status = DMA_IN_PROGRESS;
 }
 
-static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
+static int mmp_tdma_disable_chan(struct dma_chan *chan)
 {
+       struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
        writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
                                        tdmac->reg_base + TDCR);
 
        tdmac->status = DMA_COMPLETE;
+
+       return 0;
 }
 
-static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
+static int mmp_tdma_resume_chan(struct dma_chan *chan)
 {
+       struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
        writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
                                        tdmac->reg_base + TDCR);
        tdmac->status = DMA_IN_PROGRESS;
+
+       return 0;
 }
 
-static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
+static int mmp_tdma_pause_chan(struct dma_chan *chan)
 {
+       struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
        writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
                                        tdmac->reg_base + TDCR);
        tdmac->status = DMA_PAUSED;
+
+       return 0;
 }
 
-static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
+static int mmp_tdma_config_chan(struct dma_chan *chan)
 {
+       struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
        unsigned int tdcr = 0;
 
-       mmp_tdma_disable_chan(tdmac);
+       mmp_tdma_disable_chan(chan);
 
        if (tdmac->dir == DMA_MEM_TO_DEV)
                tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
@@ -452,42 +464,34 @@ err_out:
        return NULL;
 }
 
-static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-               unsigned long arg)
+static int mmp_tdma_terminate_all(struct dma_chan *chan)
 {
        struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
-       struct dma_slave_config *dmaengine_cfg = (void *)arg;
-       int ret = 0;
-
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               mmp_tdma_disable_chan(tdmac);
-               /* disable interrupt */
-               mmp_tdma_enable_irq(tdmac, false);
-               break;
-       case DMA_PAUSE:
-               mmp_tdma_pause_chan(tdmac);
-               break;
-       case DMA_RESUME:
-               mmp_tdma_resume_chan(tdmac);
-               break;
-       case DMA_SLAVE_CONFIG:
-               if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-                       tdmac->dev_addr = dmaengine_cfg->src_addr;
-                       tdmac->burst_sz = dmaengine_cfg->src_maxburst;
-                       tdmac->buswidth = dmaengine_cfg->src_addr_width;
-               } else {
-                       tdmac->dev_addr = dmaengine_cfg->dst_addr;
-                       tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
-                       tdmac->buswidth = dmaengine_cfg->dst_addr_width;
-               }
-               tdmac->dir = dmaengine_cfg->direction;
-               return mmp_tdma_config_chan(tdmac);
-       default:
-               ret = -ENOSYS;
+
+       mmp_tdma_disable_chan(chan);
+       /* disable interrupt */
+       mmp_tdma_enable_irq(tdmac, false);
+
+       return 0;
+}
+
+static int mmp_tdma_config(struct dma_chan *chan,
+                          struct dma_slave_config *dmaengine_cfg)
+{
+       struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+       if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+               tdmac->dev_addr = dmaengine_cfg->src_addr;
+               tdmac->burst_sz = dmaengine_cfg->src_maxburst;
+               tdmac->buswidth = dmaengine_cfg->src_addr_width;
+       } else {
+               tdmac->dev_addr = dmaengine_cfg->dst_addr;
+               tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
+               tdmac->buswidth = dmaengine_cfg->dst_addr_width;
        }
+       tdmac->dir = dmaengine_cfg->direction;
 
-       return ret;
+       return mmp_tdma_config_chan(chan);
 }
 
 static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
@@ -668,7 +672,10 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
        tdev->device.device_tx_status = mmp_tdma_tx_status;
        tdev->device.device_issue_pending = mmp_tdma_issue_pending;
-       tdev->device.device_control = mmp_tdma_control;
+       tdev->device.device_config = mmp_tdma_config;
+       tdev->device.device_pause = mmp_tdma_pause_chan;
+       tdev->device.device_resume = mmp_tdma_resume_chan;
+       tdev->device.device_terminate_all = mmp_tdma_terminate_all;
        tdev->device.copy_align = TDMA_ALIGNMENT;
 
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
index 53032bac06e07f483389140057c11ad6777d7759..15cab7d79525914d862e5cd38344bc2e3046c95a 100644 (file)
@@ -263,28 +263,6 @@ static int moxart_slave_config(struct dma_chan *chan,
        return 0;
 }
 
-static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                         unsigned long arg)
-{
-       int ret = 0;
-
-       switch (cmd) {
-       case DMA_PAUSE:
-       case DMA_RESUME:
-               return -EINVAL;
-       case DMA_TERMINATE_ALL:
-               moxart_terminate_all(chan);
-               break;
-       case DMA_SLAVE_CONFIG:
-               ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
-               break;
-       default:
-               ret = -ENOSYS;
-       }
-
-       return ret;
-}
-
 static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction dir,
@@ -531,7 +509,8 @@ static void moxart_dma_init(struct dma_device *dma, struct device *dev)
        dma->device_free_chan_resources         = moxart_free_chan_resources;
        dma->device_issue_pending               = moxart_issue_pending;
        dma->device_tx_status                   = moxart_tx_status;
-       dma->device_control                     = moxart_control;
+       dma->device_config                      = moxart_slave_config;
+       dma->device_terminate_all               = moxart_terminate_all;
        dma->dev                                = dev;
 
        INIT_LIST_HEAD(&dma->channels);
index 01bec4023de2d59469bedd6fff0c39b85b5f70aa..57d2457545f3d47ff274c04b62460c6354b31e24 100644 (file)
@@ -800,79 +800,69 @@ err_prep:
        return NULL;
 }
 
-static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                                                       unsigned long arg)
+static int mpc_dma_device_config(struct dma_chan *chan,
+                                struct dma_slave_config *cfg)
 {
-       struct mpc_dma_chan *mchan;
-       struct mpc_dma *mdma;
-       struct dma_slave_config *cfg;
+       struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        unsigned long flags;
 
-       mchan = dma_chan_to_mpc_dma_chan(chan);
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               /* Disable channel requests */
-               mdma = dma_chan_to_mpc_dma(chan);
-
-               spin_lock_irqsave(&mchan->lock, flags);
-
-               out_8(&mdma->regs->dmacerq, chan->chan_id);
-               list_splice_tail_init(&mchan->prepared, &mchan->free);
-               list_splice_tail_init(&mchan->queued, &mchan->free);
-               list_splice_tail_init(&mchan->active, &mchan->free);
-
-               spin_unlock_irqrestore(&mchan->lock, flags);
+       /*
+        * Software constraints:
+        *  - only transfers between a peripheral device and
+        *     memory are supported;
+        *  - only peripheral devices with 4-byte FIFO access register
+        *     are supported;
+        *  - minimal transfer chunk is 4 bytes and consequently
+        *     source and destination addresses must be 4-byte aligned
+        *     and transfer size must be aligned on (4 * maxburst)
+        *     boundary;
+        *  - during the transfer RAM address is being incremented by
+        *     the size of minimal transfer chunk;
+        *  - peripheral port's address is constant during the transfer.
+        */
 
-               return 0;
+       if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+           cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+           !IS_ALIGNED(cfg->src_addr, 4) ||
+           !IS_ALIGNED(cfg->dst_addr, 4)) {
+               return -EINVAL;
+       }
 
-       case DMA_SLAVE_CONFIG:
-               /*
-                * Software constraints:
-                *  - only transfers between a peripheral device and
-                *     memory are supported;
-                *  - only peripheral devices with 4-byte FIFO access register
-                *     are supported;
-                *  - minimal transfer chunk is 4 bytes and consequently
-                *     source and destination addresses must be 4-byte aligned
-                *     and transfer size must be aligned on (4 * maxburst)
-                *     boundary;
-                *  - during the transfer RAM address is being incremented by
-                *     the size of minimal transfer chunk;
-                *  - peripheral port's address is constant during the transfer.
-                */
+       spin_lock_irqsave(&mchan->lock, flags);
 
-               cfg = (void *)arg;
+       mchan->src_per_paddr = cfg->src_addr;
+       mchan->src_tcd_nunits = cfg->src_maxburst;
+       mchan->dst_per_paddr = cfg->dst_addr;
+       mchan->dst_tcd_nunits = cfg->dst_maxburst;
 
-               if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-                   cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-                   !IS_ALIGNED(cfg->src_addr, 4) ||
-                   !IS_ALIGNED(cfg->dst_addr, 4)) {
-                       return -EINVAL;
-               }
+       /* Apply defaults */
+       if (mchan->src_tcd_nunits == 0)
+               mchan->src_tcd_nunits = 1;
+       if (mchan->dst_tcd_nunits == 0)
+               mchan->dst_tcd_nunits = 1;
 
-               spin_lock_irqsave(&mchan->lock, flags);
+       spin_unlock_irqrestore(&mchan->lock, flags);
 
-               mchan->src_per_paddr = cfg->src_addr;
-               mchan->src_tcd_nunits = cfg->src_maxburst;
-               mchan->dst_per_paddr = cfg->dst_addr;
-               mchan->dst_tcd_nunits = cfg->dst_maxburst;
+       return 0;
+}
 
-               /* Apply defaults */
-               if (mchan->src_tcd_nunits == 0)
-                       mchan->src_tcd_nunits = 1;
-               if (mchan->dst_tcd_nunits == 0)
-                       mchan->dst_tcd_nunits = 1;
+static int mpc_dma_device_terminate_all(struct dma_chan *chan)
+{
+       struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+       struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+       unsigned long flags;
 
-               spin_unlock_irqrestore(&mchan->lock, flags);
+       /* Disable channel requests */
+       spin_lock_irqsave(&mchan->lock, flags);
 
-               return 0;
+       out_8(&mdma->regs->dmacerq, chan->chan_id);
+       list_splice_tail_init(&mchan->prepared, &mchan->free);
+       list_splice_tail_init(&mchan->queued, &mchan->free);
+       list_splice_tail_init(&mchan->active, &mchan->free);
 
-       default:
-               /* Unknown command */
-               break;
-       }
+       spin_unlock_irqrestore(&mchan->lock, flags);
 
-       return -ENXIO;
+       return 0;
 }
 
 static int mpc_dma_probe(struct platform_device *op)
@@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op)
        dma->device_tx_status = mpc_dma_tx_status;
        dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
        dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
-       dma->device_control = mpc_dma_device_control;
+       dma->device_config = mpc_dma_device_config;
+       dma->device_terminate_all = mpc_dma_device_terminate_all;
 
        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
index d7ac558c2c1c838a599368a4891203aa9cfab536..b03e8137b918881891bde603576f659612c40c47 100644 (file)
@@ -928,14 +928,6 @@ out:
        return err;
 }
 
-/* This driver does not implement any of the optional DMA operations. */
-static int
-mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-              unsigned long arg)
-{
-       return -ENOSYS;
-}
-
 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
 {
        struct dma_chan *chan, *_chan;
@@ -1008,7 +1000,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
        dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
        dma_dev->device_tx_status = mv_xor_status;
        dma_dev->device_issue_pending = mv_xor_issue_pending;
-       dma_dev->device_control = mv_xor_control;
        dma_dev->dev = &pdev->dev;
 
        /* set prep routines based on capability */
index 5ea61201dbf02c9aa69682d93c40263727ef699e..829ec686dac337bbad7091db38f5271f73bd0ca9 100644 (file)
@@ -202,8 +202,9 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
        return container_of(chan, struct mxs_dma_chan, chan);
 }
 
-static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
+static void mxs_dma_reset_chan(struct dma_chan *chan)
 {
+       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int chan_id = mxs_chan->chan.chan_id;
 
@@ -250,8 +251,9 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
        mxs_chan->status = DMA_COMPLETE;
 }
 
-static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
+static void mxs_dma_enable_chan(struct dma_chan *chan)
 {
+       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int chan_id = mxs_chan->chan.chan_id;
 
@@ -272,13 +274,16 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
        mxs_chan->reset = false;
 }
 
-static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
+static void mxs_dma_disable_chan(struct dma_chan *chan)
 {
+       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+
        mxs_chan->status = DMA_COMPLETE;
 }
 
-static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
+static int mxs_dma_pause_chan(struct dma_chan *chan)
 {
+       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int chan_id = mxs_chan->chan.chan_id;
 
@@ -291,10 +296,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
                        mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
 
        mxs_chan->status = DMA_PAUSED;
+       return 0;
 }
 
-static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
+static int mxs_dma_resume_chan(struct dma_chan *chan)
 {
+       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int chan_id = mxs_chan->chan.chan_id;
 
@@ -307,6 +314,7 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
                        mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
 
        mxs_chan->status = DMA_IN_PROGRESS;
+       return 0;
 }
 
 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -383,7 +391,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
                        "%s: error in channel %d\n", __func__,
                        chan);
                mxs_chan->status = DMA_ERROR;
-               mxs_dma_reset_chan(mxs_chan);
+               mxs_dma_reset_chan(&mxs_chan->chan);
        } else if (mxs_chan->status != DMA_COMPLETE) {
                if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
                        mxs_chan->status = DMA_IN_PROGRESS;
@@ -432,7 +440,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
        if (ret)
                goto err_clk;
 
-       mxs_dma_reset_chan(mxs_chan);
+       mxs_dma_reset_chan(chan);
 
        dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
        mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -456,7 +464,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
        struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 
-       mxs_dma_disable_chan(mxs_chan);
+       mxs_dma_disable_chan(chan);
 
        free_irq(mxs_chan->chan_irq, mxs_dma);
 
@@ -651,28 +659,12 @@ err_out:
        return NULL;
 }
 
-static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-               unsigned long arg)
+static int mxs_dma_terminate_all(struct dma_chan *chan)
 {
-       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-       int ret = 0;
-
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               mxs_dma_reset_chan(mxs_chan);
-               mxs_dma_disable_chan(mxs_chan);
-               break;
-       case DMA_PAUSE:
-               mxs_dma_pause_chan(mxs_chan);
-               break;
-       case DMA_RESUME:
-               mxs_dma_resume_chan(mxs_chan);
-               break;
-       default:
-               ret = -ENOSYS;
-       }
+       mxs_dma_reset_chan(chan);
+       mxs_dma_disable_chan(chan);
 
-       return ret;
+       return 0;
 }
 
 static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
@@ -701,13 +693,6 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
        return mxs_chan->status;
 }
 
-static void mxs_dma_issue_pending(struct dma_chan *chan)
-{
-       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-
-       mxs_dma_enable_chan(mxs_chan);
-}
-
 static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 {
        int ret;
@@ -860,8 +845,14 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
        mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
        mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
        mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
-       mxs_dma->dma_device.device_control = mxs_dma_control;
-       mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
+       mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
+       mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
+       mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
+       mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
 
        ret = dma_async_device_register(&mxs_dma->dma_device);
        if (ret) {
index d7d61e1a01c30f9c025dbf110cdb31f20074fbf8..88b77c98365d882730d8fd5db51066b4dad32aae 100644 (file)
@@ -504,7 +504,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
         * pauses DMA and reads out data received via DMA as well as those left
         * in the Rx FIFO. For this to work with the RAM side using burst
         * transfers we enable the SBE bit and terminate the transfer in our
-        * DMA_PAUSE handler.
+        * .device_pause handler.
         */
        mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
 
@@ -565,13 +565,6 @@ static void nbpf_configure(struct nbpf_device *nbpf)
        nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
 }
 
-static void nbpf_pause(struct nbpf_channel *chan)
-{
-       nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
-       /* See comment in nbpf_prep_one() */
-       nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
-}
-
 /*             Generic part                    */
 
 /* DMA ENGINE functions */
@@ -837,54 +830,58 @@ static void nbpf_chan_idle(struct nbpf_channel *chan)
        }
 }
 
-static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-                       unsigned long arg)
+static int nbpf_pause(struct dma_chan *dchan)
 {
        struct nbpf_channel *chan = nbpf_to_chan(dchan);
-       struct dma_slave_config *config;
 
-       dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd);
+       dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
 
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               dev_dbg(dchan->device->dev, "Terminating\n");
-               nbpf_chan_halt(chan);
-               nbpf_chan_idle(chan);
-               break;
+       chan->paused = true;
+       nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
+       /* See comment in nbpf_prep_one() */
+       nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
 
-       case DMA_SLAVE_CONFIG:
-               if (!arg)
-                       return -EINVAL;
-               config = (struct dma_slave_config *)arg;
+       return 0;
+}
 
-               /*
-                * We could check config->slave_id to match chan->terminal here,
-                * but with DT they would be coming from the same source, so
-                * such a check would be superflous
-                */
+static int nbpf_terminate_all(struct dma_chan *dchan)
+{
+       struct nbpf_channel *chan = nbpf_to_chan(dchan);
 
-               chan->slave_dst_addr = config->dst_addr;
-               chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
-                                                      config->dst_addr_width, 1);
-               chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
-                                                      config->dst_addr_width,
-                                                      config->dst_maxburst);
-               chan->slave_src_addr = config->src_addr;
-               chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
-                                                      config->src_addr_width, 1);
-               chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
-                                                      config->src_addr_width,
-                                                      config->src_maxburst);
-               break;
+       dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
+       dev_dbg(dchan->device->dev, "Terminating\n");
 
-       case DMA_PAUSE:
-               chan->paused = true;
-               nbpf_pause(chan);
-               break;
+       nbpf_chan_halt(chan);
+       nbpf_chan_idle(chan);
 
-       default:
-               return -ENXIO;
-       }
+       return 0;
+}
+
+static int nbpf_config(struct dma_chan *dchan,
+                      struct dma_slave_config *config)
+{
+       struct nbpf_channel *chan = nbpf_to_chan(dchan);
+
+       dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
+
+       /*
+        * We could check config->slave_id to match chan->terminal here,
+        * but with DT they would be coming from the same source, so
+        * such a check would be superfluous
+        */
+
+       chan->slave_dst_addr = config->dst_addr;
+       chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
+                                              config->dst_addr_width, 1);
+       chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
+                                              config->dst_addr_width,
+                                              config->dst_maxburst);
+       chan->slave_src_addr = config->src_addr;
+       chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
+                                              config->src_addr_width, 1);
+       chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
+                                              config->src_addr_width,
+                                              config->src_maxburst);
 
        return 0;
 }
@@ -1072,18 +1069,6 @@ static void nbpf_free_chan_resources(struct dma_chan *dchan)
        }
 }
 
-static int nbpf_slave_caps(struct dma_chan *dchan,
-                          struct dma_slave_caps *caps)
-{
-       caps->src_addr_widths = NBPF_DMA_BUSWIDTHS;
-       caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS;
-       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       caps->cmd_pause = false;
-       caps->cmd_terminate = true;
-
-       return 0;
-}
-
 static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
                                      struct of_dma *ofdma)
 {
@@ -1414,7 +1399,6 @@ static int nbpf_probe(struct platform_device *pdev)
        dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
        dma_dev->device_tx_status = nbpf_tx_status;
        dma_dev->device_issue_pending = nbpf_issue_pending;
-       dma_dev->device_slave_caps = nbpf_slave_caps;
 
        /*
         * If we drop support for unaligned MEMCPY buffer addresses and / or
@@ -1426,7 +1410,13 @@ static int nbpf_probe(struct platform_device *pdev)
 
        /* Compulsory for DMA_SLAVE fields */
        dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
-       dma_dev->device_control = nbpf_control;
+       dma_dev->device_config = nbpf_config;
+       dma_dev->device_pause = nbpf_pause;
+       dma_dev->device_terminate_all = nbpf_terminate_all;
+
+       dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
+       dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
+       dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 
        platform_set_drvdata(pdev, nbpf);
 
index d5fbeaa1e7ba76f25a20b7d266a26db3c3204c38..ca31f1b45366d232339cbf0ed17d5ee0ee9e0ca5 100644 (file)
@@ -159,6 +159,10 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
                return ERR_PTR(-ENODEV);
        }
 
+       /* Silently fail if there is not even the "dmas" property */
+       if (!of_find_property(np, "dmas", NULL))
+               return ERR_PTR(-ENODEV);
+
        count = of_property_count_strings(np, "dma-names");
        if (count < 0) {
                pr_err("%s: dma-names property of node '%s' missing or empty\n",
index c0016a68b44659d4965e69d73653c4860c1c49d2..7dd6dd1216819543aae06f2d3dd2707d16917c24 100644 (file)
@@ -948,8 +948,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        return vchan_tx_prep(&c->vc, &d->vd, flags);
 }
 
-static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
+static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
 {
+       struct omap_chan *c = to_omap_dma_chan(chan);
+
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;
@@ -959,8 +961,9 @@ static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *c
        return 0;
 }
 
-static int omap_dma_terminate_all(struct omap_chan *c)
+static int omap_dma_terminate_all(struct dma_chan *chan)
 {
+       struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);
@@ -996,8 +999,10 @@ static int omap_dma_terminate_all(struct omap_chan *c)
        return 0;
 }
 
-static int omap_dma_pause(struct omap_chan *c)
+static int omap_dma_pause(struct dma_chan *chan)
 {
+       struct omap_chan *c = to_omap_dma_chan(chan);
+
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;
@@ -1010,8 +1015,10 @@ static int omap_dma_pause(struct omap_chan *c)
        return 0;
 }
 
-static int omap_dma_resume(struct omap_chan *c)
+static int omap_dma_resume(struct dma_chan *chan)
 {
+       struct omap_chan *c = to_omap_dma_chan(chan);
+
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;
@@ -1029,37 +1036,6 @@ static int omap_dma_resume(struct omap_chan *c)
        return 0;
 }
 
-static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-       unsigned long arg)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       int ret;
-
-       switch (cmd) {
-       case DMA_SLAVE_CONFIG:
-               ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
-               break;
-
-       case DMA_TERMINATE_ALL:
-               ret = omap_dma_terminate_all(c);
-               break;
-
-       case DMA_PAUSE:
-               ret = omap_dma_pause(c);
-               break;
-
-       case DMA_RESUME:
-               ret = omap_dma_resume(c);
-               break;
-
-       default:
-               ret = -ENXIO;
-               break;
-       }
-
-       return ret;
-}
-
 static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
 {
        struct omap_chan *c;
@@ -1094,19 +1070,6 @@ static void omap_dma_free(struct omap_dmadev *od)
                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
-static int omap_dma_device_slave_caps(struct dma_chan *dchan,
-                                     struct dma_slave_caps *caps)
-{
-       caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
-       caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
-       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       caps->cmd_pause = true;
-       caps->cmd_terminate = true;
-       caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-       return 0;
-}
-
 static int omap_dma_probe(struct platform_device *pdev)
 {
        struct omap_dmadev *od;
@@ -1136,8 +1099,14 @@ static int omap_dma_probe(struct platform_device *pdev)
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
-       od->ddev.device_control = omap_dma_control;
-       od->ddev.device_slave_caps = omap_dma_device_slave_caps;
+       od->ddev.device_config = omap_dma_slave_config;
+       od->ddev.device_pause = omap_dma_pause;
+       od->ddev.device_resume = omap_dma_resume;
+       od->ddev.device_terminate_all = omap_dma_terminate_all;
+       od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
+       od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
+       od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
index 6e0e47d76b231e6cceda632e921efbaa035e1d81..35c143cb88da1c676d47f714f3fed146010083fc 100644 (file)
@@ -665,16 +665,12 @@ err_desc_get:
        return NULL;
 }
 
-static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                            unsigned long arg)
+static int pd_device_terminate_all(struct dma_chan *chan)
 {
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(list);
 
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENXIO;
-
        spin_lock_irq(&pd_chan->lock);
 
        pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
@@ -932,7 +928,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
        pd->dma.device_tx_status = pd_tx_status;
        pd->dma.device_issue_pending = pd_issue_pending;
        pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
-       pd->dma.device_control = pd_device_control;
+       pd->dma.device_terminate_all = pd_device_terminate_all;
 
        err = dma_async_device_register(&pd->dma);
        if (err) {
index bdf40b5300328886f139801548542723330218c2..0e1f56772855d8fc7f1194099a4c113a6b1c8ddf 100644 (file)
@@ -504,6 +504,9 @@ struct dma_pl330_desc {
 
        enum desc_status status;
 
+       int bytes_requested;
+       bool last;
+
        /* The channel which currently holds this desc */
        struct dma_pl330_chan *pchan;
 
@@ -1048,6 +1051,10 @@ static bool _trigger(struct pl330_thread *thrd)
        if (!req)
                return true;
 
+       /* Return if req is running */
+       if (idx == thrd->req_running)
+               return true;
+
        desc = req->desc;
 
        ns = desc->rqcfg.nonsecure ? 1 : 0;
@@ -1587,6 +1594,8 @@ static int pl330_update(struct pl330_dmac *pl330)
                        descdone = thrd->req[active].desc;
                        thrd->req[active].desc = NULL;
 
+                       thrd->req_running = -1;
+
                        /* Get going again ASAP */
                        _start(thrd);
 
@@ -2086,77 +2095,89 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
        return 1;
 }
 
-static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
+static int pl330_config(struct dma_chan *chan,
+                       struct dma_slave_config *slave_config)
+{
+       struct dma_pl330_chan *pch = to_pchan(chan);
+
+       if (slave_config->direction == DMA_MEM_TO_DEV) {
+               if (slave_config->dst_addr)
+                       pch->fifo_addr = slave_config->dst_addr;
+               if (slave_config->dst_addr_width)
+                       pch->burst_sz = __ffs(slave_config->dst_addr_width);
+               if (slave_config->dst_maxburst)
+                       pch->burst_len = slave_config->dst_maxburst;
+       } else if (slave_config->direction == DMA_DEV_TO_MEM) {
+               if (slave_config->src_addr)
+                       pch->fifo_addr = slave_config->src_addr;
+               if (slave_config->src_addr_width)
+                       pch->burst_sz = __ffs(slave_config->src_addr_width);
+               if (slave_config->src_maxburst)
+                       pch->burst_len = slave_config->src_maxburst;
+       }
+
+       return 0;
+}
+
+static int pl330_terminate_all(struct dma_chan *chan)
 {
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_desc *desc;
        unsigned long flags;
        struct pl330_dmac *pl330 = pch->dmac;
-       struct dma_slave_config *slave_config;
        LIST_HEAD(list);
 
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               pm_runtime_get_sync(pl330->ddma.dev);
-               spin_lock_irqsave(&pch->lock, flags);
+       spin_lock_irqsave(&pch->lock, flags);
+       spin_lock(&pl330->lock);
+       _stop(pch->thread);
+       spin_unlock(&pl330->lock);
+
+       pch->thread->req[0].desc = NULL;
+       pch->thread->req[1].desc = NULL;
+       pch->thread->req_running = -1;
+
+       /* Mark all desc done */
+       list_for_each_entry(desc, &pch->submitted_list, node) {
+               desc->status = FREE;
+               dma_cookie_complete(&desc->txd);
+       }
 
-               spin_lock(&pl330->lock);
-               _stop(pch->thread);
-               spin_unlock(&pl330->lock);
+       list_for_each_entry(desc, &pch->work_list , node) {
+               desc->status = FREE;
+               dma_cookie_complete(&desc->txd);
+       }
 
-               pch->thread->req[0].desc = NULL;
-               pch->thread->req[1].desc = NULL;
-               pch->thread->req_running = -1;
+       list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
+       list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
+       list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
+       spin_unlock_irqrestore(&pch->lock, flags);
 
-               /* Mark all desc done */
-               list_for_each_entry(desc, &pch->submitted_list, node) {
-                       desc->status = FREE;
-                       dma_cookie_complete(&desc->txd);
-               }
+       return 0;
+}
 
-               list_for_each_entry(desc, &pch->work_list , node) {
-                       desc->status = FREE;
-                       dma_cookie_complete(&desc->txd);
-               }
+/*
+ * We don't support DMA_RESUME command because of hardware
+ * limitations, so after pausing the channel we cannot restore
+ * it to active state. We have to terminate channel and setup
+ * DMA transfer again. This pause feature was implemented to
+ * allow safely read residue before channel termination.
+ */
+int pl330_pause(struct dma_chan *chan)
+{
+       struct dma_pl330_chan *pch = to_pchan(chan);
+       struct pl330_dmac *pl330 = pch->dmac;
+       unsigned long flags;
 
-               list_for_each_entry(desc, &pch->completed_list , node) {
-                       desc->status = FREE;
-                       dma_cookie_complete(&desc->txd);
-               }
+       pm_runtime_get_sync(pl330->ddma.dev);
+       spin_lock_irqsave(&pch->lock, flags);
 
-               if (!list_empty(&pch->work_list))
-                       pm_runtime_put(pl330->ddma.dev);
+       spin_lock(&pl330->lock);
+       _stop(pch->thread);
+       spin_unlock(&pl330->lock);
 
-               list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
-               list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
-               list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
-               spin_unlock_irqrestore(&pch->lock, flags);
-               pm_runtime_mark_last_busy(pl330->ddma.dev);
-               pm_runtime_put_autosuspend(pl330->ddma.dev);
-               break;
-       case DMA_SLAVE_CONFIG:
-               slave_config = (struct dma_slave_config *)arg;
-
-               if (slave_config->direction == DMA_MEM_TO_DEV) {
-                       if (slave_config->dst_addr)
-                               pch->fifo_addr = slave_config->dst_addr;
-                       if (slave_config->dst_addr_width)
-                               pch->burst_sz = __ffs(slave_config->dst_addr_width);
-                       if (slave_config->dst_maxburst)
-                               pch->burst_len = slave_config->dst_maxburst;
-               } else if (slave_config->direction == DMA_DEV_TO_MEM) {
-                       if (slave_config->src_addr)
-                               pch->fifo_addr = slave_config->src_addr;
-                       if (slave_config->src_addr_width)
-                               pch->burst_sz = __ffs(slave_config->src_addr_width);
-                       if (slave_config->src_maxburst)
-                               pch->burst_len = slave_config->src_maxburst;
-               }
-               break;
-       default:
-               dev_err(pch->dmac->ddma.dev, "Not supported command.\n");
-               return -ENXIO;
-       }
+       spin_unlock_irqrestore(&pch->lock, flags);
+       pm_runtime_mark_last_busy(pl330->ddma.dev);
+       pm_runtime_put_autosuspend(pl330->ddma.dev);
 
        return 0;
 }
@@ -2182,11 +2203,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
        pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
 
+int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+               struct dma_pl330_desc *desc)
+{
+       struct pl330_thread *thrd = pch->thread;
+       struct pl330_dmac *pl330 = pch->dmac;
+       void __iomem *regs = thrd->dmac->base;
+       u32 val, addr;
+
+       pm_runtime_get_sync(pl330->ddma.dev);
+       val = addr = 0;
+       if (desc->rqcfg.src_inc) {
+               val = readl(regs + SA(thrd->id));
+               addr = desc->px.src_addr;
+       } else {
+               val = readl(regs + DA(thrd->id));
+               addr = desc->px.dst_addr;
+       }
+       pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+       pm_runtime_put_autosuspend(pl330->ddma.dev);
+       return val - addr;
+}
+
 static enum dma_status
 pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                 struct dma_tx_state *txstate)
 {
-       return dma_cookie_status(chan, cookie, txstate);
+       enum dma_status ret;
+       unsigned long flags;
+       struct dma_pl330_desc *desc, *running = NULL;
+       struct dma_pl330_chan *pch = to_pchan(chan);
+       unsigned int transferred, residual = 0;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       if (!txstate)
+               return ret;
+
+       if (ret == DMA_COMPLETE)
+               goto out;
+
+       spin_lock_irqsave(&pch->lock, flags);
+
+       if (pch->thread->req_running != -1)
+               running = pch->thread->req[pch->thread->req_running].desc;
+
+       /* Check in pending list */
+       list_for_each_entry(desc, &pch->work_list, node) {
+               if (desc->status == DONE)
+                       transferred = desc->bytes_requested;
+               else if (running && desc == running)
+                       transferred =
+                               pl330_get_current_xferred_count(pch, desc);
+               else
+                       transferred = 0;
+               residual += desc->bytes_requested - transferred;
+               if (desc->txd.cookie == cookie) {
+                       ret = desc->status;
+                       break;
+               }
+               if (desc->last)
+                       residual = 0;
+       }
+       spin_unlock_irqrestore(&pch->lock, flags);
+
+out:
+       dma_set_residue(txstate, residual);
+
+       return ret;
 }
 
 static void pl330_issue_pending(struct dma_chan *chan)
@@ -2231,12 +2315,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
                        desc->txd.callback = last->txd.callback;
                        desc->txd.callback_param = last->txd.callback_param;
                }
+               last->last = false;
 
                dma_cookie_assign(&desc->txd);
 
                list_move_tail(&desc->node, &pch->submitted_list);
        }
 
+       last->last = true;
        cookie = dma_cookie_assign(&last->txd);
        list_add_tail(&last->node, &pch->submitted_list);
        spin_unlock_irqrestore(&pch->lock, flags);
@@ -2459,6 +2545,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
                desc->rqtype = direction;
                desc->rqcfg.brst_size = pch->burst_sz;
                desc->rqcfg.brst_len = 1;
+               desc->bytes_requested = period_len;
                fill_px(&desc->px, dst, src, period_len);
 
                if (!first)
@@ -2601,6 +2688,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                desc->rqcfg.brst_size = pch->burst_sz;
                desc->rqcfg.brst_len = 1;
                desc->rqtype = direction;
+               desc->bytes_requested = sg_dma_len(sg);
        }
 
        /* Return the last desc in the chain */
@@ -2623,19 +2711,6 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
 
-static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
-       struct dma_slave_caps *caps)
-{
-       caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
-       caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
-       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       caps->cmd_pause = false;
-       caps->cmd_terminate = true;
-       caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
-
-       return 0;
-}
-
 /*
  * Runtime PM callbacks are provided by amba/bus.c driver.
  *
@@ -2793,9 +2868,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
        pd->device_tx_status = pl330_tx_status;
        pd->device_prep_slave_sg = pl330_prep_slave_sg;
-       pd->device_control = pl330_control;
+       pd->device_config = pl330_config;
+       pd->device_pause = pl330_pause;
+       pd->device_terminate_all = pl330_terminate_all;
        pd->device_issue_pending = pl330_issue_pending;
-       pd->device_slave_caps = pl330_dma_device_slave_caps;
+       pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
+       pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
+       pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
        ret = dma_async_device_register(pd);
        if (ret) {
@@ -2847,7 +2927,7 @@ probe_err3:
 
                /* Flush the channel */
                if (pch->thread) {
-                       pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+                       pl330_terminate_all(&pch->chan);
                        pl330_free_chan_resources(&pch->chan);
                }
        }
@@ -2878,7 +2958,7 @@ static int pl330_remove(struct amba_device *adev)
 
                /* Flush the channel */
                if (pch->thread) {
-                       pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+                       pl330_terminate_all(&pch->chan);
                        pl330_free_chan_resources(&pch->chan);
                }
        }
index 3122a99ec06b8d92154d1c61fcbb62ce61622a7b..d7a33b3ac46603883e61577f80e29076c0b71221 100644 (file)
@@ -530,11 +530,18 @@ static void bam_free_chan(struct dma_chan *chan)
  * Sets slave configuration for channel
  *
  */
-static void bam_slave_config(struct bam_chan *bchan,
-               struct dma_slave_config *cfg)
+static int bam_slave_config(struct dma_chan *chan,
+                           struct dma_slave_config *cfg)
 {
+       struct bam_chan *bchan = to_bam_chan(chan);
+       unsigned long flag;
+
+       spin_lock_irqsave(&bchan->vc.lock, flag);
        memcpy(&bchan->slave, cfg, sizeof(*cfg));
        bchan->reconfigure = 1;
+       spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+       return 0;
 }
 
 /**
@@ -627,8 +634,9 @@ err_out:
  * No callbacks are done
  *
  */
-static void bam_dma_terminate_all(struct bam_chan *bchan)
+static int bam_dma_terminate_all(struct dma_chan *chan)
 {
+       struct bam_chan *bchan = to_bam_chan(chan);
        unsigned long flag;
        LIST_HEAD(head);
 
@@ -643,56 +651,46 @@ static void bam_dma_terminate_all(struct bam_chan *bchan)
        spin_unlock_irqrestore(&bchan->vc.lock, flag);
 
        vchan_dma_desc_free_list(&bchan->vc, &head);
+
+       return 0;
 }
 
 /**
- * bam_control - DMA device control
+ * bam_pause - Pause DMA channel
  * @chan: dma channel
- * @cmd: control cmd
- * @arg: cmd argument
  *
- * Perform DMA control command
+ */
+static int bam_pause(struct dma_chan *chan)
+{
+       struct bam_chan *bchan = to_bam_chan(chan);
+       struct bam_device *bdev = bchan->bdev;
+       unsigned long flag;
+
+       spin_lock_irqsave(&bchan->vc.lock, flag);
+       writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
+       bchan->paused = 1;
+       spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+       return 0;
+}
+
+/**
+ * bam_resume - Resume DMA channel operations
+ * @chan: dma channel
  *
  */
-static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-       unsigned long arg)
+static int bam_resume(struct dma_chan *chan)
 {
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
-       int ret = 0;
        unsigned long flag;
 
-       switch (cmd) {
-       case DMA_PAUSE:
-               spin_lock_irqsave(&bchan->vc.lock, flag);
-               writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
-               bchan->paused = 1;
-               spin_unlock_irqrestore(&bchan->vc.lock, flag);
-               break;
-
-       case DMA_RESUME:
-               spin_lock_irqsave(&bchan->vc.lock, flag);
-               writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
-               bchan->paused = 0;
-               spin_unlock_irqrestore(&bchan->vc.lock, flag);
-               break;
-
-       case DMA_TERMINATE_ALL:
-               bam_dma_terminate_all(bchan);
-               break;
-
-       case DMA_SLAVE_CONFIG:
-               spin_lock_irqsave(&bchan->vc.lock, flag);
-               bam_slave_config(bchan, (struct dma_slave_config *)arg);
-               spin_unlock_irqrestore(&bchan->vc.lock, flag);
-               break;
-
-       default:
-               ret = -ENXIO;
-               break;
-       }
+       spin_lock_irqsave(&bchan->vc.lock, flag);
+       writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
+       bchan->paused = 0;
+       spin_unlock_irqrestore(&bchan->vc.lock, flag);
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -1148,7 +1146,10 @@ static int bam_dma_probe(struct platform_device *pdev)
        bdev->common.device_alloc_chan_resources = bam_alloc_chan;
        bdev->common.device_free_chan_resources = bam_free_chan;
        bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
-       bdev->common.device_control = bam_control;
+       bdev->common.device_config = bam_slave_config;
+       bdev->common.device_pause = bam_pause;
+       bdev->common.device_resume = bam_resume;
+       bdev->common.device_terminate_all = bam_dma_terminate_all;
        bdev->common.device_issue_pending = bam_issue_pending;
        bdev->common.device_tx_status = bam_tx_status;
        bdev->common.dev = bdev->dev;
@@ -1187,7 +1188,7 @@ static int bam_dma_remove(struct platform_device *pdev)
        devm_free_irq(bdev->dev, bdev->irq, bdev);
 
        for (i = 0; i < bdev->num_channels; i++) {
-               bam_dma_terminate_all(&bdev->channels[i]);
+               bam_dma_terminate_all(&bdev->channels[i].vc.chan);
                tasklet_kill(&bdev->channels[i].vc.task);
 
                dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
index 6941a77521c3d895dbe220a1190d8e64a9acf138..2f91da3db8361cdba3dbd51e120b299607a42485 100644 (file)
@@ -384,20 +384,30 @@ static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
        return tc * txd->width;
 }
 
-static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
+static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
                                  struct dma_slave_config *config)
 {
-       if (!s3cchan->slave)
-               return -EINVAL;
+       struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+       unsigned long flags;
+       int ret = 0;
 
        /* Reject definitely invalid configurations */
        if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;
 
+       spin_lock_irqsave(&s3cchan->vc.lock, flags);
+
+       if (!s3cchan->slave) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        s3cchan->cfg = *config;
 
-       return 0;
+out:
+       spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+       return ret;
 }
 
 /*
@@ -703,8 +713,7 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
  * The DMA ENGINE API
  */
 
-static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                        unsigned long arg)
+static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
 {
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
@@ -713,40 +722,28 @@ static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
        spin_lock_irqsave(&s3cchan->vc.lock, flags);
 
-       switch (cmd) {
-       case DMA_SLAVE_CONFIG:
-               ret = s3c24xx_dma_set_runtime_config(s3cchan,
-                                             (struct dma_slave_config *)arg);
-               break;
-       case DMA_TERMINATE_ALL:
-               if (!s3cchan->phy && !s3cchan->at) {
-                       dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
-                               s3cchan->id);
-                       ret = -EINVAL;
-                       break;
-               }
+       if (!s3cchan->phy && !s3cchan->at) {
+               dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
+                       s3cchan->id);
+               ret = -EINVAL;
+               goto unlock;
+       }
 
-               s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
+       s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
 
-                /* Mark physical channel as free */
-               if (s3cchan->phy)
-                       s3c24xx_dma_phy_free(s3cchan);
+       /* Mark physical channel as free */
+       if (s3cchan->phy)
+               s3c24xx_dma_phy_free(s3cchan);
 
-               /* Dequeue current job */
-               if (s3cchan->at) {
-                       s3c24xx_dma_desc_free(&s3cchan->at->vd);
-                       s3cchan->at = NULL;
-               }
-
-               /* Dequeue jobs not yet fired as well */
-               s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
-               break;
-       default:
-               /* Unknown command */
-               ret = -ENXIO;
-               break;
+       /* Dequeue current job */
+       if (s3cchan->at) {
+               s3c24xx_dma_desc_free(&s3cchan->at->vd);
+               s3cchan->at = NULL;
        }
 
+       /* Dequeue jobs not yet fired as well */
+       s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+unlock:
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
 
        return ret;
@@ -1300,7 +1297,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
        s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
        s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
        s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
-       s3cdma->memcpy.device_control = s3c24xx_dma_control;
+       s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
+       s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
 
        /* Initialize slave engine for SoC internal dedicated peripherals */
        dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1315,7 +1313,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
        s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
        s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
        s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
-       s3cdma->slave.device_control = s3c24xx_dma_control;
+       s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
+       s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
 
        /* Register as many memcpy channels as there are physical channels */
        ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
index 96bb62c39c41bf48f63409fcad5bc598eb0bb5e7..5adf5407a8cb83a70faec26c6182538df958bcb5 100644 (file)
@@ -669,8 +669,10 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
        return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 }
 
-static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
+static int sa11x0_dma_device_config(struct dma_chan *chan,
+                                   struct dma_slave_config *cfg)
 {
+       struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
        dma_addr_t addr;
        enum dma_slave_buswidth width;
@@ -704,99 +706,101 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
        return 0;
 }
 
-static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-       unsigned long arg)
+static int sa11x0_dma_device_pause(struct dma_chan *chan)
 {
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        struct sa11x0_dma_phy *p;
        LIST_HEAD(head);
        unsigned long flags;
-       int ret;
 
-       switch (cmd) {
-       case DMA_SLAVE_CONFIG:
-               return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
-
-       case DMA_TERMINATE_ALL:
-               dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
-               /* Clear the tx descriptor lists */
-               spin_lock_irqsave(&c->vc.lock, flags);
-               vchan_get_all_descriptors(&c->vc, &head);
+       dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+       spin_lock_irqsave(&c->vc.lock, flags);
+       if (c->status == DMA_IN_PROGRESS) {
+               c->status = DMA_PAUSED;
 
                p = c->phy;
                if (p) {
-                       dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
-                       /* vchan is assigned to a pchan - stop the channel */
-                       writel(DCSR_RUN | DCSR_IE |
-                               DCSR_STRTA | DCSR_DONEA |
-                               DCSR_STRTB | DCSR_DONEB,
-                               p->base + DMA_DCSR_C);
-
-                       if (p->txd_load) {
-                               if (p->txd_load != p->txd_done)
-                                       list_add_tail(&p->txd_load->vd.node, &head);
-                               p->txd_load = NULL;
-                       }
-                       if (p->txd_done) {
-                               list_add_tail(&p->txd_done->vd.node, &head);
-                               p->txd_done = NULL;
-                       }
-                       c->phy = NULL;
+                       writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
+               } else {
                        spin_lock(&d->lock);
-                       p->vchan = NULL;
+                       list_del_init(&c->node);
                        spin_unlock(&d->lock);
-                       tasklet_schedule(&d->task);
                }
-               spin_unlock_irqrestore(&c->vc.lock, flags);
-               vchan_dma_desc_free_list(&c->vc, &head);
-               ret = 0;
-               break;
+       }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
 
-       case DMA_PAUSE:
-               dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
-               spin_lock_irqsave(&c->vc.lock, flags);
-               if (c->status == DMA_IN_PROGRESS) {
-                       c->status = DMA_PAUSED;
+       return 0;
+}
 
-                       p = c->phy;
-                       if (p) {
-                               writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
-                       } else {
-                               spin_lock(&d->lock);
-                               list_del_init(&c->node);
-                               spin_unlock(&d->lock);
-                       }
-               }
-               spin_unlock_irqrestore(&c->vc.lock, flags);
-               ret = 0;
-               break;
+static int sa11x0_dma_device_resume(struct dma_chan *chan)
+{
+       struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+       struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+       struct sa11x0_dma_phy *p;
+       LIST_HEAD(head);
+       unsigned long flags;
 
-       case DMA_RESUME:
-               dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
-               spin_lock_irqsave(&c->vc.lock, flags);
-               if (c->status == DMA_PAUSED) {
-                       c->status = DMA_IN_PROGRESS;
-
-                       p = c->phy;
-                       if (p) {
-                               writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
-                       } else if (!list_empty(&c->vc.desc_issued)) {
-                               spin_lock(&d->lock);
-                               list_add_tail(&c->node, &d->chan_pending);
-                               spin_unlock(&d->lock);
-                       }
+       dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+       spin_lock_irqsave(&c->vc.lock, flags);
+       if (c->status == DMA_PAUSED) {
+               c->status = DMA_IN_PROGRESS;
+
+               p = c->phy;
+               if (p) {
+                       writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
+               } else if (!list_empty(&c->vc.desc_issued)) {
+                       spin_lock(&d->lock);
+                       list_add_tail(&c->node, &d->chan_pending);
+                       spin_unlock(&d->lock);
                }
-               spin_unlock_irqrestore(&c->vc.lock, flags);
-               ret = 0;
-               break;
+       }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
 
-       default:
-               ret = -ENXIO;
-               break;
+       return 0;
+}
+
+static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
+{
+       struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+       struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+       struct sa11x0_dma_phy *p;
+       LIST_HEAD(head);
+       unsigned long flags;
+
+       dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+       /* Clear the tx descriptor lists */
+       spin_lock_irqsave(&c->vc.lock, flags);
+       vchan_get_all_descriptors(&c->vc, &head);
+
+       p = c->phy;
+       if (p) {
+               dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
+               /* vchan is assigned to a pchan - stop the channel */
+               writel(DCSR_RUN | DCSR_IE |
+                      DCSR_STRTA | DCSR_DONEA |
+                      DCSR_STRTB | DCSR_DONEB,
+                      p->base + DMA_DCSR_C);
+
+               if (p->txd_load) {
+                       if (p->txd_load != p->txd_done)
+                               list_add_tail(&p->txd_load->vd.node, &head);
+                       p->txd_load = NULL;
+               }
+               if (p->txd_done) {
+                       list_add_tail(&p->txd_done->vd.node, &head);
+                       p->txd_done = NULL;
+               }
+               c->phy = NULL;
+               spin_lock(&d->lock);
+               p->vchan = NULL;
+               spin_unlock(&d->lock);
+               tasklet_schedule(&d->task);
        }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+       vchan_dma_desc_free_list(&c->vc, &head);
 
-       return ret;
+       return 0;
 }
 
 struct sa11x0_dma_channel_desc {
@@ -833,7 +837,10 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
        dmadev->dev = dev;
        dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
        dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
-       dmadev->device_control = sa11x0_dma_control;
+       dmadev->device_config = sa11x0_dma_device_config;
+       dmadev->device_pause = sa11x0_dma_device_pause;
+       dmadev->device_resume = sa11x0_dma_device_resume;
+       dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
        dmadev->device_tx_status = sa11x0_dma_tx_status;
        dmadev->device_issue_pending = sa11x0_dma_issue_pending;
 
index 0349125a2e20af8b4af6a85ca3ed5d9708b662c8..8190ad225a1b80bdad6daab9d43904421b8481f7 100644 (file)
@@ -2,6 +2,10 @@
 # DMA engine configuration for sh
 #
 
+config RENESAS_DMA
+       bool
+       select DMA_ENGINE
+
 #
 # DMA Engine Helpers
 #
@@ -12,7 +16,7 @@ config SH_DMAE_BASE
        depends on !SUPERH || SH_DMA
        depends on !SH_DMA_API
        default y
-       select DMA_ENGINE
+       select RENESAS_DMA
        help
          Enable support for the Renesas SuperH DMA controllers.
 
@@ -52,3 +56,11 @@ config RCAR_AUDMAC_PP
        depends on SH_DMAE_BASE
        help
          Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
+config RCAR_DMAC
+       tristate "Renesas R-Car Gen2 DMA Controller"
+       depends on ARCH_SHMOBILE || COMPILE_TEST
+       select RENESAS_DMA
+       help
+         This driver supports the general purpose DMA controller found in the
+         Renesas R-Car second generation SoCs.
index 0a5cfdb76e45cab3326967c28f3f231a154a16eb..2852f9db61a40a668f3140299c6a2760226aea13 100644 (file)
@@ -16,3 +16,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_SUDMAC) += sudmac.o
 obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
+obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
new file mode 100644 (file)
index 0000000..a18d16c
--- /dev/null
@@ -0,0 +1,1770 @@
+/*
+ * Renesas R-Car Gen2 DMA Controller Driver
+ *
+ * Copyright (C) 2014 Renesas Electronics Inc.
+ *
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+
+/*
+ * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
+ * @node: entry in the parent's chunks list
+ * @src_addr: device source address
+ * @dst_addr: device destination address
+ * @size: transfer size in bytes
+ */
+struct rcar_dmac_xfer_chunk {
+       struct list_head node;
+
+       dma_addr_t src_addr;
+       dma_addr_t dst_addr;
+       u32 size;
+};
+
+/*
+ * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
+ * @sar: value of the SAR register (source address)
+ * @dar: value of the DAR register (destination address)
+ * @tcr: value of the TCR register (transfer count)
+ */
+struct rcar_dmac_hw_desc {
+       u32 sar;
+       u32 dar;
+       u32 tcr;
+       u32 reserved;
+} __attribute__((__packed__));
+
+/*
+ * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
+ * @async_tx: base DMA asynchronous transaction descriptor
+ * @direction: direction of the DMA transfer
+ * @xfer_shift: log2 of the transfer size
+ * @chcr: value of the channel configuration register for this transfer
+ * @node: entry in the channel's descriptors lists
+ * @chunks: list of transfer chunks for this transfer
+ * @running: the transfer chunk being currently processed
+ * @nchunks: number of transfer chunks for this transfer
+ * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
+ * @hwdescs.mem: hardware descriptors memory for the transfer
+ * @hwdescs.dma: device address of the hardware descriptors memory
+ * @hwdescs.size: size of the hardware descriptors in bytes
+ * @size: transfer size in bytes
+ * @cyclic: when set indicates that the DMA transfer is cyclic
+ */
+struct rcar_dmac_desc {
+       struct dma_async_tx_descriptor async_tx;
+       enum dma_transfer_direction direction;
+       unsigned int xfer_shift;
+       u32 chcr;
+
+       struct list_head node;
+       struct list_head chunks;
+       struct rcar_dmac_xfer_chunk *running;
+       unsigned int nchunks;
+
+       struct {
+               bool use;
+               struct rcar_dmac_hw_desc *mem;
+               dma_addr_t dma;
+               size_t size;
+       } hwdescs;
+
+       unsigned int size;
+       bool cyclic;
+};
+
+#define to_rcar_dmac_desc(d)   container_of(d, struct rcar_dmac_desc, async_tx)
+
+/*
+ * struct rcar_dmac_desc_page - One page worth of descriptors
+ * @node: entry in the channel's pages list
+ * @descs: array of DMA descriptors
+ * @chunks: array of transfer chunk descriptors
+ */
+struct rcar_dmac_desc_page {
+       struct list_head node;
+
+       union {
+               struct rcar_dmac_desc descs[0];
+               struct rcar_dmac_xfer_chunk chunks[0];
+       };
+};
+
+#define RCAR_DMAC_DESCS_PER_PAGE                                       \
+       ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /    \
+       sizeof(struct rcar_dmac_desc))
+#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE                                 \
+       ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /   \
+       sizeof(struct rcar_dmac_xfer_chunk))
+
+/*
+ * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
+ * @chan: base DMA channel object
+ * @iomem: channel I/O memory base
+ * @index: index of this channel in the controller
+ * @src_xfer_size: size (in bytes) of hardware transfers on the source side
+ * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
+ * @src_slave_addr: slave source memory address
+ * @dst_slave_addr: slave destination memory address
+ * @mid_rid: hardware MID/RID for the DMA client using this channel
+ * @lock: protects the channel CHCR register and the desc members
+ * @desc.free: list of free descriptors
+ * @desc.pending: list of pending descriptors (submitted with tx_submit)
+ * @desc.active: list of active descriptors (activated with issue_pending)
+ * @desc.done: list of completed descriptors
+ * @desc.wait: list of descriptors waiting for an ack
+ * @desc.running: the descriptor being processed (a member of the active list)
+ * @desc.chunks_free: list of free transfer chunk descriptors
+ * @desc.pages: list of pages used by allocated descriptors
+ */
+struct rcar_dmac_chan {
+       struct dma_chan chan;
+       void __iomem *iomem;
+       unsigned int index;
+
+       unsigned int src_xfer_size;
+       unsigned int dst_xfer_size;
+       dma_addr_t src_slave_addr;
+       dma_addr_t dst_slave_addr;
+       int mid_rid;
+
+       spinlock_t lock;
+
+       struct {
+               struct list_head free;
+               struct list_head pending;
+               struct list_head active;
+               struct list_head done;
+               struct list_head wait;
+               struct rcar_dmac_desc *running;
+
+               struct list_head chunks_free;
+
+               struct list_head pages;
+       } desc;
+};
+
+#define to_rcar_dmac_chan(c)   container_of(c, struct rcar_dmac_chan, chan)
+
+/*
+ * struct rcar_dmac - R-Car Gen2 DMA Controller
+ * @engine: base DMA engine object
+ * @dev: the hardware device
+ * @iomem: remapped I/O memory base
+ * @n_channels: number of available channels
+ * @channels: array of DMAC channels
+ * @modules: bitmask of client modules in use
+ */
+struct rcar_dmac {
+       struct dma_device engine;
+       struct device *dev;
+       void __iomem *iomem;
+
+       unsigned int n_channels;
+       struct rcar_dmac_chan *channels;
+
+       unsigned long modules[256 / BITS_PER_LONG];
+};
+
+#define to_rcar_dmac(d)                container_of(d, struct rcar_dmac, engine)
+
+/* -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define RCAR_DMAC_CHAN_OFFSET(i)       (0x8000 + 0x80 * (i))
+
+#define RCAR_DMAISTA                   0x0020
+#define RCAR_DMASEC                    0x0030
+#define RCAR_DMAOR                     0x0060
+#define RCAR_DMAOR_PRI_FIXED           (0 << 8)
+#define RCAR_DMAOR_PRI_ROUND_ROBIN     (3 << 8)
+#define RCAR_DMAOR_AE                  (1 << 2)
+#define RCAR_DMAOR_DME                 (1 << 0)
+#define RCAR_DMACHCLR                  0x0080
+#define RCAR_DMADPSEC                  0x00a0
+
+#define RCAR_DMASAR                    0x0000
+#define RCAR_DMADAR                    0x0004
+#define RCAR_DMATCR                    0x0008
+#define RCAR_DMATCR_MASK               0x00ffffff
+#define RCAR_DMATSR                    0x0028
+#define RCAR_DMACHCR                   0x000c
+#define RCAR_DMACHCR_CAE               (1 << 31)
+#define RCAR_DMACHCR_CAIE              (1 << 30)
+#define RCAR_DMACHCR_DPM_DISABLED      (0 << 28)
+#define RCAR_DMACHCR_DPM_ENABLED       (1 << 28)
+#define RCAR_DMACHCR_DPM_REPEAT                (2 << 28)
+#define RCAR_DMACHCR_DPM_INFINITE      (3 << 28)
+#define RCAR_DMACHCR_RPT_SAR           (1 << 27)
+#define RCAR_DMACHCR_RPT_DAR           (1 << 26)
+#define RCAR_DMACHCR_RPT_TCR           (1 << 25)
+#define RCAR_DMACHCR_DPB               (1 << 22)
+#define RCAR_DMACHCR_DSE               (1 << 19)
+#define RCAR_DMACHCR_DSIE              (1 << 18)
+#define RCAR_DMACHCR_TS_1B             ((0 << 20) | (0 << 3))
+#define RCAR_DMACHCR_TS_2B             ((0 << 20) | (1 << 3))
+#define RCAR_DMACHCR_TS_4B             ((0 << 20) | (2 << 3))
+#define RCAR_DMACHCR_TS_16B            ((0 << 20) | (3 << 3))
+#define RCAR_DMACHCR_TS_32B            ((1 << 20) | (0 << 3))
+#define RCAR_DMACHCR_TS_64B            ((1 << 20) | (1 << 3))
+#define RCAR_DMACHCR_TS_8B             ((1 << 20) | (3 << 3))
+#define RCAR_DMACHCR_DM_FIXED          (0 << 14)
+#define RCAR_DMACHCR_DM_INC            (1 << 14)
+#define RCAR_DMACHCR_DM_DEC            (2 << 14)
+#define RCAR_DMACHCR_SM_FIXED          (0 << 12)
+#define RCAR_DMACHCR_SM_INC            (1 << 12)
+#define RCAR_DMACHCR_SM_DEC            (2 << 12)
+#define RCAR_DMACHCR_RS_AUTO           (4 << 8)
+#define RCAR_DMACHCR_RS_DMARS          (8 << 8)
+#define RCAR_DMACHCR_IE                        (1 << 2)
+#define RCAR_DMACHCR_TE                        (1 << 1)
+#define RCAR_DMACHCR_DE                        (1 << 0)
+#define RCAR_DMATCRB                   0x0018
+#define RCAR_DMATSRB                   0x0038
+#define RCAR_DMACHCRB                  0x001c
+#define RCAR_DMACHCRB_DCNT(n)          ((n) << 24)
+#define RCAR_DMACHCRB_DPTR_MASK                (0xff << 16)
+#define RCAR_DMACHCRB_DPTR_SHIFT       16
+#define RCAR_DMACHCRB_DRST             (1 << 15)
+#define RCAR_DMACHCRB_DTS              (1 << 8)
+#define RCAR_DMACHCRB_SLM_NORMAL       (0 << 4)
+#define RCAR_DMACHCRB_SLM_CLK(n)       ((8 | (n)) << 4)
+#define RCAR_DMACHCRB_PRI(n)           ((n) << 0)
+#define RCAR_DMARS                     0x0040
+#define RCAR_DMABUFCR                  0x0048
+#define RCAR_DMABUFCR_MBU(n)           ((n) << 16)
+#define RCAR_DMABUFCR_ULB(n)           ((n) << 0)
+#define RCAR_DMADPBASE                 0x0050
+#define RCAR_DMADPBASE_MASK            0xfffffff0
+#define RCAR_DMADPBASE_SEL             (1 << 0)
+#define RCAR_DMADPCR                   0x0054
+#define RCAR_DMADPCR_DIPT(n)           ((n) << 24)
+#define RCAR_DMAFIXSAR                 0x0010
+#define RCAR_DMAFIXDAR                 0x0014
+#define RCAR_DMAFIXDPBASE              0x0060
+
+/* Hardcode the MEMCPY transfer size to 4 bytes. */
+#define RCAR_DMAC_MEMCPY_XFER_SIZE     4
+
+/* -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
+{
+       if (reg == RCAR_DMAOR)
+               writew(data, dmac->iomem + reg);
+       else
+               writel(data, dmac->iomem + reg);
+}
+
+static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
+{
+       if (reg == RCAR_DMAOR)
+               return readw(dmac->iomem + reg);
+       else
+               return readl(dmac->iomem + reg);
+}
+
+static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
+{
+       if (reg == RCAR_DMARS)
+               return readw(chan->iomem + reg);
+       else
+               return readl(chan->iomem + reg);
+}
+
+static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
+{
+       if (reg == RCAR_DMARS)
+               writew(data, chan->iomem + reg);
+       else
+               writel(data, chan->iomem + reg);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and configuration
+ */
+
+static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
+{
+       u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+       return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
+}
+
+static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
+{
+       struct rcar_dmac_desc *desc = chan->desc.running;
+       u32 chcr = desc->chcr;
+
+       WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
+
+       if (chan->mid_rid >= 0)
+               rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
+
+       if (desc->hwdescs.use) {
+               struct rcar_dmac_xfer_chunk *chunk;
+
+               dev_dbg(chan->chan.device->dev,
+                       "chan%u: queue desc %p: %u@%pad\n",
+                       chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+               rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
+                                    desc->hwdescs.dma >> 32);
+#endif
+               rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
+                                    (desc->hwdescs.dma & 0xfffffff0) |
+                                    RCAR_DMADPBASE_SEL);
+               rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
+                                    RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
+                                    RCAR_DMACHCRB_DRST);
+
+               /*
+                * Errata: When descriptor memory is accessed through an IOMMU
+                * the DMADAR register isn't initialized automatically from the
+                * first descriptor at beginning of transfer by the DMAC like it
+                * should. Initialize it manually with the destination address
+                * of the first chunk.
+                */
+               chunk = list_first_entry(&desc->chunks,
+                                        struct rcar_dmac_xfer_chunk, node);
+               rcar_dmac_chan_write(chan, RCAR_DMADAR,
+                                    chunk->dst_addr & 0xffffffff);
+
+               /*
+                * Program the descriptor stage interrupt to occur after the end
+                * of the first stage.
+                */
+               rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
+
+               chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
+                    |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
+
+               /*
+                * If the descriptor isn't cyclic enable normal descriptor mode
+                * and the transfer completion interrupt.
+                */
+               if (!desc->cyclic)
+                       chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
+               /*
+                * If the descriptor is cyclic and has a callback enable the
+                * descriptor stage interrupt in infinite repeat mode.
+                */
+               else if (desc->async_tx.callback)
+                       chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
+               /*
+                * Otherwise just select infinite repeat mode without any
+                * interrupt.
+                */
+               else
+                       chcr |= RCAR_DMACHCR_DPM_INFINITE;
+       } else {
+               struct rcar_dmac_xfer_chunk *chunk = desc->running;
+
+               dev_dbg(chan->chan.device->dev,
+                       "chan%u: queue chunk %p: %u@%pad -> %pad\n",
+                       chan->index, chunk, chunk->size, &chunk->src_addr,
+                       &chunk->dst_addr);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+               rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+                                    chunk->src_addr >> 32);
+               rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+                                    chunk->dst_addr >> 32);
+#endif
+               rcar_dmac_chan_write(chan, RCAR_DMASAR,
+                                    chunk->src_addr & 0xffffffff);
+               rcar_dmac_chan_write(chan, RCAR_DMADAR,
+                                    chunk->dst_addr & 0xffffffff);
+               rcar_dmac_chan_write(chan, RCAR_DMATCR,
+                                    chunk->size >> desc->xfer_shift);
+
+               chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
+       }
+
+       rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
+}
+
+static int rcar_dmac_init(struct rcar_dmac *dmac)
+{
+       u16 dmaor;
+
+       /* Clear all channels and enable the DMAC globally. */
+       rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
+       rcar_dmac_write(dmac, RCAR_DMAOR,
+                       RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
+
+       dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
+       if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
+               dev_warn(dmac->dev, "DMAOR initialization failed.\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors submission
+ */
+
+/*
+ * rcar_dmac_tx_submit - Assign a cookie and queue the descriptor
+ * @tx: the dmaengine transaction descriptor
+ *
+ * Adds the descriptor to the channel's pending list and points its running
+ * chunk at the first transfer chunk. The hardware transfer is only started
+ * later, from rcar_dmac_issue_pending().
+ *
+ * Return: the newly assigned DMA cookie.
+ */
+static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
+       struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
+       unsigned long flags;
+       dma_cookie_t cookie;
+
+       spin_lock_irqsave(&chan->lock, flags);
+
+       cookie = dma_cookie_assign(tx);
+
+       dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
+               chan->index, tx->cookie, desc);
+
+       list_add_tail(&desc->node, &chan->desc.pending);
+       desc->running = list_first_entry(&desc->chunks,
+                                        struct rcar_dmac_xfer_chunk, node);
+
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       return cookie;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors allocation and free
+ */
+
+/*
+ * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
+ * @chan: the DMA channel
+ * @gfp: allocation flags
+ *
+ * The descriptors are carved out of a single zeroed page tracked on the
+ * channel's pages list, so they are freed page-wise when the channel
+ * resources are released.
+ *
+ * Return: 0 on success or -ENOMEM if the page allocation failed.
+ */
+static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
+{
+       struct rcar_dmac_desc_page *page;
+       LIST_HEAD(list);
+       unsigned int i;
+
+       page = (void *)get_zeroed_page(gfp);
+       if (!page)
+               return -ENOMEM;
+
+       /* Initialize every descriptor in the page on a local list first. */
+       for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
+               struct rcar_dmac_desc *desc = &page->descs[i];
+
+               dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+               desc->async_tx.tx_submit = rcar_dmac_tx_submit;
+               INIT_LIST_HEAD(&desc->chunks);
+
+               list_add_tail(&desc->node, &list);
+       }
+
+       /* Publish the new descriptors and the backing page under the lock. */
+       spin_lock_irq(&chan->lock);
+       list_splice_tail(&list, &chan->desc.free);
+       list_add_tail(&page->node, &chan->desc.pages);
+       spin_unlock_irq(&chan->lock);
+
+       return 0;
+}
+
+/*
+ * rcar_dmac_desc_put - Release a DMA transfer descriptor
+ * @chan: the DMA channel
+ * @desc: the descriptor
+ *
+ * Put the descriptor and its transfer chunk descriptors back in the channel's
+ * free descriptors lists. The descriptor's chunks list will be reinitialized to
+ * an empty list as a result.
+ *
+ * The descriptor must have been removed from the channel's lists before calling
+ * this function.
+ */
+static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
+                              struct rcar_dmac_desc *desc)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       /* _init variant leaves desc->chunks empty for reuse. */
+       list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
+       list_add_tail(&desc->node, &chan->desc.free);
+       spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/*
+ * rcar_dmac_desc_recycle_acked - Move acked wait-list descriptors to the
+ *                                free list
+ * @chan: the DMA channel
+ *
+ * Locking: takes and releases the channel lock; must be called in a
+ * non-atomic context.
+ */
+static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
+{
+       struct rcar_dmac_desc *desc, *_desc;
+       LIST_HEAD(list);
+
+       /*
+        * We have to temporarily move all descriptors from the wait list to a
+        * local list as iterating over the wait list, even with
+        * list_for_each_entry_safe, isn't safe if we release the channel lock
+        * around the rcar_dmac_desc_put() call.
+        */
+       spin_lock_irq(&chan->lock);
+       list_splice_init(&chan->desc.wait, &list);
+       spin_unlock_irq(&chan->lock);
+
+       list_for_each_entry_safe(desc, _desc, &list, node) {
+               if (async_tx_test_ack(&desc->async_tx)) {
+                       list_del(&desc->node);
+                       rcar_dmac_desc_put(chan, desc);
+               }
+       }
+
+       /* Nothing left over: every descriptor was acked and recycled. */
+       if (list_empty(&list))
+               return;
+
+       /* Put the remaining descriptors back in the wait list. */
+       spin_lock_irq(&chan->lock);
+       list_splice(&list, &chan->desc.wait);
+       spin_unlock_irq(&chan->lock);
+}
+
+/*
+ * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
+ * @chan: the DMA channel
+ *
+ * Locking: This function must be called in a non-atomic context.
+ *
+ * Return: A pointer to the allocated descriptor or NULL if no descriptor can
+ * be allocated.
+ */
+static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
+{
+       struct rcar_dmac_desc *desc;
+       int ret;
+
+       /* Recycle acked descriptors before attempting allocation. */
+       rcar_dmac_desc_recycle_acked(chan);
+
+       spin_lock_irq(&chan->lock);
+
+       while (list_empty(&chan->desc.free)) {
+               /*
+                * No free descriptors, allocate a page worth of them and try
+                * again, as someone else could race us to get the newly
+                * allocated descriptors. If the allocation fails return an
+                * error.
+                */
+               spin_unlock_irq(&chan->lock);
+               ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
+               if (ret < 0)
+                       return NULL;
+               spin_lock_irq(&chan->lock);
+       }
+
+       /* Detach the first free descriptor; caller now owns it exclusively. */
+       desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
+       list_del(&desc->node);
+
+       spin_unlock_irq(&chan->lock);
+
+       return desc;
+}
+
+/*
+ * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
+ * @chan: the DMA channel
+ * @gfp: allocation flags
+ *
+ * Like rcar_dmac_desc_alloc(), the chunks are carved out of a single zeroed
+ * page tracked on the channel's pages list.
+ *
+ * Return: 0 on success or -ENOMEM if the page allocation failed.
+ */
+static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
+{
+       struct rcar_dmac_desc_page *page;
+       LIST_HEAD(list);
+       unsigned int i;
+
+       page = (void *)get_zeroed_page(gfp);
+       if (!page)
+               return -ENOMEM;
+
+       for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
+               struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
+
+               list_add_tail(&chunk->node, &list);
+       }
+
+       /* Publish the new chunks and the backing page under the lock. */
+       spin_lock_irq(&chan->lock);
+       list_splice_tail(&list, &chan->desc.chunks_free);
+       list_add_tail(&page->node, &chan->desc.pages);
+       spin_unlock_irq(&chan->lock);
+
+       return 0;
+}
+
+/*
+ * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
+ * @chan: the DMA channel
+ *
+ * Locking: This function must be called in a non-atomic context.
+ *
+ * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
+ * descriptor can be allocated.
+ */
+static struct rcar_dmac_xfer_chunk *
+rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
+{
+       struct rcar_dmac_xfer_chunk *chunk;
+       int ret;
+
+       spin_lock_irq(&chan->lock);
+
+       while (list_empty(&chan->desc.chunks_free)) {
+               /*
+                * No free descriptors, allocate a page worth of them and try
+                * again, as someone else could race us to get the newly
+                * allocated descriptors. If the allocation fails return an
+                * error.
+                */
+               spin_unlock_irq(&chan->lock);
+               ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
+               if (ret < 0)
+                       return NULL;
+               spin_lock_irq(&chan->lock);
+       }
+
+       /* Detach the first free chunk; caller now owns it exclusively. */
+       chunk = list_first_entry(&chan->desc.chunks_free,
+                                struct rcar_dmac_xfer_chunk, node);
+       list_del(&chunk->node);
+
+       spin_unlock_irq(&chan->lock);
+
+       return chunk;
+}
+
+/*
+ * rcar_dmac_realloc_hwdesc - (Re)allocate the hardware descriptor list memory
+ * @chan: the DMA channel
+ * @desc: the descriptor whose hardware descriptor memory is resized
+ * @size: requested size in bytes (0 frees the memory)
+ *
+ * On allocation failure desc->hwdescs.mem stays NULL and desc->hwdescs.size
+ * stays 0; callers detect this by checking the mem pointer.
+ */
+static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
+                                    struct rcar_dmac_desc *desc, size_t size)
+{
+       /*
+        * dma_alloc_coherent() allocates memory in page size increments. To
+        * avoid reallocating the hardware descriptors when the allocated size
+        * wouldn't change align the requested size to a multiple of the page
+        * size.
+        */
+       size = PAGE_ALIGN(size);
+
+       if (desc->hwdescs.size == size)
+               return;
+
+       if (desc->hwdescs.mem) {
+               dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
+                                 desc->hwdescs.mem, desc->hwdescs.dma);
+               desc->hwdescs.mem = NULL;
+               desc->hwdescs.size = 0;
+       }
+
+       if (!size)
+               return;
+
+       desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
+                                              &desc->hwdescs.dma, GFP_NOWAIT);
+       if (!desc->hwdescs.mem)
+               return;
+
+       desc->hwdescs.size = size;
+}
+
+/*
+ * rcar_dmac_fill_hwdesc - Populate the hardware descriptor list
+ * @chan: the DMA channel
+ * @desc: the descriptor whose chunks are mirrored into hardware descriptors
+ *
+ * Writes one hardware descriptor (source, destination, transfer count) per
+ * transfer chunk into the coherent memory area.
+ *
+ * Return: 0 on success or -ENOMEM if the hardware descriptor memory could
+ * not be allocated.
+ */
+static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
+                                struct rcar_dmac_desc *desc)
+{
+       struct rcar_dmac_xfer_chunk *chunk;
+       struct rcar_dmac_hw_desc *hwdesc;
+
+       rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
+
+       hwdesc = desc->hwdescs.mem;
+       if (!hwdesc)
+               return -ENOMEM;
+
+       list_for_each_entry(chunk, &desc->chunks, node) {
+               hwdesc->sar = chunk->src_addr;
+               hwdesc->dar = chunk->dst_addr;
+               /* TCR counts transfer units, not bytes. */
+               hwdesc->tcr = chunk->size >> desc->xfer_shift;
+               hwdesc++;
+       }
+
+       return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Stop and reset
+ */
+
+/*
+ * rcar_dmac_chan_halt - Stop a channel and mask its interrupt sources
+ * @chan: the DMA channel
+ *
+ * Clears the enable, stage-end and transfer-end bits in CHCR. Callers hold
+ * the channel lock.
+ */
+static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
+{
+       u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+       chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
+                 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
+       rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+}
+
+/*
+ * rcar_dmac_chan_reinit - Release all in-flight descriptors of a channel
+ * @chan: the DMA channel
+ *
+ * Moves every non-free descriptor (pending, active, done, wait) back to the
+ * free lists and clears the running pointer. The channel must already be
+ * halted; this function takes the lock itself.
+ */
+static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
+{
+       struct rcar_dmac_desc *desc, *_desc;
+       unsigned long flags;
+       LIST_HEAD(descs);
+
+       spin_lock_irqsave(&chan->lock, flags);
+
+       /* Move all non-free descriptors to the local lists. */
+       list_splice_init(&chan->desc.pending, &descs);
+       list_splice_init(&chan->desc.active, &descs);
+       list_splice_init(&chan->desc.done, &descs);
+       list_splice_init(&chan->desc.wait, &descs);
+
+       chan->desc.running = NULL;
+
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       /* rcar_dmac_desc_put() takes the lock, so free outside of it. */
+       list_for_each_entry_safe(desc, _desc, &descs, node) {
+               list_del(&desc->node);
+               rcar_dmac_desc_put(chan, desc);
+       }
+}
+
+/* Disable the DMAC globally by clearing the DMAOR register. */
+static void rcar_dmac_stop(struct rcar_dmac *dmac)
+{
+       rcar_dmac_write(dmac, RCAR_DMAOR, 0);
+}
+
+/*
+ * rcar_dmac_abort - Halt and reinitialize every channel of the controller
+ * @dmac: the DMA controller
+ *
+ * Used on the error path (see rcar_dmac_isr_error()) to abort all transfers.
+ */
+static void rcar_dmac_abort(struct rcar_dmac *dmac)
+{
+       unsigned int i;
+
+       /* Stop all channels. */
+       for (i = 0; i < dmac->n_channels; ++i) {
+               struct rcar_dmac_chan *chan = &dmac->channels[i];
+
+               /* Stop and reinitialize the channel. */
+               spin_lock(&chan->lock);
+               rcar_dmac_chan_halt(chan);
+               spin_unlock(&chan->lock);
+
+               rcar_dmac_chan_reinit(chan);
+       }
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors preparation
+ */
+
+/*
+ * rcar_dmac_chan_configure_desc - Derive CHCR value and transfer shift
+ * @chan: the DMA channel
+ * @desc: the descriptor being prepared
+ *
+ * Picks address increment modes, request source and transfer size from the
+ * transfer direction, then stores the log2 transfer size (xfer_shift) and
+ * the CHCR value (including the matching TS field) in the descriptor.
+ */
+static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
+                                         struct rcar_dmac_desc *desc)
+{
+       /* Indexed by xfer_shift: TS field encoding for 1B..64B units. */
+       static const u32 chcr_ts[] = {
+               RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
+               RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
+               RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
+               RCAR_DMACHCR_TS_64B,
+       };
+
+       unsigned int xfer_size;
+       u32 chcr;
+
+       switch (desc->direction) {
+       case DMA_DEV_TO_MEM:
+               chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
+                    | RCAR_DMACHCR_RS_DMARS;
+               xfer_size = chan->src_xfer_size;
+               break;
+
+       case DMA_MEM_TO_DEV:
+               chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
+                    | RCAR_DMACHCR_RS_DMARS;
+               xfer_size = chan->dst_xfer_size;
+               break;
+
+       case DMA_MEM_TO_MEM:
+       default:
+               chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
+                    | RCAR_DMACHCR_RS_AUTO;
+               xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
+               break;
+       }
+
+       desc->xfer_shift = ilog2(xfer_size);
+       desc->chcr = chcr | chcr_ts[desc->xfer_shift];
+}
+
+/*
+ * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and a correct
+ * list manipulation. For slave DMA direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains slave address,
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
+ * and the SG list contains only one element and points at the source buffer.
+ *
+ * Return: the prepared dmaengine transaction descriptor, or NULL if a
+ * descriptor or transfer chunk could not be allocated.
+ */
+static struct dma_async_tx_descriptor *
+rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
+                      unsigned int sg_len, dma_addr_t dev_addr,
+                      enum dma_transfer_direction dir, unsigned long dma_flags,
+                      bool cyclic)
+{
+       struct rcar_dmac_xfer_chunk *chunk;
+       struct rcar_dmac_desc *desc;
+       struct scatterlist *sg;
+       unsigned int nchunks = 0;
+       unsigned int max_chunk_size;
+       unsigned int full_size = 0;
+       bool highmem = false;
+       unsigned int i;
+
+       desc = rcar_dmac_desc_get(chan);
+       if (!desc)
+               return NULL;
+
+       desc->async_tx.flags = dma_flags;
+       desc->async_tx.cookie = -EBUSY;
+
+       desc->cyclic = cyclic;
+       desc->direction = dir;
+
+       rcar_dmac_chan_configure_desc(chan, desc);
+
+       /* TCR is limited to RCAR_DMATCR_MASK transfer units per chunk. */
+       max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+
+       /*
+        * Allocate and fill the transfer chunk descriptors. We own the only
+        * reference to the DMA descriptor, there's no need for locking.
+        */
+       for_each_sg(sgl, sg, sg_len, i) {
+               dma_addr_t mem_addr = sg_dma_address(sg);
+               unsigned int len = sg_dma_len(sg);
+
+               full_size += len;
+
+               /* Split each SG entry into hardware-sized chunks. */
+               while (len) {
+                       unsigned int size = min(len, max_chunk_size);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+                       /*
+                        * Prevent individual transfers from crossing 4GB
+                        * boundaries.
+                        */
+                       if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
+                               size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
+                       if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
+                               size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
+
+                       /*
+                        * Check if either of the source or destination address
+                        * can't be expressed in 32 bits. If so we can't use
+                        * hardware descriptor lists.
+                        */
+                       if (dev_addr >> 32 || mem_addr >> 32)
+                               highmem = true;
+#endif
+
+                       chunk = rcar_dmac_xfer_chunk_get(chan);
+                       if (!chunk) {
+                               /* Releases the already-built chunks too. */
+                               rcar_dmac_desc_put(chan, desc);
+                               return NULL;
+                       }
+
+                       if (dir == DMA_DEV_TO_MEM) {
+                               chunk->src_addr = dev_addr;
+                               chunk->dst_addr = mem_addr;
+                       } else {
+                               chunk->src_addr = mem_addr;
+                               chunk->dst_addr = dev_addr;
+                       }
+
+                       chunk->size = size;
+
+                       dev_dbg(chan->chan.device->dev,
+                               "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
+                               chan->index, chunk, desc, i, sg, size, len,
+                               &chunk->src_addr, &chunk->dst_addr);
+
+                       /* Only memcpy advances the "device" side address. */
+                       mem_addr += size;
+                       if (dir == DMA_MEM_TO_MEM)
+                               dev_addr += size;
+
+                       len -= size;
+
+                       list_add_tail(&chunk->node, &desc->chunks);
+                       nchunks++;
+               }
+       }
+
+       desc->nchunks = nchunks;
+       desc->size = full_size;
+
+       /*
+        * Use hardware descriptor lists if possible when more than one chunk
+        * needs to be transferred (otherwise they don't make much sense).
+        *
+        * The highmem check currently covers the whole transfer. As an
+        * optimization we could use descriptor lists for consecutive lowmem
+        * chunks and direct manual mode for highmem chunks. Whether the
+        * performance improvement would be significant enough compared to the
+        * additional complexity remains to be investigated.
+        */
+       desc->hwdescs.use = !highmem && nchunks > 1;
+       if (desc->hwdescs.use) {
+               /* Fall back to manual mode if the hwdesc allocation fails. */
+               if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
+                       desc->hwdescs.use = false;
+       }
+
+       return &desc->async_tx;
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+/*
+ * rcar_dmac_alloc_chan_resources - dmaengine .device_alloc_chan_resources
+ * @chan: the DMA channel
+ *
+ * Preallocates one page of transfer chunks and one page of descriptors and
+ * powers the device up.
+ *
+ * NOTE(review): if the second allocation fails, the page allocated by the
+ * first call stays on rchan->desc.pages — presumably reclaimed by
+ * rcar_dmac_free_chan_resources(); confirm the dmaengine core calls it on
+ * this error path.
+ *
+ * Return: a negative error code on failure, otherwise the (non-negative)
+ * pm_runtime_get_sync() result.
+ */
+static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       int ret;
+
+       INIT_LIST_HEAD(&rchan->desc.chunks_free);
+       INIT_LIST_HEAD(&rchan->desc.pages);
+
+       /* Preallocate descriptors. */
+       ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
+       if (ret < 0)
+               return -ENOMEM;
+
+       ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
+       if (ret < 0)
+               return -ENOMEM;
+
+       return pm_runtime_get_sync(chan->device->dev);
+}
+
+/*
+ * rcar_dmac_free_chan_resources - dmaengine .device_free_chan_resources
+ * @chan: the DMA channel
+ *
+ * Halts the channel, releases its DMARS module ID, frees the coherent
+ * hardware descriptor memory of every descriptor and then the backing pages,
+ * and finally drops the runtime PM reference.
+ */
+static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+       struct rcar_dmac_desc_page *page, *_page;
+       struct rcar_dmac_desc *desc;
+       LIST_HEAD(list);
+
+       /* Protect against ISR */
+       spin_lock_irq(&rchan->lock);
+       rcar_dmac_chan_halt(rchan);
+       spin_unlock_irq(&rchan->lock);
+
+       /* Now no new interrupts will occur */
+
+       if (rchan->mid_rid >= 0) {
+               /* The caller is holding dma_list_mutex */
+               clear_bit(rchan->mid_rid, dmac->modules);
+               rchan->mid_rid = -EINVAL;
+       }
+
+       /* Gather every descriptor so the hwdesc memory can be released. */
+       list_splice_init(&rchan->desc.free, &list);
+       list_splice_init(&rchan->desc.pending, &list);
+       list_splice_init(&rchan->desc.active, &list);
+       list_splice_init(&rchan->desc.done, &list);
+       list_splice_init(&rchan->desc.wait, &list);
+
+       /* Size 0 frees the coherent hardware descriptor memory. */
+       list_for_each_entry(desc, &list, node)
+               rcar_dmac_realloc_hwdesc(rchan, desc, 0);
+
+       /* The descriptors and chunks themselves live in these pages. */
+       list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
+               list_del(&page->node);
+               free_page((unsigned long)page);
+       }
+
+       pm_runtime_put(chan->device->dev);
+}
+
+/*
+ * rcar_dmac_prep_dma_memcpy - dmaengine .device_prep_dma_memcpy
+ *
+ * Wraps the source buffer in a single-entry SG list and hands it to the
+ * common rcar_dmac_chan_prep_sg() path as a DMA_MEM_TO_MEM transfer.
+ *
+ * Return: the prepared transaction descriptor, or NULL if @len is 0 or
+ * preparation failed.
+ */
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+                         dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       struct scatterlist sgl;
+
+       if (!len)
+               return NULL;
+
+       sg_init_table(&sgl, 1);
+       sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
+                   offset_in_page(dma_src));
+       sg_dma_address(&sgl) = dma_src;
+       sg_dma_len(&sgl) = len;
+
+       return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
+                                     DMA_MEM_TO_MEM, flags, false);
+}
+
+/*
+ * rcar_dmac_prep_slave_sg - dmaengine .device_prep_slave_sg
+ *
+ * Prepares a slave transfer through the common rcar_dmac_chan_prep_sg()
+ * path, using the slave address configured by rcar_dmac_device_config()
+ * for the direction of the transfer.
+ *
+ * Return: the prepared transaction descriptor, or NULL if the channel has
+ * no DMARS ID (generic channel), @sg_len is 0, or preparation failed.
+ */
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                       unsigned int sg_len, enum dma_transfer_direction dir,
+                       unsigned long flags, void *context)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       dma_addr_t dev_addr;
+
+       /* Someone calling slave DMA on a generic channel? */
+       if (rchan->mid_rid < 0 || !sg_len) {
+               /* sg_len is unsigned, print it with %u. */
+               dev_warn(chan->device->dev,
+                        "%s: bad parameter: len=%u, id=%d\n",
+                        __func__, sg_len, rchan->mid_rid);
+               return NULL;
+       }
+
+       dev_addr = dir == DMA_DEV_TO_MEM
+                ? rchan->src_slave_addr : rchan->dst_slave_addr;
+       return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+                                     dir, flags, false);
+}
+
+#define RCAR_DMAC_MAX_SG_LEN   32
+
+/*
+ * rcar_dmac_prep_dma_cyclic - dmaengine .device_prep_dma_cyclic
+ *
+ * Splits the circular buffer into buf_len / period_len periods, builds a
+ * temporary SG list (one entry per period) and prepares the transfer
+ * through the common rcar_dmac_chan_prep_sg() path with cyclic = true.
+ *
+ * Return: the prepared transaction descriptor, or NULL on bad parameters,
+ * too many periods, or allocation failure.
+ */
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+                         size_t buf_len, size_t period_len,
+                         enum dma_transfer_direction dir, unsigned long flags)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       struct dma_async_tx_descriptor *desc;
+       struct scatterlist *sgl;
+       dma_addr_t dev_addr;
+       unsigned int sg_len;
+       unsigned int i;
+
+       /* Someone calling slave DMA on a generic channel? */
+       if (rchan->mid_rid < 0 || buf_len < period_len) {
+               dev_warn(chan->device->dev,
+                       "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+                       __func__, buf_len, period_len, rchan->mid_rid);
+               return NULL;
+       }
+
+       sg_len = buf_len / period_len;
+       if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
+               /* Fixed typo ("exceds"), %u for unsigned sg_len, added \n. */
+               dev_err(chan->device->dev,
+                       "chan%u: sg length %u exceeds limit %d\n",
+                       rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
+               return NULL;
+       }
+
+       /*
+        * Allocate the sg list dynamically as it would consume too much stack
+        * space.
+        */
+       sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
+       if (!sgl)
+               return NULL;
+
+       sg_init_table(sgl, sg_len);
+
+       for (i = 0; i < sg_len; ++i) {
+               dma_addr_t src = buf_addr + (period_len * i);
+
+               sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+                           offset_in_page(src));
+               sg_dma_address(&sgl[i]) = src;
+               sg_dma_len(&sgl[i]) = period_len;
+       }
+
+       dev_addr = dir == DMA_DEV_TO_MEM
+                ? rchan->src_slave_addr : rchan->dst_slave_addr;
+       desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+                                     dir, flags, true);
+
+       /* The chunks hold copies of the addresses; the SG list can go. */
+       kfree(sgl);
+       return desc;
+}
+
+/*
+ * rcar_dmac_device_config - dmaengine .device_config
+ * @chan: the DMA channel
+ * @cfg: the slave configuration (addresses and bus widths)
+ *
+ * Return: always 0.
+ */
+static int rcar_dmac_device_config(struct dma_chan *chan,
+                                  struct dma_slave_config *cfg)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+       /*
+        * We could lock this, but you shouldn't be configuring the
+        * channel, while using it...
+        */
+       rchan->src_slave_addr = cfg->src_addr;
+       rchan->dst_slave_addr = cfg->dst_addr;
+       /* The addr_width enum values are the widths in bytes. */
+       rchan->src_xfer_size = cfg->src_addr_width;
+       rchan->dst_xfer_size = cfg->dst_addr_width;
+
+       return 0;
+}
+
+/*
+ * rcar_dmac_chan_terminate_all - dmaengine .device_terminate_all
+ * @chan: the DMA channel
+ *
+ * Halts the channel and releases all of its queued descriptors.
+ *
+ * Return: always 0.
+ */
+static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&rchan->lock, flags);
+       rcar_dmac_chan_halt(rchan);
+       spin_unlock_irqrestore(&rchan->lock, flags);
+
+       /*
+        * FIXME: No new interrupt can occur now, but the IRQ thread might still
+        * be running.
+        */
+
+       rcar_dmac_chan_reinit(rchan);
+
+       return 0;
+}
+
+/*
+ * rcar_dmac_chan_get_residue - Compute bytes left for a cookie
+ * @chan: the DMA channel
+ * @cookie: the cookie to report the residue for
+ *
+ * Locking: the caller holds the channel lock (see rcar_dmac_tx_status()).
+ *
+ * Return: the number of bytes not yet transferred, or 0 if no descriptor
+ * is running.
+ */
+static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
+                                              dma_cookie_t cookie)
+{
+       struct rcar_dmac_desc *desc = chan->desc.running;
+       struct rcar_dmac_xfer_chunk *running = NULL;
+       struct rcar_dmac_xfer_chunk *chunk;
+       unsigned int residue = 0;
+       unsigned int dptr = 0;
+
+       if (!desc)
+               return 0;
+
+       /*
+        * If the cookie doesn't correspond to the currently running transfer
+        * then the descriptor hasn't been processed yet, and the residue is
+        * equal to the full descriptor size.
+        */
+       if (cookie != desc->async_tx.cookie)
+               return desc->size;
+
+       /*
+        * In descriptor mode the descriptor running pointer is not maintained
+        * by the interrupt handler, find the running descriptor from the
+        * descriptor pointer field in the CHCRB register. In non-descriptor
+        * mode just use the running descriptor pointer.
+        */
+       if (desc->hwdescs.use) {
+               dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+                       RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+               WARN_ON(dptr >= desc->nchunks);
+       } else {
+               running = desc->running;
+       }
+
+       /*
+        * Compute the size of all chunks still to be transferred, walking the
+        * chunk list backwards until the running chunk (or the hardware
+        * descriptor pointer position) is reached.
+        */
+       list_for_each_entry_reverse(chunk, &desc->chunks, node) {
+               if (chunk == running || ++dptr == desc->nchunks)
+                       break;
+
+               residue += chunk->size;
+       }
+
+       /* Add the residue for the current chunk. */
+       residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
+
+       return residue;
+}
+
+/*
+ * rcar_dmac_tx_status - dmaengine .device_tx_status
+ *
+ * Reports the cookie's completion status and, unless the transfer is already
+ * complete, the residue computed under the channel lock.
+ */
+static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
+                                          dma_cookie_t cookie,
+                                          struct dma_tx_state *txstate)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       enum dma_status status;
+       unsigned long flags;
+       unsigned int residue;
+
+       status = dma_cookie_status(chan, cookie, txstate);
+       if (status == DMA_COMPLETE || !txstate)
+               return status;
+
+       spin_lock_irqsave(&rchan->lock, flags);
+       residue = rcar_dmac_chan_get_residue(rchan, cookie);
+       spin_unlock_irqrestore(&rchan->lock, flags);
+
+       dma_set_residue(txstate, residue);
+
+       return status;
+}
+
+/*
+ * rcar_dmac_issue_pending - dmaengine .device_issue_pending
+ * @chan: the DMA channel
+ *
+ * Moves all pending descriptors to the active list and, if the channel is
+ * idle, starts the first active descriptor on the hardware.
+ */
+static void rcar_dmac_issue_pending(struct dma_chan *chan)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&rchan->lock, flags);
+
+       if (list_empty(&rchan->desc.pending))
+               goto done;
+
+       /* Append the pending list to the active list. */
+       list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
+
+       /*
+        * If no transfer is running pick the first descriptor from the active
+        * list and start the transfer.
+        */
+       if (!rchan->desc.running) {
+               struct rcar_dmac_desc *desc;
+
+               desc = list_first_entry(&rchan->desc.active,
+                                       struct rcar_dmac_desc, node);
+               rchan->desc.running = desc;
+
+               rcar_dmac_chan_start_xfer(rchan);
+       }
+
+done:
+       spin_unlock_irqrestore(&rchan->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+/*
+ * rcar_dmac_isr_desc_stage_end - Handle a descriptor stage end interrupt
+ * @chan: the DMA channel
+ *
+ * Only raised in hardware descriptor mode for cyclic transfers. Called from
+ * rcar_dmac_isr_channel() with the channel lock held.
+ *
+ * Return: IRQ_WAKE_THREAD to notify the user, or IRQ_NONE if no cyclic
+ * descriptor is running (unexpected).
+ */
+static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
+{
+       struct rcar_dmac_desc *desc = chan->desc.running;
+       unsigned int stage;
+
+       if (WARN_ON(!desc || !desc->cyclic)) {
+               /*
+                * This should never happen, there should always be a running
+                * cyclic descriptor when a descriptor stage end interrupt is
+                * triggered. Warn and return.
+                */
+               return IRQ_NONE;
+       }
+
+       /* Program the interrupt pointer to the next stage. */
+       stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+                RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+       rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
+
+       return IRQ_WAKE_THREAD;
+}
+
+/*
+ * rcar_dmac_isr_transfer_end - Handle a transfer end interrupt
+ * @chan: the DMA channel
+ *
+ * Advances the running chunk (non-descriptor mode), completes the descriptor
+ * and queues the next one, then restarts the hardware if anything is left to
+ * run. Called from rcar_dmac_isr_channel() with the channel lock held.
+ *
+ * Return: IRQ_WAKE_THREAD when the IRQ thread must notify the user,
+ * IRQ_HANDLED for an intermediate non-cyclic chunk, or IRQ_NONE if no
+ * descriptor is running (unexpected).
+ */
+static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
+{
+       struct rcar_dmac_desc *desc = chan->desc.running;
+       irqreturn_t ret = IRQ_WAKE_THREAD;
+
+       if (WARN_ON_ONCE(!desc)) {
+               /*
+                * This should never happen, there should always be a running
+                * descriptor when a transfer end interrupt is triggered. Warn
+                * and return.
+                */
+               return IRQ_NONE;
+       }
+
+       /*
+        * The transfer end interrupt isn't generated for each chunk when using
+        * descriptor mode. Only update the running chunk pointer in
+        * non-descriptor mode.
+        */
+       if (!desc->hwdescs.use) {
+               /*
+                * If we haven't completed the last transfer chunk simply move
+                * to the next one. Only wake the IRQ thread if the transfer is
+                * cyclic.
+                */
+               if (!list_is_last(&desc->running->node, &desc->chunks)) {
+                       desc->running = list_next_entry(desc->running, node);
+                       if (!desc->cyclic)
+                               ret = IRQ_HANDLED;
+                       goto done;
+               }
+
+               /*
+                * We've completed the last transfer chunk. If the transfer is
+                * cyclic, move back to the first one.
+                */
+               if (desc->cyclic) {
+                       desc->running =
+                               list_first_entry(&desc->chunks,
+                                                struct rcar_dmac_xfer_chunk,
+                                                node);
+                       goto done;
+               }
+       }
+
+       /* The descriptor is complete, move it to the done list. */
+       list_move_tail(&desc->node, &chan->desc.done);
+
+       /* Queue the next descriptor, if any. */
+       if (!list_empty(&chan->desc.active))
+               chan->desc.running = list_first_entry(&chan->desc.active,
+                                                     struct rcar_dmac_desc,
+                                                     node);
+       else
+               chan->desc.running = NULL;
+
+done:
+       /* Kick the hardware again for the next chunk or descriptor. */
+       if (chan->desc.running)
+               rcar_dmac_chan_start_xfer(chan);
+
+       return ret;
+}
+
+/*
+ * rcar_dmac_isr_channel - Per-channel hard IRQ handler
+ * @irq: the IRQ number
+ * @dev: the rcar_dmac_chan (registered as the IRQ cookie)
+ *
+ * Acknowledges the stage-end (DSE) and transfer-end (TE) conditions in CHCR
+ * and dispatches to the matching handler. Returns IRQ_WAKE_THREAD when
+ * rcar_dmac_isr_channel_thread() must run to notify users.
+ */
+static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
+{
+       u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
+       struct rcar_dmac_chan *chan = dev;
+       irqreturn_t ret = IRQ_NONE;
+       u32 chcr;
+
+       spin_lock(&chan->lock);
+
+       chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+       /* On transfer end also clear DE; the handler restarts if needed. */
+       if (chcr & RCAR_DMACHCR_TE)
+               mask |= RCAR_DMACHCR_DE;
+       rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
+
+       if (chcr & RCAR_DMACHCR_DSE)
+               ret |= rcar_dmac_isr_desc_stage_end(chan);
+
+       if (chcr & RCAR_DMACHCR_TE)
+               ret |= rcar_dmac_isr_transfer_end(chan);
+
+       spin_unlock(&chan->lock);
+
+       return ret;
+}
+
+/*
+ * rcar_dmac_isr_channel_thread - Per-channel threaded IRQ handler
+ * @irq: the IRQ number
+ * @dev: the rcar_dmac_chan (registered as the IRQ cookie)
+ *
+ * Runs the user callbacks outside of atomic context: once per chunk for the
+ * running cyclic descriptor, and once per completed descriptor, which is
+ * then moved to the ack wait list. The channel lock is dropped around each
+ * callback invocation.
+ */
+static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
+{
+       struct rcar_dmac_chan *chan = dev;
+       struct rcar_dmac_desc *desc;
+
+       spin_lock_irq(&chan->lock);
+
+       /* For cyclic transfers notify the user after every chunk. */
+       if (chan->desc.running && chan->desc.running->cyclic) {
+               dma_async_tx_callback callback;
+               void *callback_param;
+
+               desc = chan->desc.running;
+               callback = desc->async_tx.callback;
+               callback_param = desc->async_tx.callback_param;
+
+               if (callback) {
+                       /* Never call user callbacks with the lock held. */
+                       spin_unlock_irq(&chan->lock);
+                       callback(callback_param);
+                       spin_lock_irq(&chan->lock);
+               }
+       }
+
+       /*
+        * Call the callback function for all descriptors on the done list and
+        * move them to the ack wait list.
+        */
+       while (!list_empty(&chan->desc.done)) {
+               desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
+                                       node);
+               dma_cookie_complete(&desc->async_tx);
+               list_del(&desc->node);
+
+               if (desc->async_tx.callback) {
+                       spin_unlock_irq(&chan->lock);
+                       /*
+                        * We own the only reference to this descriptor, we can
+                        * safely dereference it without holding the channel
+                        * lock.
+                        */
+                       desc->async_tx.callback(desc->async_tx.callback_param);
+                       spin_lock_irq(&chan->lock);
+               }
+
+               list_add_tail(&desc->node, &chan->desc.wait);
+       }
+
+       spin_unlock_irq(&chan->lock);
+
+       /* Recycle all acked descriptors. */
+       rcar_dmac_desc_recycle_acked(chan);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Controller-level error interrupt handler.
+ *
+ * Returns IRQ_NONE unless the address error (AE) flag is set in DMAOR.
+ * On an address error the whole controller is stopped, all channel
+ * transfers are aborted, and the DMAC is reinitialized.
+ */
+static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
+{
+       struct rcar_dmac *dmac = data;
+
+       if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
+               return IRQ_NONE;
+
+       /*
+        * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
+        * abort transfers on all channels, and reinitialize the DMAC.
+        */
+       rcar_dmac_stop(dmac);
+       rcar_dmac_abort(dmac);
+       rcar_dmac_init(dmac);
+
+       return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+/*
+ * dma_request_channel() filter used by the OF xlate callback.
+ *
+ * Accepts a channel only if it belongs to this rcar-dmac instance
+ * (identified by the device_config op and the matching OF node) and the
+ * requested module (dma_spec->args[0]) has not already been claimed;
+ * test_and_set_bit() atomically claims it on success.
+ */
+static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+       struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+       struct of_phandle_args *dma_spec = arg;
+
+       /*
+        * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate
+        * function knows from which device it wants to allocate a channel from,
+        * and would be perfectly capable of selecting the channel it wants.
+        * Forcing it to call dma_request_channel() and iterate through all
+        * channels from all controllers is just pointless.
+        */
+       if (chan->device->device_config != rcar_dmac_device_config ||
+           dma_spec->np != chan->device->dev->of_node)
+               return false;
+
+       return !test_and_set_bit(dma_spec->args[0], dmac->modules);
+}
+
+/*
+ * OF translation callback: map a one-cell DT DMA specifier to a channel.
+ *
+ * The single specifier cell carries the MID/RID value, which is stored in
+ * the channel after allocation. Returns NULL when the specifier is
+ * malformed or no channel could be allocated.
+ */
+static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
+                                          struct of_dma *ofdma)
+{
+       struct rcar_dmac_chan *rchan;
+       struct dma_chan *chan;
+       dma_cap_mask_t mask;
+
+       if (dma_spec->args_count != 1)
+               return NULL;
+
+       /* Only slave DMA channels can be allocated via DT */
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+
+       chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
+       if (!chan)
+               return NULL;
+
+       rchan = to_rcar_dmac_chan(chan);
+       rchan->mid_rid = dma_spec->args[0];
+
+       return chan;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+/* System sleep suspend handler — currently a no-op, always succeeds. */
+static int rcar_dmac_sleep_suspend(struct device *dev)
+{
+       /*
+        * TODO: Wait for the current transfer to complete and stop the device.
+        */
+       return 0;
+}
+
+/* System sleep resume handler — currently a no-op, always succeeds. */
+static int rcar_dmac_sleep_resume(struct device *dev)
+{
+       /* TODO: Resume transfers, if any. */
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+/* Runtime PM suspend — nothing to save, always succeeds. */
+static int rcar_dmac_runtime_suspend(struct device *dev)
+{
+       return 0;
+}
+
+/*
+ * Runtime PM resume — reinitialize the controller registers, which are
+ * presumably lost while the device is powered down (TODO confirm).
+ */
+static int rcar_dmac_runtime_resume(struct device *dev)
+{
+       struct rcar_dmac *dmac = dev_get_drvdata(dev);
+
+       return rcar_dmac_init(dmac);
+}
+#endif
+
+/* PM operations: system sleep and runtime PM callbacks defined above. */
+static const struct dev_pm_ops rcar_dmac_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+       SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
+                          NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/*
+ * Initialize one DMA channel: per-channel state, descriptor lists, the
+ * threaded channel interrupt, and registration with the DMA engine's
+ * channel list.
+ *
+ * @dmac:  the parent controller
+ * @rchan: the channel to initialize
+ * @index: hardware channel index (used for register offset and IRQ name)
+ *
+ * Returns 0 on success or a negative error code (-ENODEV if the channel
+ * IRQ is missing, -ENOMEM on allocation failure, or the request_irq error).
+ */
+static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
+                               struct rcar_dmac_chan *rchan,
+                               unsigned int index)
+{
+       struct platform_device *pdev = to_platform_device(dmac->dev);
+       struct dma_chan *chan = &rchan->chan;
+       /*
+        * "ch" + up to two digits + NUL = 5 bytes; consistent with the
+        * n_channels < 100 check in rcar_dmac_parse_of().
+        */
+       char pdev_irqname[5];
+       char *irqname;
+       int irq;
+       int ret;
+
+       rchan->index = index;
+       rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
+       /* -EINVAL marks the MID/RID as unassigned until OF xlate sets it. */
+       rchan->mid_rid = -EINVAL;
+
+       spin_lock_init(&rchan->lock);
+
+       INIT_LIST_HEAD(&rchan->desc.free);
+       INIT_LIST_HEAD(&rchan->desc.pending);
+       INIT_LIST_HEAD(&rchan->desc.active);
+       INIT_LIST_HEAD(&rchan->desc.done);
+       INIT_LIST_HEAD(&rchan->desc.wait);
+
+       /* Request the channel interrupt. */
+       sprintf(pdev_irqname, "ch%u", index);
+       irq = platform_get_irq_byname(pdev, pdev_irqname);
+       if (irq < 0) {
+               dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+               return -ENODEV;
+       }
+
+       /* devm-allocated name must outlive the IRQ, hence not on the stack. */
+       irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+                                dev_name(dmac->dev), index);
+       if (!irqname)
+               return -ENOMEM;
+
+       ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
+                                       rcar_dmac_isr_channel_thread, 0,
+                                       irqname, rchan);
+       if (ret) {
+               dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
+               return ret;
+       }
+
+       /*
+        * Initialize the DMA engine channel and add it to the DMA engine
+        * channels list.
+        */
+       chan->device = &dmac->engine;
+       dma_cookie_init(chan);
+
+       list_add_tail(&chan->device_node, &dmac->engine.channels);
+
+       return 0;
+}
+
+/*
+ * Parse the controller's DT properties.
+ *
+ * Reads the mandatory "dma-channels" property into dmac->n_channels and
+ * rejects values outside 1..99 (the upper bound keeps the "chNN" IRQ name
+ * buffer in rcar_dmac_chan_probe() within bounds).
+ *
+ * Returns 0 on success, a negative error code on a missing or invalid
+ * property.
+ */
+static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
+{
+       struct device_node *np = dev->of_node;
+       int ret;
+
+       ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+       if (ret < 0) {
+               dev_err(dev, "unable to read dma-channels property\n");
+               return ret;
+       }
+
+       /*
+        * NOTE(review): n_channels is read as a u32, so "<= 0" is
+        * effectively "== 0" — presumably intentional, but "== 0" would be
+        * clearer if the field is unsigned; verify the field's type.
+        */
+       if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+               dev_err(dev, "invalid number of channels %u\n",
+                       dmac->n_channels);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Platform driver probe.
+ *
+ * Sequence: allocate and parse device state from DT, map the register
+ * resource, request the controller error IRQ, enable runtime PM and reset
+ * the device, initialize all channels, register the OF DMA provider, and
+ * finally register the DMA engine device.
+ *
+ * Returns 0 on success or a negative error code; on failure after runtime
+ * PM was enabled, the error path unregisters the OF provider and disables
+ * runtime PM.
+ */
+static int rcar_dmac_probe(struct platform_device *pdev)
+{
+       /* Slave bus widths supported by the hardware: 1 to 64 bytes. */
+       const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
+               DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
+               DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
+               DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
+       unsigned int channels_offset = 0;
+       struct dma_device *engine;
+       struct rcar_dmac *dmac;
+       struct resource *mem;
+       unsigned int i;
+       char *irqname;
+       int irq;
+       int ret;
+
+       dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+       if (!dmac)
+               return -ENOMEM;
+
+       dmac->dev = &pdev->dev;
+       platform_set_drvdata(pdev, dmac);
+
+       ret = rcar_dmac_parse_of(&pdev->dev, dmac);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
+        * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
+        * is connected to microTLB 0 on currently supported platforms, so we
+        * can't use it with the IPMMU. As the IOMMU API operates at the device
+        * level we can't disable it selectively, so ignore channel 0 for now if
+        * the device is part of an IOMMU group.
+        */
+       if (pdev->dev.iommu_group) {
+               dmac->n_channels--;
+               channels_offset = 1;
+       }
+
+       dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+                                     sizeof(*dmac->channels), GFP_KERNEL);
+       if (!dmac->channels)
+               return -ENOMEM;
+
+       /* Request resources. */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(dmac->iomem))
+               return PTR_ERR(dmac->iomem);
+
+       irq = platform_get_irq_byname(pdev, "error");
+       if (irq < 0) {
+               dev_err(&pdev->dev, "no error IRQ specified\n");
+               return -ENODEV;
+       }
+
+       irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
+                                dev_name(dmac->dev));
+       if (!irqname)
+               return -ENOMEM;
+
+       ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
+                              irqname, dmac);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
+                       irq, ret);
+               return ret;
+       }
+
+       /* Enable runtime PM and initialize the device. */
+       pm_runtime_enable(&pdev->dev);
+       ret = pm_runtime_get_sync(&pdev->dev);
+       if (ret < 0) {
+               /*
+                * NOTE(review): this path returns with runtime PM still
+                * enabled and the usage count raised by get_sync — consider
+                * pm_runtime_put_noidle() + pm_runtime_disable() here; verify
+                * against the PM core's error semantics.
+                */
+               dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
+               return ret;
+       }
+
+       ret = rcar_dmac_init(dmac);
+       pm_runtime_put(&pdev->dev);
+
+       if (ret) {
+               dev_err(&pdev->dev, "failed to reset device\n");
+               goto error;
+       }
+
+       /* Initialize the channels. */
+       INIT_LIST_HEAD(&dmac->engine.channels);
+
+       for (i = 0; i < dmac->n_channels; ++i) {
+               ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
+                                          i + channels_offset);
+               if (ret < 0)
+                       goto error;
+       }
+
+       /* Register the DMAC as a DMA provider for DT. */
+       ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
+                                        NULL);
+       if (ret < 0)
+               goto error;
+
+       /*
+        * Register the DMA engine device.
+        *
+        * Default transfer size of 32 bytes requires 32-byte alignment.
+        */
+       engine = &dmac->engine;
+       dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+       dma_cap_set(DMA_SLAVE, engine->cap_mask);
+
+       engine->dev = &pdev->dev;
+       engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
+
+       engine->src_addr_widths = widths;
+       engine->dst_addr_widths = widths;
+       engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+       engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+       engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
+       engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
+       engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
+       engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
+       engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
+       engine->device_config = rcar_dmac_device_config;
+       engine->device_terminate_all = rcar_dmac_chan_terminate_all;
+       engine->device_tx_status = rcar_dmac_tx_status;
+       engine->device_issue_pending = rcar_dmac_issue_pending;
+
+       ret = dma_async_device_register(engine);
+       if (ret < 0)
+               goto error;
+
+       return 0;
+
+error:
+       /*
+        * NOTE(review): of_dma_controller_free() is reached even on paths
+        * where the controller was never registered — presumably harmless;
+        * verify.
+        */
+       of_dma_controller_free(pdev->dev.of_node);
+       pm_runtime_disable(&pdev->dev);
+       return ret;
+}
+
+/*
+ * Platform driver remove: unregister the OF provider and the DMA engine
+ * device, then disable runtime PM. devm-managed resources (IRQs, memory,
+ * iomem) are released automatically afterwards.
+ */
+static int rcar_dmac_remove(struct platform_device *pdev)
+{
+       struct rcar_dmac *dmac = platform_get_drvdata(pdev);
+
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&dmac->engine);
+
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+/* Stop the whole controller on system shutdown/kexec. */
+static void rcar_dmac_shutdown(struct platform_device *pdev)
+{
+       struct rcar_dmac *dmac = platform_get_drvdata(pdev);
+
+       rcar_dmac_stop(dmac);
+}
+
+/* DT match table; exported for module autoloading. */
+static const struct of_device_id rcar_dmac_of_ids[] = {
+       { .compatible = "renesas,rcar-dmac", },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
+
+/* Platform driver glue and module metadata. */
+static struct platform_driver rcar_dmac_driver = {
+       .driver         = {
+               .pm     = &rcar_dmac_pm,
+               .name   = "rcar-dmac",
+               .of_match_table = rcar_dmac_of_ids,
+       },
+       .probe          = rcar_dmac_probe,
+       .remove         = rcar_dmac_remove,
+       .shutdown       = rcar_dmac_shutdown,
+};
+
+module_platform_driver(rcar_dmac_driver);
+
+MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL v2");
index 20a6f6f2a01868da26f6aeebbc95ddff0c895ae3..749f26ecd3b32b0a80a75ca27b7fbcef851ee62c 100644 (file)
@@ -534,6 +534,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
 
 static int hpb_dmae_probe(struct platform_device *pdev)
 {
+       const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
+               DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
        struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
        struct hpb_dmae_device *hpbdev;
        struct dma_device *dma_dev;
@@ -595,6 +597,10 @@ static int hpb_dmae_probe(struct platform_device *pdev)
 
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+       dma_dev->src_addr_widths = widths;
+       dma_dev->dst_addr_widths = widths;
+       dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+       dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
        hpbdev->shdma_dev.ops = &hpb_dmae_ops;
        hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
index 3a2adb131d462597c2ee07fe7819de75e4ef239e..8ee383d339a513187fb9c6f354cc96b8407eb22d 100644 (file)
@@ -729,57 +729,50 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
        return desc;
 }
 
-static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                         unsigned long arg)
+static int shdma_terminate_all(struct dma_chan *chan)
 {
        struct shdma_chan *schan = to_shdma_chan(chan);
        struct shdma_dev *sdev = to_shdma_dev(chan->device);
        const struct shdma_ops *ops = sdev->ops;
-       struct dma_slave_config *config;
        unsigned long flags;
-       int ret;
 
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               spin_lock_irqsave(&schan->chan_lock, flags);
-               ops->halt_channel(schan);
+       spin_lock_irqsave(&schan->chan_lock, flags);
+       ops->halt_channel(schan);
 
-               if (ops->get_partial && !list_empty(&schan->ld_queue)) {
-                       /* Record partial transfer */
-                       struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
-                                               struct shdma_desc, node);
-                       desc->partial = ops->get_partial(schan, desc);
-               }
+       if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+               /* Record partial transfer */
+               struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+                                                          struct shdma_desc, node);
+               desc->partial = ops->get_partial(schan, desc);
+       }
 
-               spin_unlock_irqrestore(&schan->chan_lock, flags);
+       spin_unlock_irqrestore(&schan->chan_lock, flags);
 
-               shdma_chan_ld_cleanup(schan, true);
-               break;
-       case DMA_SLAVE_CONFIG:
-               /*
-                * So far only .slave_id is used, but the slave drivers are
-                * encouraged to also set a transfer direction and an address.
-                */
-               if (!arg)
-                       return -EINVAL;
-               /*
-                * We could lock this, but you shouldn't be configuring the
-                * channel, while using it...
-                */
-               config = (struct dma_slave_config *)arg;
-               ret = shdma_setup_slave(schan, config->slave_id,
-                                       config->direction == DMA_DEV_TO_MEM ?
-                                       config->src_addr : config->dst_addr);
-               if (ret < 0)
-                       return ret;
-               break;
-       default:
-               return -ENXIO;
-       }
+       shdma_chan_ld_cleanup(schan, true);
 
        return 0;
 }
 
+/*
+ * dmaengine device_config operation, split out of the former
+ * shdma_control() DMA_SLAVE_CONFIG case.
+ *
+ * Forwards the slave_id and the direction-dependent address (src_addr for
+ * DMA_DEV_TO_MEM, dst_addr otherwise) to shdma_setup_slave(); returns its
+ * result, or -EINVAL on a NULL config.
+ */
+static int shdma_config(struct dma_chan *chan,
+                       struct dma_slave_config *config)
+{
+       struct shdma_chan *schan = to_shdma_chan(chan);
+
+       /*
+        * So far only .slave_id is used, but the slave drivers are
+        * encouraged to also set a transfer direction and an address.
+        */
+       if (!config)
+               return -EINVAL;
+       /*
+        * We could lock this, but you shouldn't be configuring the
+        * channel, while using it...
+        */
+       return shdma_setup_slave(schan, config->slave_id,
+                                config->direction == DMA_DEV_TO_MEM ?
+                                config->src_addr : config->dst_addr);
+}
+
 static void shdma_issue_pending(struct dma_chan *chan)
 {
        struct shdma_chan *schan = to_shdma_chan(chan);
@@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
        /* Compulsory for DMA_SLAVE fields */
        dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
-       dma_dev->device_control = shdma_control;
+       dma_dev->device_config = shdma_config;
+       dma_dev->device_terminate_all = shdma_terminate_all;
 
        dma_dev->dev = dev;
 
index aec8a84784a469d70f5958db4e1126790d21806b..b2431aa300331270fa949ea5c14c41602a6c8e69 100644 (file)
@@ -588,6 +588,7 @@ static void sh_dmae_shutdown(struct platform_device *pdev)
        sh_dmae_ctl_stop(shdev);
 }
 
+#ifdef CONFIG_PM
 static int sh_dmae_runtime_suspend(struct device *dev)
 {
        return 0;
@@ -599,8 +600,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
 
        return sh_dmae_rst(shdev);
 }
+#endif
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int sh_dmae_suspend(struct device *dev)
 {
        return 0;
@@ -632,16 +634,12 @@ static int sh_dmae_resume(struct device *dev)
 
        return 0;
 }
-#else
-#define sh_dmae_suspend NULL
-#define sh_dmae_resume NULL
 #endif
 
 static const struct dev_pm_ops sh_dmae_pm = {
-       .suspend                = sh_dmae_suspend,
-       .resume                 = sh_dmae_resume,
-       .runtime_suspend        = sh_dmae_runtime_suspend,
-       .runtime_resume         = sh_dmae_runtime_resume,
+       SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
+       SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
+                          NULL)
 };
 
 static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
@@ -684,6 +682,10 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 
 static int sh_dmae_probe(struct platform_device *pdev)
 {
+       const enum dma_slave_buswidth widths =
+               DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
+               DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
+               DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
        const struct sh_dmae_pdata *pdata;
        unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
        int chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -746,6 +748,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
                        return PTR_ERR(shdev->dmars);
        }
 
+       dma_dev->src_addr_widths = widths;
+       dma_dev->dst_addr_widths = widths;
+       dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+       dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
        if (!pdata->slave_only)
                dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        if (pdata->slave && pdata->slave_num)
index 3492a5f91d31553219189628148947a6af6ca4eb..d0086e9f20824efa697144a8ea4f5adf345f3635 100644 (file)
@@ -281,9 +281,10 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
        return cookie;
 }
 
-static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
-       struct dma_slave_config *config)
+static int sirfsoc_dma_slave_config(struct dma_chan *chan,
+                                   struct dma_slave_config *config)
 {
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;
 
        if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
@@ -297,8 +298,9 @@ static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
        return 0;
 }
 
-static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
 {
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;
@@ -327,8 +329,9 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
        return 0;
 }
 
-static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
 {
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;
@@ -348,8 +351,9 @@ static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
        return 0;
 }
 
-static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
 {
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;
@@ -369,30 +373,6 @@ static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
        return 0;
 }
 
-static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-       unsigned long arg)
-{
-       struct dma_slave_config *config;
-       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
-
-       switch (cmd) {
-       case DMA_PAUSE:
-               return sirfsoc_dma_pause_chan(schan);
-       case DMA_RESUME:
-               return sirfsoc_dma_resume_chan(schan);
-       case DMA_TERMINATE_ALL:
-               return sirfsoc_dma_terminate_all(schan);
-       case DMA_SLAVE_CONFIG:
-               config = (struct dma_slave_config *)arg;
-               return sirfsoc_dma_slave_config(schan, config);
-
-       default:
-               break;
-       }
-
-       return -ENOSYS;
-}
-
 /* Alloc channel resources */
 static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -648,18 +628,6 @@ EXPORT_SYMBOL(sirfsoc_dma_filter_id);
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
 
-static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
-       struct dma_slave_caps *caps)
-{
-       caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
-       caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
-       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       caps->cmd_pause = true;
-       caps->cmd_terminate = true;
-
-       return 0;
-}
-
 static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
        struct of_dma *ofdma)
 {
@@ -739,11 +707,16 @@ static int sirfsoc_dma_probe(struct platform_device *op)
        dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
        dma->device_issue_pending = sirfsoc_dma_issue_pending;
-       dma->device_control = sirfsoc_dma_control;
+       dma->device_config = sirfsoc_dma_slave_config;
+       dma->device_pause = sirfsoc_dma_pause_chan;
+       dma->device_resume = sirfsoc_dma_resume_chan;
+       dma->device_terminate_all = sirfsoc_dma_terminate_all;
        dma->device_tx_status = sirfsoc_dma_tx_status;
        dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
        dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
-       dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
+       dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+       dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+       dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 
        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);
index 15d49461c0d2c4b32eddc650ccfb8c1ceb78308b..68aca3334a17e4fcc9fba62af4e89f082d5c681f 100644 (file)
@@ -1429,11 +1429,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
        return is_link;
 }
 
-static int d40_pause(struct d40_chan *d40c)
+static int d40_pause(struct dma_chan *chan)
 {
+       struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        int res = 0;
        unsigned long flags;
 
+       if (d40c->phy_chan == NULL) {
+               chan_err(d40c, "Channel is not allocated!\n");
+               return -EINVAL;
+       }
+
        if (!d40c->busy)
                return 0;
 
@@ -1448,11 +1454,17 @@ static int d40_pause(struct d40_chan *d40c)
        return res;
 }
 
-static int d40_resume(struct d40_chan *d40c)
+static int d40_resume(struct dma_chan *chan)
 {
+       struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        int res = 0;
        unsigned long flags;
 
+       if (d40c->phy_chan == NULL) {
+               chan_err(d40c, "Channel is not allocated!\n");
+               return -EINVAL;
+       }
+
        if (!d40c->busy)
                return 0;
 
@@ -2604,12 +2616,17 @@ static void d40_issue_pending(struct dma_chan *chan)
        spin_unlock_irqrestore(&d40c->lock, flags);
 }
 
-static void d40_terminate_all(struct dma_chan *chan)
+static int d40_terminate_all(struct dma_chan *chan)
 {
        unsigned long flags;
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        int ret;
 
+       if (d40c->phy_chan == NULL) {
+               chan_err(d40c, "Channel is not allocated!\n");
+               return -EINVAL;
+       }
+
        spin_lock_irqsave(&d40c->lock, flags);
 
        pm_runtime_get_sync(d40c->base->dev);
@@ -2627,6 +2644,7 @@ static void d40_terminate_all(struct dma_chan *chan)
        d40c->busy = false;
 
        spin_unlock_irqrestore(&d40c->lock, flags);
+       return 0;
 }
 
 static int
@@ -2673,6 +2691,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
        u32 src_maxburst, dst_maxburst;
        int ret;
 
+       if (d40c->phy_chan == NULL) {
+               chan_err(d40c, "Channel is not allocated!\n");
+               return -EINVAL;
+       }
+
        src_addr_width = config->src_addr_width;
        src_maxburst = config->src_maxburst;
        dst_addr_width = config->dst_addr_width;
@@ -2781,35 +2804,6 @@ static int d40_set_runtime_config(struct dma_chan *chan,
        return 0;
 }
 
-static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                      unsigned long arg)
-{
-       struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
-
-       if (d40c->phy_chan == NULL) {
-               chan_err(d40c, "Channel is not allocated!\n");
-               return -EINVAL;
-       }
-
-       switch (cmd) {
-       case DMA_TERMINATE_ALL:
-               d40_terminate_all(chan);
-               return 0;
-       case DMA_PAUSE:
-               return d40_pause(d40c);
-       case DMA_RESUME:
-               return d40_resume(d40c);
-       case DMA_SLAVE_CONFIG:
-               return d40_set_runtime_config(chan,
-                       (struct dma_slave_config *) arg);
-       default:
-               break;
-       }
-
-       /* Other commands are unimplemented */
-       return -ENXIO;
-}
-
 /* Initialization functions */
 
 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
@@ -2870,7 +2864,10 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
        dev->device_free_chan_resources = d40_free_chan_resources;
        dev->device_issue_pending = d40_issue_pending;
        dev->device_tx_status = d40_tx_status;
-       dev->device_control = d40_control;
+       dev->device_config = d40_set_runtime_config;
+       dev->device_pause = d40_pause;
+       dev->device_resume = d40_resume;
+       dev->device_terminate_all = d40_terminate_all;
        dev->dev = base->dev;
 }
 
index 159f1736a16f7cbb334bcffbfa1c66d18b22a120..7ebcf9bec6984c675b0ad42f50879171e9e5f83c 100644 (file)
@@ -355,38 +355,6 @@ static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
        kfree(txd);
 }
 
-static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
-{
-       struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
-       struct sun6i_pchan *pchan = vchan->phy;
-       unsigned long flags;
-       LIST_HEAD(head);
-
-       spin_lock(&sdev->lock);
-       list_del_init(&vchan->node);
-       spin_unlock(&sdev->lock);
-
-       spin_lock_irqsave(&vchan->vc.lock, flags);
-
-       vchan_get_all_descriptors(&vchan->vc, &head);
-
-       if (pchan) {
-               writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
-               writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
-
-               vchan->phy = NULL;
-               pchan->vchan = NULL;
-               pchan->desc = NULL;
-               pchan->done = NULL;
-       }
-
-       spin_unlock_irqrestore(&vchan->vc.lock, flags);
-
-       vchan_dma_desc_free_list(&vchan->vc, &head);
-
-       return 0;
-}
-
 static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
 {
        struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
@@ -675,57 +643,92 @@ err_lli_free:
        return NULL;
 }
 
-static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                      unsigned long arg)
+static int sun6i_dma_config(struct dma_chan *chan,
+                           struct dma_slave_config *config)
+{
+       struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+
+       memcpy(&vchan->cfg, config, sizeof(*config));
+
+       return 0;
+}
+
+static int sun6i_dma_pause(struct dma_chan *chan)
+{
+       struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+       struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+       struct sun6i_pchan *pchan = vchan->phy;
+
+       dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
+
+       if (pchan) {
+               writel(DMA_CHAN_PAUSE_PAUSE,
+                      pchan->base + DMA_CHAN_PAUSE);
+       } else {
+               spin_lock(&sdev->lock);
+               list_del_init(&vchan->node);
+               spin_unlock(&sdev->lock);
+       }
+
+       return 0;
+}
+
+static int sun6i_dma_resume(struct dma_chan *chan)
 {
        struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
        struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
        struct sun6i_pchan *pchan = vchan->phy;
        unsigned long flags;
-       int ret = 0;
 
-       switch (cmd) {
-       case DMA_RESUME:
-               dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+       dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
 
-               spin_lock_irqsave(&vchan->vc.lock, flags);
+       spin_lock_irqsave(&vchan->vc.lock, flags);
 
-               if (pchan) {
-                       writel(DMA_CHAN_PAUSE_RESUME,
-                              pchan->base + DMA_CHAN_PAUSE);
-               } else if (!list_empty(&vchan->vc.desc_issued)) {
-                       spin_lock(&sdev->lock);
-                       list_add_tail(&vchan->node, &sdev->pending);
-                       spin_unlock(&sdev->lock);
-               }
+       if (pchan) {
+               writel(DMA_CHAN_PAUSE_RESUME,
+                      pchan->base + DMA_CHAN_PAUSE);
+       } else if (!list_empty(&vchan->vc.desc_issued)) {
+               spin_lock(&sdev->lock);
+               list_add_tail(&vchan->node, &sdev->pending);
+               spin_unlock(&sdev->lock);
+       }
 
-               spin_unlock_irqrestore(&vchan->vc.lock, flags);
-               break;
+       spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
-       case DMA_PAUSE:
-               dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
+       return 0;
+}
 
-               if (pchan) {
-                       writel(DMA_CHAN_PAUSE_PAUSE,
-                              pchan->base + DMA_CHAN_PAUSE);
-               } else {
-                       spin_lock(&sdev->lock);
-                       list_del_init(&vchan->node);
-                       spin_unlock(&sdev->lock);
-               }
-               break;
-
-       case DMA_TERMINATE_ALL:
-               ret = sun6i_dma_terminate_all(vchan);
-               break;
-       case DMA_SLAVE_CONFIG:
-               memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config));
-               break;
-       default:
-               ret = -ENXIO;
-               break;
+static int sun6i_dma_terminate_all(struct dma_chan *chan)
+{
+       struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+       struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+       struct sun6i_pchan *pchan = vchan->phy;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock(&sdev->lock);
+       list_del_init(&vchan->node);
+       spin_unlock(&sdev->lock);
+
+       spin_lock_irqsave(&vchan->vc.lock, flags);
+
+       vchan_get_all_descriptors(&vchan->vc, &head);
+
+       if (pchan) {
+               writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
+               writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
+
+               vchan->phy = NULL;
+               pchan->vchan = NULL;
+               pchan->desc = NULL;
+               pchan->done = NULL;
        }
-       return ret;
+
+       spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+       vchan_dma_desc_free_list(&vchan->vc, &head);
+
+       return 0;
 }
 
 static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
@@ -960,9 +963,20 @@ static int sun6i_dma_probe(struct platform_device *pdev)
        sdc->slave.device_issue_pending         = sun6i_dma_issue_pending;
        sdc->slave.device_prep_slave_sg         = sun6i_dma_prep_slave_sg;
        sdc->slave.device_prep_dma_memcpy       = sun6i_dma_prep_dma_memcpy;
-       sdc->slave.device_control               = sun6i_dma_control;
        sdc->slave.copy_align                   = 4;
-
+       sdc->slave.device_config                = sun6i_dma_config;
+       sdc->slave.device_pause                 = sun6i_dma_pause;
+       sdc->slave.device_resume                = sun6i_dma_resume;
+       sdc->slave.device_terminate_all         = sun6i_dma_terminate_all;
+       sdc->slave.src_addr_widths              = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+                                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+                                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       sdc->slave.dst_addr_widths              = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+                                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+                                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       sdc->slave.directions                   = BIT(DMA_DEV_TO_MEM) |
+                                                 BIT(DMA_MEM_TO_DEV);
+       sdc->slave.residue_granularity          = DMA_RESIDUE_GRANULARITY_BURST;
        sdc->slave.dev = &pdev->dev;
 
        sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
index d8450c3f35f0ed901e9eff8392e44842ff48598d..eaf585e8286b491da7dba4af594e59f0649b7f4d 100644 (file)
@@ -723,7 +723,7 @@ end:
        return;
 }
 
-static void tegra_dma_terminate_all(struct dma_chan *dc)
+static int tegra_dma_terminate_all(struct dma_chan *dc)
 {
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
        struct tegra_dma_sg_req *sgreq;
@@ -736,7 +736,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
        spin_lock_irqsave(&tdc->lock, flags);
        if (list_empty(&tdc->pending_sg_req)) {
                spin_unlock_irqrestore(&tdc->lock, flags);
-               return;
+               return 0;
        }
 
        if (!tdc->busy)
@@ -777,6 +777,7 @@ skip_dma_stop:
                dma_desc->cb_count = 0;
        }
        spin_unlock_irqrestore(&tdc->lock, flags);
+       return 0;
 }
 
 static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
@@ -827,25 +828,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
        return ret;
 }
 
-static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
-                       unsigned long arg)
-{
-       switch (cmd) {
-       case DMA_SLAVE_CONFIG:
-               return tegra_dma_slave_config(dc,
-                               (struct dma_slave_config *)arg);
-
-       case DMA_TERMINATE_ALL:
-               tegra_dma_terminate_all(dc);
-               return 0;
-
-       default:
-               break;
-       }
-
-       return -ENXIO;
-}
-
 static inline int get_bus_width(struct tegra_dma_channel *tdc,
                enum dma_slave_buswidth slave_bw)
 {
@@ -1443,7 +1425,23 @@ static int tegra_dma_probe(struct platform_device *pdev)
                                        tegra_dma_free_chan_resources;
        tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
        tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
-       tdma->dma_dev.device_control = tegra_dma_device_control;
+       tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+       tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+       tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       /*
+        * XXX The hardware appears to support
+        * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
+        * only used by this driver during tegra_dma_terminate_all()
+        */
+       tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+       tdma->dma_dev.device_config = tegra_dma_slave_config;
+       tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
        tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
        tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
 
index 2407ccf1a64b29583165d9fdb0d5638e4dd6f1de..c4c3d93fdd1bf26b97425b79ac5bb516b3edca5a 100644 (file)
@@ -561,8 +561,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
        return &td_desc->txd;
 }
 
-static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                     unsigned long arg)
+static int td_terminate_all(struct dma_chan *chan)
 {
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);
@@ -570,9 +569,6 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
        dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
 
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENXIO;
-
        /* first the easy part, put the queue into the free list */
        spin_lock_bh(&td_chan->lock);
        list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -697,7 +693,7 @@ static int td_probe(struct platform_device *pdev)
        dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
        td->dma.device_prep_slave_sg = td_prep_slave_sg;
-       td->dma.device_control = td_control;
+       td->dma.device_terminate_all = td_terminate_all;
 
        td->dma.dev = &pdev->dev;
 
index 0659ec9c44884ddd0ad8522979606c83b9632571..8849318b32b7ab5b48f14ea8304027f8a0da7fd9 100644 (file)
@@ -901,17 +901,12 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        return &first->txd;
 }
 
-static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                           unsigned long arg)
+static int txx9dmac_terminate_all(struct dma_chan *chan)
 {
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        struct txx9dmac_desc *desc, *_desc;
        LIST_HEAD(list);
 
-       /* Only supports DMA_TERMINATE_ALL */
-       if (cmd != DMA_TERMINATE_ALL)
-               return -EINVAL;
-
        dev_vdbg(chan2dev(chan), "terminate_all\n");
        spin_lock_bh(&dc->lock);
 
@@ -1109,7 +1104,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
        dc->dma.dev = &pdev->dev;
        dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
        dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
-       dc->dma.device_control = txx9dmac_control;
+       dc->dma.device_terminate_all = txx9dmac_terminate_all;
        dc->dma.device_tx_status = txx9dmac_tx_status;
        dc->dma.device_issue_pending = txx9dmac_issue_pending;
        if (pdata && pdata->memcpy_chan == ch) {
index 4a3a8f3137b3c41d4969c245c5686e12b94104ec..bdd2a5dd7220cbb2067732fbb79b49fcbc7a09d7 100644 (file)
@@ -1001,13 +1001,17 @@ error:
  * xilinx_vdma_terminate_all - Halt the channel and free descriptors
  * @chan: Driver specific VDMA Channel pointer
  */
-static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan)
+static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
 {
+       struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
        /* Halt the DMA engine */
        xilinx_vdma_halt(chan);
 
        /* Remove and free all of the descriptors in the lists */
        xilinx_vdma_free_descriptors(chan);
+
+       return 0;
 }
 
 /**
@@ -1075,27 +1079,6 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
 }
 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
 
-/**
- * xilinx_vdma_device_control - Configure DMA channel of the device
- * @dchan: DMA Channel pointer
- * @cmd: DMA control command
- * @arg: Channel configuration
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_vdma_device_control(struct dma_chan *dchan,
-                                     enum dma_ctrl_cmd cmd, unsigned long arg)
-{
-       struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
-
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENXIO;
-
-       xilinx_vdma_terminate_all(chan);
-
-       return 0;
-}
-
 /* -----------------------------------------------------------------------------
  * Probe and remove
  */
@@ -1300,7 +1283,7 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
                                xilinx_vdma_free_chan_resources;
        xdev->common.device_prep_interleaved_dma =
                                xilinx_vdma_dma_prep_interleaved;
-       xdev->common.device_control = xilinx_vdma_device_control;
+       xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
        xdev->common.device_tx_status = xilinx_vdma_tx_status;
        xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
 
index 17638d7cf5c279a3b2fd63aa2250939a8c059388..5907c1718f8c74fbe4a7d2c3bceb21759ed07422 100644 (file)
@@ -2174,14 +2174,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
 
 static inline void decode_bus_error(int node_id, struct mce *m)
 {
-       struct mem_ctl_info *mci = mcis[node_id];
-       struct amd64_pvt *pvt = mci->pvt_info;
+       struct mem_ctl_info *mci;
+       struct amd64_pvt *pvt;
        u8 ecc_type = (m->status >> 45) & 0x3;
        u8 xec = XEC(m->status, 0x1f);
        u16 ec = EC(m->status);
        u64 sys_addr;
        struct err_info err;
 
+       mci = edac_mc_find(node_id);
+       if (!mci)
+               return;
+
+       pvt = mci->pvt_info;
+
        /* Bail out early if this was an 'observed' error */
        if (PP(ec) == NBSL_PP_OBS)
                return;
index 63aa6730e89ea511492f6e101f48107fa59c8c3e..1acf57ba4c86bdc95b2e6d4d8281295f5568f6fc 100644 (file)
@@ -2447,7 +2447,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
                type = IVY_BRIDGE;
                break;
-       case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
+       case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
                rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
                type = SANDY_BRIDGE;
                break;
@@ -2460,8 +2460,11 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                type = BROADWELL;
                break;
        }
-       if (unlikely(rc < 0))
+       if (unlikely(rc < 0)) {
+               edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
                goto fail0;
+       }
+
        mc = 0;
 
        list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
@@ -2474,7 +2477,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto fail1;
        }
 
-       sbridge_printk(KERN_INFO, "Driver loaded.\n");
+       sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
 
        mutex_unlock(&sbridge_edac_lock);
        return 0;
index eb6935c8ad9449bb1c557ac8c596940fc2919096..d6a09b9cd8ccae2c35cee866ac35b1d4ba066975 100644 (file)
@@ -1246,14 +1246,14 @@ static const u32 model_textual_descriptor[] = {
 
 static struct fw_descriptor vendor_id_descriptor = {
        .length = ARRAY_SIZE(vendor_textual_descriptor),
-       .immediate = 0x03d00d1e,
+       .immediate = 0x03001f11,
        .key = 0x81000000,
        .data = vendor_textual_descriptor,
 };
 
 static struct fw_descriptor model_id_descriptor = {
        .length = ARRAY_SIZE(model_textual_descriptor),
-       .immediate = 0x17000001,
+       .immediate = 0x17023901,
        .key = 0x81000000,
        .data = model_textual_descriptor,
 };
index aff9018d06588d7b37cf6f836cb62d12d0597bf0..f51d376d10ba64e9bc5c8e2ecb953582011f90e9 100644 (file)
@@ -718,11 +718,6 @@ static inline unsigned int ar_next_buffer_index(unsigned int index)
        return (index + 1) % AR_BUFFERS;
 }
 
-static inline unsigned int ar_prev_buffer_index(unsigned int index)
-{
-       return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
-}
-
 static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
 {
        return ar_next_buffer_index(ctx->last_buffer_index);
index 64ac8f8f5098875edede0f356fd24226ce5bc49d..c22606fe3d44bf06d273a657917a12fda4ce39de 100644 (file)
@@ -1463,17 +1463,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
        struct sbp2_command_orb *orb;
        int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
 
-       /*
-        * Bidirectional commands are not yet implemented, and unknown
-        * transfer direction not handled.
-        */
-       if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
-               dev_err(lu_dev(lu), "cannot handle bidirectional command\n");
-               cmd->result = DID_ERROR << 16;
-               cmd->scsi_done(cmd);
-               return 0;
-       }
-
        orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
        if (orb == NULL)
                return SCSI_MLQUEUE_HOST_BUSY;
index af5d63c7cc53ded6b5ab4130b2b9279a5d52e988..2fe195002021d079ec36515a7ebba4385c1f3600 100644 (file)
@@ -75,29 +75,25 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
        unsigned long key;
        u32 desc_version;
 
-       *map_size = 0;
-       *desc_size = 0;
-       key = 0;
-       status = efi_call_early(get_memory_map, map_size, NULL,
-                               &key, desc_size, &desc_version);
-       if (status != EFI_BUFFER_TOO_SMALL)
-               return EFI_LOAD_ERROR;
-
+       *map_size = sizeof(*m) * 32;
+again:
        /*
         * Add an additional efi_memory_desc_t because we're doing an
         * allocation which may be in a new descriptor region.
         */
-       *map_size += *desc_size;
+       *map_size += sizeof(*m);
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                *map_size, (void **)&m);
        if (status != EFI_SUCCESS)
                goto fail;
 
+       *desc_size = 0;
+       key = 0;
        status = efi_call_early(get_memory_map, map_size, m,
                                &key, desc_size, &desc_version);
        if (status == EFI_BUFFER_TOO_SMALL) {
                efi_call_early(free_pool, m);
-               return EFI_LOAD_ERROR;
+               goto again;
        }
 
        if (status != EFI_SUCCESS)
index 472fb5b8779f3723f969998b54716b3081901649..9cdbc0c9cb2da87abc65dda442fc75a1612cc51e 100644 (file)
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
        struct gpio_chip gpio_chip;
 };
 
+#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
+
 static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-       struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+       struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+       struct tps65912 *tps65912 = tps65912_gpio->tps65912;
        int val;
 
        val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
                              int value)
 {
-       struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+       struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+       struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
        if (value)
                tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
 static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
                                int value)
 {
-       struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+       struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+       struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
        /* Set the initial value */
        tps65912_gpio_set(gc, offset, value);
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
 
 static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
 {
-       struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+       struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+       struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
        return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
                                                                GPIO_CFG_MASK);
index 8cad8e400b44d674ad144e817daa7b63139f2041..4650bf830d6b6306f96e309d4f2782da8a859575 100644 (file)
@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
 
        ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
        if (ret < 0) {
-               /* We've found the gpio chip, but the translation failed.
-                * Return true to stop looking and return the translation
-                * error via out_gpio
+               /* We've found a gpio chip, but the translation failed.
+                * Store translation error in out_gpio.
+                * Return false to keep looking, as more than one gpio chip
+                * could be registered per of-node.
                 */
                gg_data->out_gpio = ERR_PTR(ret);
-               return true;
+               return false;
         }
 
        gg_data->out_gpio = gpiochip_get_desc(gc, ret);
index b3589d0e39b9c2027a4e680625663dfdcbe3cc63..910ff8ab9c9cfb018c6a18e8ae3126767b6e2580 100644 (file)
@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
        return KFD_MQD_TYPE_CP;
 }
 
-static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+unsigned int get_first_pipe(struct device_queue_manager *dqm)
 {
-       BUG_ON(!dqm);
+       BUG_ON(!dqm || !dqm->dev);
        return dqm->dev->shared_resources.first_compute_pipe;
 }
 
+unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+       BUG_ON(!dqm || !dqm->dev);
+       return dqm->dev->shared_resources.compute_pipe_count;
+}
+
 static inline unsigned int get_pipes_num_cpsch(void)
 {
        return PIPE_PER_ME_CP_SCHEDULING;
index d64f86cda34f5a155e176526d5f3a3463b96c826..488f51d19427a8273b672df18962a4dafe9feaf5 100644 (file)
@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd);
 int init_pipelines(struct device_queue_manager *dqm,
                unsigned int pipes_num, unsigned int first_pipe);
+unsigned int get_first_pipe(struct device_queue_manager *dqm);
+unsigned int get_pipes_num(struct device_queue_manager *dqm);
 
 extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
        return (pdd->lds_base >> 60) & 0x0E;
 }
 
-extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
-{
-       BUG_ON(!dqm || !dqm->dev);
-       return dqm->dev->shared_resources.compute_pipe_count;
-}
-
 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
index 6b072466e2a6f628c8a1ff4a026cf5788af65c43..5469efe0523e8d85b11ad33729bfed9036051dfa 100644 (file)
@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm,
 
 static int initialize_cpsch_cik(struct device_queue_manager *dqm)
 {
-       return init_pipelines(dqm, get_pipes_num(dqm), 0);
+       return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 }
index 0409b907de5d5cc771543d17cc50ed20bad3e3ba..b3e3068c6ec07f8caa8bac6a4edc68e75741190d 100644 (file)
@@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
                     (adj->crtc_hdisplay - 1) |
                     ((adj->crtc_vdisplay - 1) << 16));
 
-       cfg = ATMEL_HLCDC_CLKPOL;
+       cfg = 0;
 
        prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
        mode_rate = mode->crtc_clock * 1000;
index 7320a6c6613f174c1776f75c6dcc832cf91f1ad8..c1cb17493e0d4e212821c832009012c2384c7143 100644 (file)
@@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
        pm_runtime_enable(dev->dev);
 
-       pm_runtime_put_sync(dev->dev);
-
        ret = atmel_hlcdc_dc_modeset_init(dev);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize mode setting\n");
index 063d2a7b941fcaa4f5b527afd41a193fd382d64d..e79bd9ba474b3c181140b0d9f519a96066443acd 100644 (file)
@@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
 
        /* Disable the layer */
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
-                    ATMEL_HLCDC_LAYER_RST);
+                    ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
+                    ATMEL_HLCDC_LAYER_UPDATE);
 
        /* Clear all pending interrupts */
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
index 6b00173d1be4fd6c96f749a803dd23ab5495de88..6b6b07ff720ba612d3f3a82f1063d0432f28da1f 100644 (file)
@@ -2127,7 +2127,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
 
        mutex_lock(&dev->mode_config.mutex);
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
        connector = drm_connector_find(dev, out_resp->connector_id);
        if (!connector) {
@@ -2157,6 +2156,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        out_resp->mm_height = connector->display_info.height_mm;
        out_resp->subpixel = connector->display_info.subpixel_order;
        out_resp->connection = connector->status;
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        encoder = drm_connector_get_encoder(connector);
        if (encoder)
                out_resp->encoder_id = encoder->base.id;
index f2a825e39646427b7e4617627bd8ab3c3b9acb4d..8727086cf48ccce9e6548df8cf4e1d0df59012e7 100644 (file)
@@ -2114,6 +2114,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
  * number comparisons on buffer last_read|write_seqno. It also allows an
  * emission time to be associated with the request for tracking how far ahead
  * of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init
  */
 struct drm_i915_gem_request {
        struct kref ref;
@@ -2137,7 +2140,16 @@ struct drm_i915_gem_request {
        /** Position in the ringbuffer of the end of the whole request */
        u32 tail;
 
-       /** Context related to this request */
+       /**
+        * Context related to this request
+        * Contexts are refcounted, so when this request is associated with a
+        * context, we must increment the context's refcount, to guarantee that
+        * it persists while any request is linked to it. Requests themselves
+        * are also refcounted, so the request will only be freed when the last
+        * reference to it is dismissed, and the code in
+        * i915_gem_request_free() will then decrement the refcount on the
+        * context.
+        */
        struct intel_context *ctx;
 
        /** Batch buffer related to this request if any */
@@ -2374,6 +2386,7 @@ struct drm_i915_cmd_table {
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)                (IS_BROADWELL(dev) && \
                                 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
+                                (INTEL_DEVID(dev) & 0xf) == 0xb ||     \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
index c26d36cc4b313ac4d03ade4167739d7fd7be6e9c..e5daad5f75fb96ca2a0b8aa9a2f4a9ccecf8a650 100644 (file)
@@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                if (submit_req->ctx != ring->default_context)
                        intel_lr_context_unpin(ring, submit_req->ctx);
 
-               i915_gem_context_unreference(submit_req->ctx);
-               kfree(submit_req);
+               i915_gem_request_unreference(submit_req);
        }
 
        /*
index a2045848bd1a3d5d37c299d34b7e19dce50173d7..9c6f93ec886b7023b0ce8fa2753bacaaf041aaff 100644 (file)
@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                        stolen_offset, gtt_offset, size);
 
        /* KISS and expect everything to be page-aligned */
-       BUG_ON(stolen_offset & 4095);
-       BUG_ON(size & 4095);
-
-       if (WARN_ON(size == 0))
+       if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
+           WARN_ON(stolen_offset & 4095))
                return NULL;
 
        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
index 7a24bd1a51f648b340ce15d5ee98771cd1fd77d9..6377b22269ad1e7157058baf1447cb548362306c 100644 (file)
@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       mutex_lock(&dev->struct_mutex);
        if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
-               drm_gem_object_unreference_unlocked(&obj->base);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto err;
        }
 
        if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                }
        }
 
-       mutex_lock(&dev->struct_mutex);
        if (args->tiling_mode != obj->tiling_mode ||
            args->stride != obj->stride) {
                /* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                obj->bit_17 = NULL;
        }
 
+err:
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
 
index 4145d95902f54fbd9fb4f92668fe10fde2b330a0..ede5bbbd8a08a175873e24215a754b5be28ef23d 100644 (file)
@@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        while (true) {
                /* Find, clear, then process each source of interrupt */
 
@@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
        u32 master_ctl, iir;
        irqreturn_t ret = IRQ_NONE;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        for (;;) {
                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);
@@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        intel_uncore_check_errors(dev);
@@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        enum pipe pipe;
        u32 aux_mask = GEN8_AUX_CHANNEL_A;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        if (IS_GEN9(dev))
                aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
                        GEN9_AUX_CHANNEL_D;
@@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        iir = I915_READ16(IIR);
        if (iir == 0)
                return IRQ_NONE;
@@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
        int pipe, ret = IRQ_NONE;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        iir = I915_READ(IIR);
        do {
                bool irq_received = (iir & ~flip_mask) != 0;
@@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        iir = I915_READ(IIR);
 
        for (;;) {
@@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
        dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
        dev_priv->pm.irqs_enabled = false;
+       synchronize_irq(dev_priv->dev->irq);
 }
 
 /**
index 3d220a67f8656ed9173b88672cd114893070aec0..e730789b53b7b0c141bada8400b398f67149275b 100644 (file)
@@ -2371,13 +2371,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-       u32 base = plane_config->base;
+       u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
+       u32 size_aligned = round_up(plane_config->base + plane_config->size,
+                                   PAGE_SIZE);
+
+       size_aligned -= base_aligned;
 
        if (plane_config->size == 0)
                return false;
 
-       obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
-                                                            plane_config->size);
+       obj = i915_gem_object_create_stolen_for_preallocated(dev,
+                                                            base_aligned,
+                                                            base_aligned,
+                                                            size_aligned);
        if (!obj)
                return false;
 
@@ -2725,10 +2731,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
        case DRM_FORMAT_XRGB8888:
                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
                break;
+       case DRM_FORMAT_ARGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
        case DRM_FORMAT_XBGR8888:
                plane_ctl |= PLANE_CTL_ORDER_RGBX;
                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
                break;
+       case DRM_FORMAT_ABGR8888:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
        case DRM_FORMAT_XRGB2101010:
                plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
                break;
@@ -6627,7 +6642,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
        aligned_height = intel_fb_align_height(dev, fb->height,
                                               plane_config->tiling);
 
-       plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+       plane_config->size = fb->pitches[0] * aligned_height;
 
        DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), plane, fb->width, fb->height,
@@ -7664,7 +7679,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
        aligned_height = intel_fb_align_height(dev, fb->height,
                                               plane_config->tiling);
 
-       plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+       plane_config->size = fb->pitches[0] * aligned_height;
 
        DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), fb->width, fb->height,
@@ -7755,7 +7770,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
        aligned_height = intel_fb_align_height(dev, fb->height,
                                               plane_config->tiling);
 
-       plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+       plane_config->size = fb->pitches[0] * aligned_height;
 
        DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), fb->width, fb->height,
@@ -8698,6 +8713,7 @@ retry:
                        old->release_fb->funcs->destroy(old->release_fb);
                goto fail;
        }
+       crtc->primary->crtc = crtc;
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -12182,9 +12198,6 @@ intel_check_cursor_plane(struct drm_plane *plane,
                return -ENOMEM;
        }
 
-       if (fb == crtc->cursor->fb)
-               return 0;
-
        /* we only need to pin inside GTT if cursor is non-phy */
        mutex_lock(&dev->struct_mutex);
        if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
@@ -13096,6 +13109,9 @@ static struct intel_quirk intel_quirks[] = {
 
        /* HP Chromebook 14 (Celeron 2955U) */
        { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+       /* Dell Chromebook 11 */
+       { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
index 0f358c5999ec8e0c8771a3d932447d281748a8b1..e8d3da9f337388e5a1647766a61281e4d25aa22d 100644 (file)
@@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                 * If there isn't a request associated with this submission,
                 * create one as a temporary holder.
                 */
-               WARN(1, "execlist context submission without request");
                request = kzalloc(sizeof(*request), GFP_KERNEL);
                if (request == NULL)
                        return -ENOMEM;
                request->ring = ring;
                request->ctx = to;
+               kref_init(&request->ref);
+               request->uniq = dev_priv->request_uniq++;
+               i915_gem_context_reference(request->ctx);
        } else {
+               i915_gem_request_reference(request);
                WARN_ON(to != request->ctx);
        }
        request->tail = tail;
-       i915_gem_request_reference(request);
-       i915_gem_context_reference(request->ctx);
 
        intel_runtime_pm_get(dev_priv);
 
@@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
                if (ctx_obj && (ctx != ring->default_context))
                        intel_lr_context_unpin(ring, ctx);
                intel_runtime_pm_put(dev_priv);
-               i915_gem_context_unreference(ctx);
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
index 5bf825dfaa098ec6b6ad8f479856ce676f51ba21..8d74de82456e880cb01d7e8132cf6c1caf5fa864 100644 (file)
@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
+               /* The atom implementation only supports writes with a max payload of
+                * 12 bytes since it uses 4 bits for the total count (header + payload)
+                * in the parameter space.  The atom interface supports 16 byte
+                * payloads for reads. The hw itself supports up to 16 bytes of payload.
+                */
+               if (WARN_ON_ONCE(msg->size > 12))
+                       return -E2BIG;
                /* tx_size needs to be 4 even for bare address packets since the atom
                 * table needs the info in tx_buf[3].
                 */
index 7c9df1eac065948df99491b3819427e32ad425d7..7fe7b749e182543b5c742e3b7fba76eeeffcc783 100644 (file)
@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                dig_connector = radeon_connector->con_priv;
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
-                       if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+                       if (radeon_audio != 0 &&
+                           drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+                           ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
                                return ATOM_ENCODER_MODE_DP_AUDIO;
                        return ATOM_ENCODER_MODE_DP;
                } else if (radeon_audio != 0) {
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                }
                break;
        case DRM_MODE_CONNECTOR_eDP:
-               if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+               if (radeon_audio != 0 &&
+                   drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+                   ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
                        return ATOM_ENCODER_MODE_DP_AUDIO;
                return ATOM_ENCODER_MODE_DP;
        case DRM_MODE_CONNECTOR_DVIA:
@@ -1720,8 +1724,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
        }
 
        encoder_mode = atombios_get_encoder_mode(encoder);
-       if (radeon_audio != 0 &&
-               (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
+       if (connector && (radeon_audio != 0) &&
+           ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+            (ENCODER_MODE_IS_DP(encoder_mode) &&
+             drm_detect_monitor_audio(radeon_connector_edid(connector)))))
                radeon_audio_dpms(encoder, mode);
 }
 
@@ -2136,6 +2142,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        int encoder_mode;
 
        radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2164,8 +2171,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
                /* handled in dpms */
                encoder_mode = atombios_get_encoder_mode(encoder);
-               if (radeon_audio != 0 &&
-                       (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
+               if (connector && (radeon_audio != 0) &&
+                   ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+                    (ENCODER_MODE_IS_DP(encoder_mode) &&
+                     drm_detect_monitor_audio(radeon_connector_edid(connector)))))
                        radeon_audio_mode_set(encoder, adjusted_mode);
                break;
        case ENCODER_OBJECT_ID_INTERNAL_DDI:
index e6a4ba236c703dc812d8bc57035408cb9ac5821f..0c993da9c8fb0503e9658339d10d6b28ac90a66c 100644 (file)
@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
        }
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 0x1);
+       WREG32(SRBM_INT_ACK, 0x1);
 
        WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
        WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
        /* grbm */
        WREG32(GRBM_INT_CNTL, 0);
+       /* SRBM */
+       WREG32(SRBM_INT_CNTL, 0);
        /* vline/vblank, etc. */
        WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -8046,6 +8050,10 @@ restart_ih:
                                break;
                        }
                        break;
+               case 96:
+                       DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+                       WREG32(SRBM_INT_ACK, 0x1);
+                       break;
                case 124: /* UVD */
                        DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
                        radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
index 03003f8a6de63ba00c741824c053070a009cd319..c648e1996dabac449dfb838e018cad85b2d3bb61 100644 (file)
 #define                SOFT_RESET_ORB                          (1 << 23)
 #define                SOFT_RESET_VCE                          (1 << 24)
 
+#define SRBM_READ_ERROR                                        0xE98
+#define SRBM_INT_CNTL                                  0xEA0
+#define SRBM_INT_ACK                                   0xEA8
+
 #define VM_L2_CNTL                                     0x1400
 #define                ENABLE_L2_CACHE                                 (1 << 0)
 #define                ENABLE_L2_FRAGMENT_PROCESSING                   (1 << 1)
index 78600f534c804b745b99f7aea8688381b4204182..4c0e24b3bb9022aac8cfe59868022c096941dd60 100644 (file)
@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        }
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 0x1);
+       WREG32(SRBM_INT_ACK, 0x1);
 
        evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
        tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
        WREG32(DMA_CNTL, tmp);
        WREG32(GRBM_INT_CNTL, 0);
+       WREG32(SRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
        if (rdev->num_crtc >= 4) {
@@ -5066,6 +5069,10 @@ restart_ih:
                                DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
                                break;
                        }
+               case 96:
+                       DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+                       WREG32(SRBM_INT_ACK, 0x1);
+                       break;
                case 124: /* UVD */
                        DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
                        radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
index ee83d2a88750aafb865c30dd89e42c2041700a61..a8d1d5240fcb3088d1ea391ebcc8955c46f8c237 100644 (file)
 #define                SOFT_RESET_REGBB                        (1 << 22)
 #define                SOFT_RESET_ORB                          (1 << 23)
 
+#define SRBM_READ_ERROR                                        0xE98
+#define SRBM_INT_CNTL                                  0xEA0
+#define SRBM_INT_ACK                                   0xEA8
+
 /* display watermarks */
 #define        DC_LB_MEMORY_SPLIT                                0x6b0c
 #define        PRIORITY_A_CNT                                    0x6b18
index 24242a7f0ac3d728c4c69366f8af1077ecc25190..dab00812abaabeeeee6295041e730143c99fecba 100644 (file)
@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        }
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 0x1);
+       WREG32(SRBM_INT_ACK, 0x1);
 
        evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        if ((rdev->config.cayman.max_backends_per_se == 1) &&
            (rdev->flags & RADEON_IS_IGP)) {
-               if ((disabled_rb_mask & 3) == 1) {
-                       /* RB0 disabled, RB1 enabled */
-                       tmp = 0x11111111;
-               } else {
+               if ((disabled_rb_mask & 3) == 2) {
                        /* RB1 disabled, RB0 enabled */
                        tmp = 0x00000000;
+               } else {
+                       /* RB0 disabled, RB1 enabled */
+                       tmp = 0x11111111;
                }
        } else {
                tmp = gb_addr_config & NUM_PIPES_MASK;
index ad7125486894d18ae90b0bc507d248d92baaf3e6..6b44580440d09a10053abcb53bf6f6c048d4e063 100644 (file)
 #define                SOFT_RESET_REGBB                        (1 << 22)
 #define                SOFT_RESET_ORB                          (1 << 23)
 
+#define SRBM_READ_ERROR                                        0xE98
+#define SRBM_INT_CNTL                                  0xEA0
+#define SRBM_INT_ACK                                   0xEA8
+
 #define        SRBM_STATUS2                                    0x0EC4
 #define                DMA_BUSY                                (1 << 5)
 #define                DMA1_BUSY                               (1 << 6)
index 843b65f46ece168a8694a8a86a974befda12290b..fa2154493cf1537dee269149b1924468a6035325 100644 (file)
@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        radeon_crtc = to_radeon_crtc(crtc);
                        if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-                               vrefresh = radeon_crtc->hw_mode.vrefresh;
+                               vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
                                break;
                        }
                }
index c830863bc98aa0cb55e84aedfbbc76606945ee01..a579ed379f20f4609b5b9736dc123e6c5adf1ea8 100644 (file)
@@ -715,6 +715,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
        struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_device *rdev = p->rdev;
        uint32_t header;
+       int ret = 0, i;
 
        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
@@ -743,14 +744,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto dump_ib;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto dump_ib;
        }
        return 0;
+
+dump_ib:
+       for (i = 0; i < ib_chunk->length_dw; i++) {
+               if (i == idx)
+                       printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
+               else
+                       printk("\t0x%08x\n", radeon_get_ib_value(p, i));
+       }
+       return ret;
 }
 
 /**
index 6b670b0bc47bb9dca0238ef35dcff1cc2f686c34..3a297037cc176250fff7b1dae4a2566a8fd3c1ca 100644 (file)
@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
                    (rdev->pdev->subsystem_vendor == 0x1734) &&
                    (rdev->pdev->subsystem_device == 0x1107))
                        use_bl = false;
+/* Older PPC macs use on-GPU backlight controller */
+#ifndef CONFIG_PPC_PMAC
                /* disable native backlight control on older asics */
                else if (rdev->family < CHIP_R600)
                        use_bl = false;
+#endif
                else
                        use_bl = true;
        }
index 9f758d39420dd4affddb42116a09e695a17b6abe..33cf4108386dbba4ef70a0e372eb992d1ff7e4d3 100644 (file)
@@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
                        single_display = false;
        }
 
+       /* 120hz tends to be problematic even if they are under the
+        * vblank limit.
+        */
+       if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
+               single_display = false;
+
        /* certain older asics have a separare 3D performance state,
         * so try that first if the user selected performance
         */
index 73107fe9e46f7de25d1d22b3ad3ba0b28f40c8f0..bcf516a8a2f1960a9403b765460a363ba0feaedd 100644 (file)
@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev)
        }
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 1);
+       WREG32(SRBM_INT_ACK, 1);
 
        evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
                switch (pkt.type) {
                case RADEON_PACKET_TYPE0:
                        dev_err(rdev->dev, "Packet0 not allowed!\n");
-                       for (i = 0; i < ib->length_dw; i++) {
-                               if (i == idx)
-                                       printk("\t0x%08x <---\n", ib->ptr[i]);
-                               else
-                                       printk("\t0x%08x\n", ib->ptr[i]);
-                       }
                        ret = -EINVAL;
                        break;
                case RADEON_PACKET_TYPE2:
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
                        ret = -EINVAL;
                        break;
                }
-               if (ret)
+               if (ret) {
+                       for (i = 0; i < ib->length_dw; i++) {
+                               if (i == idx)
+                                       printk("\t0x%08x <---\n", ib->ptr[i]);
+                               else
+                                       printk("\t0x%08x\n", ib->ptr[i]);
+                       }
                        break;
+               }
        } while (idx < ib->length_dw);
 
        return ret;
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
        tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
        WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
        WREG32(GRBM_INT_CNTL, 0);
+       WREG32(SRBM_INT_CNTL, 0);
        if (rdev->num_crtc >= 2) {
                WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
                WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -6609,6 +6613,10 @@ restart_ih:
                                break;
                        }
                        break;
+               case 96:
+                       DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+                       WREG32(SRBM_INT_ACK, 0x1);
+                       break;
                case 124: /* UVD */
                        DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
                        radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
index cbd91d226f3ce232b5933686ad41cae3ace64ebe..c27118cab16a625a978a2d57e22297f518764b4b 100644 (file)
 #define        CC_SYS_RB_BACKEND_DISABLE                       0xe80
 #define        GC_USER_SYS_RB_BACKEND_DISABLE                  0xe84
 
+#define SRBM_READ_ERROR                                        0xE98
+#define SRBM_INT_CNTL                                  0xEA0
+#define SRBM_INT_ACK                                   0xEA8
+
 #define        SRBM_STATUS2                                    0x0EC4
 #define                DMA_BUSY                                (1 << 5)
 #define                DMA1_BUSY                               (1 << 6)
index 3aaa84ae26811fb8c731a89e76b91297b1ff3bf3..1a52522f5da76790dae32fe1a423bd75eaddd1e4 100644 (file)
@@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
        crtc->state = NULL;
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (state)
+       if (state) {
                crtc->state = &state->base;
+               crtc->state->crtc = crtc;
+       }
 }
 
 static struct drm_crtc_state *
@@ -1012,6 +1014,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
                return NULL;
 
        copy->base.mode_changed = false;
+       copy->base.active_changed = false;
        copy->base.planes_changed = false;
        copy->base.event = NULL;
 
@@ -1227,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
        /* program display mode */
        tegra_dc_set_timings(dc, mode);
 
-       if (dc->soc->supports_border_color)
-               tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
        /* interlacing isn't supported yet, so disable it */
        if (dc->soc->supports_interlacing) {
                value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1252,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
 
 static void tegra_crtc_prepare(struct drm_crtc *crtc)
 {
-       struct tegra_dc *dc = to_tegra_dc(crtc);
-       unsigned int syncpt;
-       unsigned long value;
-
        drm_crtc_vblank_off(crtc);
-
-       if (dc->pipe)
-               syncpt = SYNCPT_VBLANK1;
-       else
-               syncpt = SYNCPT_VBLANK0;
-
-       /* initialize display controller */
-       tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-       tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
-       /* initialize timer */
-       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
-               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
-       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
-       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
-               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
-       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
-       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
 }
 
 static void tegra_crtc_commit(struct drm_crtc *crtc)
@@ -1664,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client)
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_plane *primary = NULL;
        struct drm_plane *cursor = NULL;
+       unsigned int syncpt;
+       u32 value;
        int err;
 
        if (tegra->domain) {
@@ -1730,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client)
                goto cleanup;
        }
 
+       /* initialize display controller */
+       if (dc->pipe)
+               syncpt = SYNCPT_VBLANK1;
+       else
+               syncpt = SYNCPT_VBLANK0;
+
+       tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+       tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+       /* initialize timer */
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+       if (dc->soc->supports_border_color)
+               tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
        return 0;
 
 cleanup:
index 7e06657ae58bc1eb2202e7da6c193eb4c8ca862b..7eaaee74a039f36c180629b2233f8455d88b1b6a 100644 (file)
@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
        h_back_porch = mode->htotal - mode->hsync_end;
        h_front_porch = mode->hsync_start - mode->hdisplay;
 
+       err = clk_set_rate(hdmi->clk, pclk);
+       if (err < 0) {
+               dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
+                       err);
+       }
+
+       DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
+
        /* power up sequence */
        value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
        value &= ~SOR_PLL_PDBG;
index db4fb6e1cc5b3ca83d14d3f1e0e52e66fe75979a..7c669c328c4c7b4945dd22cde0f631aa51cb6b4b 100644 (file)
@@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
@@ -1926,6 +1927,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #endif
 #if IS_ENABLED(CONFIG_HID_SAITEK)
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
index 46edb4d3ed28efc61582811ea6b5fdeac2a2808a..204312bfab2c6319985ce8d82d0a92900c746c65 100644 (file)
 #define USB_DEVICE_ID_MS_LK6K          0x00f9
 #define USB_DEVICE_ID_MS_PRESENTER_8K_BT       0x0701
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB      0x0713
+#define USB_DEVICE_ID_MS_NE7K          0x071d
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K      0x0730
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500    0x076c
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
 #define USB_VENDOR_ID_SAITEK           0x06a3
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000    0x0621
+#define USB_DEVICE_ID_SAITEK_RAT7_OLD  0x0ccb
 #define USB_DEVICE_ID_SAITEK_RAT7      0x0cd7
 #define USB_DEVICE_ID_SAITEK_MMO7      0x0cd0
 
index fbaea6eb882e21afb6cba576279834a099780434..af935eb198c93549867c4e50bb48a997e5cd3f2b 100644 (file)
@@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = {
                .driver_data = MS_ERGONOMY },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
                .driver_data = MS_ERGONOMY },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
+               .driver_data = MS_ERGONOMY },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
                .driver_data = MS_ERGONOMY | MS_RDESC },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
index 5632c54eadf0206faee31afefc99235763c37e14..a014f21275d8bfada33701b4bef5013f3b81eb2a 100644 (file)
@@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id saitek_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
                .driver_data = SAITEK_FIX_PS1000 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD),
+               .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
index 6a58b6c723aa215408051e2b3c967205569a7b94..e54ce1097e2cc57f5049cf852016087a32534c47 100644 (file)
@@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
 {
        struct hid_sensor_hub_callbacks_list *callback;
        struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
+       unsigned long flags;
 
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list)
                if (callback->usage_id == usage_id &&
                        (collection_index >=
@@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
                                callback->hsdev->end_collection_index)) {
                        *priv = callback->priv;
                        *hsdev = callback->hsdev;
-                       spin_unlock(&pdata->dyn_callback_lock);
+                       spin_unlock_irqrestore(&pdata->dyn_callback_lock,
+                                              flags);
                        return callback->usage_callback;
                }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return NULL;
 }
index 31e9d25611064d0500eba47ea0b710beac31bb0b..1896c019e302934aa13c6f9ac434c51637f450ce 100644 (file)
@@ -804,7 +804,7 @@ union sixaxis_output_report_01 {
 #define DS4_REPORT_0x81_SIZE 7
 #define SIXAXIS_REPORT_0xF2_SIZE 18
 
-static spinlock_t sony_dev_list_lock;
+static DEFINE_SPINLOCK(sony_dev_list_lock);
 static LIST_HEAD(sony_device_list);
 static DEFINE_IDA(sony_device_id_allocator);
 
@@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return -ENOMEM;
        }
 
+       spin_lock_init(&sc->lock);
+
        sc->quirks = quirks;
        hid_set_drvdata(hdev, sc);
        sc->hdev = hdev;
@@ -2147,8 +2149,8 @@ static void __exit sony_exit(void)
 {
        dbg_hid("Sony:%s\n", __func__);
 
-       ida_destroy(&sony_device_id_allocator);
        hid_unregister_driver(&sony_driver);
+       ida_destroy(&sony_device_id_allocator);
 }
 module_init(sony_init);
 module_exit(sony_exit);
index d43e967e75339ec7972e734e284c4356e31a4e38..36053f33d6d93e97009b0d6ba3f4aa5416be8fea 100644 (file)
@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
        int ret, ret_size;
-       int size = ihid->bufsize;
+       int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+       if (size > ihid->bufsize)
+               size = ihid->bufsize;
 
        ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
        if (ret != size) {
@@ -785,7 +788,7 @@ static int i2c_hid_init_irq(struct i2c_client *client)
        dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
 
        ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
-                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                       IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                        client->name, ihid);
        if (ret < 0) {
                dev_warn(&client->dev,
index 1a6507999a6534f0b851e209bb702696d1f50e58..046351cf17f3432814b46e42f0892920c76f1b32 100644 (file)
@@ -778,6 +778,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
                        input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
                        input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
+                       if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
+                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+                       } else {
+                               input_report_abs(input, ABS_MISC, 0);
+                       }
                } else if (features->type == CINTIQ_HYBRID) {
                        /*
                         * Do not send hardware buttons under Android. They
@@ -2725,9 +2730,9 @@ static const struct wacom_features wacom_features_0xF6 =
          .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
          .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x32A =
-       { "Wacom Cintiq 27QHD", 119740, 67520, 2047,
-         63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
-         WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+       { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+         WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+         WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x32B =
        { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
          WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
index d931cbbed24069a072385725f5c1fd454e04acdb..110fade9cb74680f0f37115353fe166f7edeca3d 100644 (file)
@@ -1606,7 +1606,7 @@ config SENSORS_W83795
          will be called w83795.
 
 config SENSORS_W83795_FANCTRL
-       boolean "Include automatic fan control support (DANGEROUS)"
+       bool "Include automatic fan control support (DANGEROUS)"
        depends on SENSORS_W83795
        default n
        help
index bce4e9ff21bff76606484f0a04ad1bf52b1ffee2..6c99ee7bafa3fdf47d6479b7198697ec835a23e8 100644 (file)
@@ -147,6 +147,9 @@ static int ads7828_probe(struct i2c_client *client,
                                                    &ads2830_regmap_config);
        }
 
+       if (IS_ERR(data->regmap))
+               return PTR_ERR(data->regmap);
+
        data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
        if (!diff_input)
                data->cmd_byte |= ADS7828_CMD_SD_SE;
index a674cd83a4e2ecfb4e3fbf63f31539ca933ea02b..9f7dbd189c97420acfcbdef68e79137bb6b32015 100644 (file)
@@ -57,7 +57,7 @@ config SENSORS_LTC2978
          be called ltc2978.
 
 config SENSORS_LTC2978_REGULATOR
-       boolean "Regulator support for LTC2978 and compatibles"
+       bool "Regulator support for LTC2978 and compatibles"
        depends on SENSORS_LTC2978 && REGULATOR
        help
          If you say yes here you get regulator support for Linear
index 8c9e619f3026c9e4e88c979698cae39fd41e0c1a..78fbee46362828fceb0e098f0088ba8b8828e1f6 100644 (file)
@@ -35,11 +35,11 @@ config ACPI_I2C_OPREGION
 if I2C
 
 config I2C_BOARDINFO
-       boolean
+       bool
        default y
 
 config I2C_COMPAT
-       boolean "Enable compatibility bits for old user-space"
+       bool "Enable compatibility bits for old user-space"
        default y
        help
          Say Y here if you intend to run lm-sensors 3.1.1 or older, or any
index ab838d9e28b6389dc6d97dc633ea6259d2126ca3..22da9c2ffa2250cad9a7172bdbc94890d659a744 100644 (file)
@@ -79,7 +79,7 @@ config I2C_AMD8111
 
 config I2C_HIX5HD2
        tristate "Hix5hd2 high-speed I2C driver"
-       depends on ARCH_HIX5HD2
+       depends on ARCH_HIX5HD2 || COMPILE_TEST
        help
          Say Y here to include support for high-speed I2C controller in the
          Hisilicon based hix5hd2 SoCs.
@@ -372,6 +372,16 @@ config I2C_BCM2835
          This support is also available as a module.  If so, the module
          will be called i2c-bcm2835.
 
+config I2C_BCM_IPROC
+       tristate "Broadcom iProc I2C controller"
+       depends on ARCH_BCM_IPROC || COMPILE_TEST
+       default ARCH_BCM_IPROC
+       help
+         If you say yes to this option, support will be included for the
+         Broadcom iProc I2C controller.
+
+         If you don't know what to do here, say N.
+
 config I2C_BCM_KONA
        tristate "BCM Kona I2C adapter"
        depends on ARCH_BCM_MOBILE
@@ -465,6 +475,16 @@ config I2C_DESIGNWARE_PCI
          This driver can also be built as a module.  If so, the module
          will be called i2c-designware-pci.
 
+config I2C_DESIGNWARE_BAYTRAIL
+       bool "Intel Baytrail I2C semaphore support"
+       depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI
+       help
+         This driver enables managed host access to the PMIC I2C bus on select
+         Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows
+         the host to request uninterrupted access to the PMIC's I2C bus from
+         the platform firmware controlling it. You should say Y if running on
+         a BayTrail system using the AXP288.
+
 config I2C_EFM32
        tristate "EFM32 I2C controller"
        depends on ARCH_EFM32 || COMPILE_TEST
index 56388f658d2f2567cbcf4b38433212c94d0d0faf..3638feb6677e1d6d7991b6d0d831ebc1b99c2e11 100644 (file)
@@ -33,6 +33,7 @@ obj-$(CONFIG_I2C_AT91)                += i2c-at91.o
 obj-$(CONFIG_I2C_AU1550)       += i2c-au1550.o
 obj-$(CONFIG_I2C_AXXIA)                += i2c-axxia.o
 obj-$(CONFIG_I2C_BCM2835)      += i2c-bcm2835.o
+obj-$(CONFIG_I2C_BCM_IPROC)    += i2c-bcm-iproc.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CADENCE)      += i2c-cadence.o
 obj-$(CONFIG_I2C_CBUS_GPIO)    += i2c-cbus-gpio.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_I2C_DAVINCI)     += i2c-davinci.o
 obj-$(CONFIG_I2C_DESIGNWARE_CORE)      += i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)  += i2c-designware-platform.o
 i2c-designware-platform-objs := i2c-designware-platdrv.o
+i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
 obj-$(CONFIG_I2C_DESIGNWARE_PCI)       += i2c-designware-pci.o
 i2c-designware-pci-objs := i2c-designware-pcidrv.o
 obj-$(CONFIG_I2C_EFM32)                += i2c-efm32.o
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
new file mode 100644 (file)
index 0000000..d3c8915
--- /dev/null
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CFG_OFFSET                   0x00
+#define CFG_RESET_SHIFT              31
+#define CFG_EN_SHIFT                 30
+#define CFG_M_RETRY_CNT_SHIFT        16
+#define CFG_M_RETRY_CNT_MASK         0x0f
+
+#define TIM_CFG_OFFSET               0x04
+#define TIM_CFG_MODE_400_SHIFT       31
+
+#define M_FIFO_CTRL_OFFSET           0x0c
+#define M_FIFO_RX_FLUSH_SHIFT        31
+#define M_FIFO_TX_FLUSH_SHIFT        30
+#define M_FIFO_RX_CNT_SHIFT          16
+#define M_FIFO_RX_CNT_MASK           0x7f
+#define M_FIFO_RX_THLD_SHIFT         8
+#define M_FIFO_RX_THLD_MASK          0x3f
+
+#define M_CMD_OFFSET                 0x30
+#define M_CMD_START_BUSY_SHIFT       31
+#define M_CMD_STATUS_SHIFT           25
+#define M_CMD_STATUS_MASK            0x07
+#define M_CMD_STATUS_SUCCESS         0x0
+#define M_CMD_STATUS_LOST_ARB        0x1
+#define M_CMD_STATUS_NACK_ADDR       0x2
+#define M_CMD_STATUS_NACK_DATA       0x3
+#define M_CMD_STATUS_TIMEOUT         0x4
+#define M_CMD_PROTOCOL_SHIFT         9
+#define M_CMD_PROTOCOL_MASK          0xf
+#define M_CMD_PROTOCOL_BLK_WR        0x7
+#define M_CMD_PROTOCOL_BLK_RD        0x8
+#define M_CMD_PEC_SHIFT              8
+#define M_CMD_RD_CNT_SHIFT           0
+#define M_CMD_RD_CNT_MASK            0xff
+
+#define IE_OFFSET                    0x38
+#define IE_M_RX_FIFO_FULL_SHIFT      31
+#define IE_M_RX_THLD_SHIFT           30
+#define IE_M_START_BUSY_SHIFT        28
+
+#define IS_OFFSET                    0x3c
+#define IS_M_RX_FIFO_FULL_SHIFT      31
+#define IS_M_RX_THLD_SHIFT           30
+#define IS_M_START_BUSY_SHIFT        28
+
+#define M_TX_OFFSET                  0x40
+#define M_TX_WR_STATUS_SHIFT         31
+#define M_TX_DATA_SHIFT              0
+#define M_TX_DATA_MASK               0xff
+
+#define M_RX_OFFSET                  0x44
+#define M_RX_STATUS_SHIFT            30
+#define M_RX_STATUS_MASK             0x03
+#define M_RX_PEC_ERR_SHIFT           29
+#define M_RX_DATA_SHIFT              0
+#define M_RX_DATA_MASK               0xff
+
+#define I2C_TIMEOUT_MESC             100
+#define M_TX_RX_FIFO_SIZE            64
+
+enum bus_speed_index {
+       I2C_SPD_100K = 0,
+       I2C_SPD_400K,
+};
+
+struct bcm_iproc_i2c_dev {
+       struct device *device;
+       int irq;
+
+       void __iomem *base;
+
+       struct i2c_adapter adapter;
+
+       struct completion done;
+       int xfer_is_done;
+};
+
+/*
+ * Can be expanded in the future if more interrupt status bits are utilized
+ */
+#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT)
+
+static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = data;
+       u32 status = readl(iproc_i2c->base + IS_OFFSET);
+
+       status &= ISR_MASK;
+
+       if (!status)
+               return IRQ_NONE;
+
+       writel(status, iproc_i2c->base + IS_OFFSET);
+       iproc_i2c->xfer_is_done = 1;
+       complete_all(&iproc_i2c->done);
+
+       return IRQ_HANDLED;
+}
+
+static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                     struct i2c_msg *msg)
+{
+       u32 val;
+
+       val = readl(iproc_i2c->base + M_CMD_OFFSET);
+       val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK;
+
+       switch (val) {
+       case M_CMD_STATUS_SUCCESS:
+               return 0;
+
+       case M_CMD_STATUS_LOST_ARB:
+               dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
+               return -EAGAIN;
+
+       case M_CMD_STATUS_NACK_ADDR:
+               dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
+               return -ENXIO;
+
+       case M_CMD_STATUS_NACK_DATA:
+               dev_dbg(iproc_i2c->device, "NAK data\n");
+               return -ENXIO;
+
+       case M_CMD_STATUS_TIMEOUT:
+               dev_dbg(iproc_i2c->device, "bus timeout\n");
+               return -ETIMEDOUT;
+
+       default:
+               dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+               return -EIO;
+       }
+}
+
+static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                        struct i2c_msg *msg)
+{
+       int ret, i;
+       u8 addr;
+       u32 val;
+       unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC);
+
+       /* need to reserve one byte in the FIFO for the slave address */
+       if (msg->len > M_TX_RX_FIFO_SIZE - 1) {
+               dev_err(iproc_i2c->device,
+                       "only support data length up to %u bytes\n",
+                       M_TX_RX_FIFO_SIZE - 1);
+               return -EOPNOTSUPP;
+       }
+
+       /* check if bus is busy */
+       if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
+              BIT(M_CMD_START_BUSY_SHIFT))) {
+               dev_warn(iproc_i2c->device, "bus is busy\n");
+               return -EBUSY;
+       }
+
+       /* format and load slave address into the TX FIFO */
+       addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
+       writel(addr, iproc_i2c->base + M_TX_OFFSET);
+
+       /* for a write transaction, load data into the TX FIFO */
+       if (!(msg->flags & I2C_M_RD)) {
+               for (i = 0; i < msg->len; i++) {
+                       val = msg->buf[i];
+
+                       /* mark the last byte */
+                       if (i == msg->len - 1)
+                               val |= 1 << M_TX_WR_STATUS_SHIFT;
+
+                       writel(val, iproc_i2c->base + M_TX_OFFSET);
+               }
+       }
+
+       /* mark as incomplete before starting the transaction */
+       reinit_completion(&iproc_i2c->done);
+       iproc_i2c->xfer_is_done = 0;
+
+       /*
+        * Enable the "start busy" interrupt, which will be triggered after the
+        * transaction is done, i.e., the internal start_busy bit, transitions
+        * from 1 to 0.
+        */
+       writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET);
+
+       /*
+        * Now we can activate the transfer. For a read operation, specify the
+        * number of bytes to read
+        */
+       val = 1 << M_CMD_START_BUSY_SHIFT;
+       if (msg->flags & I2C_M_RD) {
+               val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
+                      (msg->len << M_CMD_RD_CNT_SHIFT);
+       } else {
+               val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
+       }
+       writel(val, iproc_i2c->base + M_CMD_OFFSET);
+
+       time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left);
+
+       /* disable all interrupts */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+       /* read it back to flush the write */
+       readl(iproc_i2c->base + IE_OFFSET);
+
+       /* make sure the interrupt handler isn't running */
+       synchronize_irq(iproc_i2c->irq);
+
+       if (!time_left && !iproc_i2c->xfer_is_done) {
+               dev_err(iproc_i2c->device, "transaction timed out\n");
+
+               /* flush FIFOs */
+               val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
+                     (1 << M_FIFO_TX_FLUSH_SHIFT);
+               writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+               return -ETIMEDOUT;
+       }
+
+       ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
+       if (ret) {
+               /* flush both TX/RX FIFOs */
+               val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
+                     (1 << M_FIFO_TX_FLUSH_SHIFT);
+               writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+               return ret;
+       }
+
+       /*
+        * For a read operation, we now need to load the data from FIFO
+        * into the memory buffer
+        */
+       if (msg->flags & I2C_M_RD) {
+               for (i = 0; i < msg->len; i++) {
+                       msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >>
+                                     M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+               }
+       }
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
+                             struct i2c_msg msgs[], int num)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter);
+       int ret, i;
+
+       /* go through all messages */
+       for (i = 0; i < num; i++) {
+               ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]);
+               if (ret) {
+                       dev_dbg(iproc_i2c->device, "xfer failed\n");
+                       return ret;
+               }
+       }
+
+       return num;
+}
+
+static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm bcm_iproc_algo = {
+       .master_xfer = bcm_iproc_i2c_xfer,
+       .functionality = bcm_iproc_i2c_functionality,
+};
+
+static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+       unsigned int bus_speed;
+       u32 val;
+       int ret = of_property_read_u32(iproc_i2c->device->of_node,
+                                      "clock-frequency", &bus_speed);
+       if (ret < 0) {
+               dev_info(iproc_i2c->device,
+                       "unable to interpret clock-frequency DT property\n");
+               bus_speed = 100000;
+       }
+
+       if (bus_speed < 100000) {
+               dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
+                       bus_speed);
+               dev_err(iproc_i2c->device,
+                       "valid speeds are 100khz and 400khz\n");
+               return -EINVAL;
+       } else if (bus_speed < 400000) {
+               bus_speed = 100000;
+       } else {
+               bus_speed = 400000;
+       }
+
+       val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
+       val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
+       val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
+       writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
+
+       dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+       u32 val;
+
+       /* put controller in reset */
+       val = readl(iproc_i2c->base + CFG_OFFSET);
+       val |= 1 << CFG_RESET_SHIFT;
+       val &= ~(1 << CFG_EN_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+
+       /* wait 100 usec per spec */
+       udelay(100);
+
+       /* bring controller out of reset */
+       val &= ~(1 << CFG_RESET_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+
+       /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
+       val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
+       writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+
+       /* disable all interrupts */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+
+       /* clear all pending interrupts */
+       writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
+
+       return 0;
+}
+
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                        bool enable)
+{
+       u32 val;
+
+       val = readl(iproc_i2c->base + CFG_OFFSET);
+       if (enable)
+               val |= BIT(CFG_EN_SHIFT);
+       else
+               val &= ~BIT(CFG_EN_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+}
+
+static int bcm_iproc_i2c_probe(struct platform_device *pdev)
+{
+       int irq, ret = 0;
+       struct bcm_iproc_i2c_dev *iproc_i2c;
+       struct i2c_adapter *adap;
+       struct resource *res;
+
+       iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
+                                GFP_KERNEL);
+       if (!iproc_i2c)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, iproc_i2c);
+       iproc_i2c->device = &pdev->dev;
+       init_completion(&iproc_i2c->done);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res);
+       if (IS_ERR(iproc_i2c->base))
+               return PTR_ERR(iproc_i2c->base);
+
+       ret = bcm_iproc_i2c_init(iproc_i2c);
+       if (ret)
+               return ret;
+
+       ret = bcm_iproc_i2c_cfg_speed(iproc_i2c);
+       if (ret)
+               return ret;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(iproc_i2c->device, "no irq resource\n");
+               return irq;
+       }
+       iproc_i2c->irq = irq;
+
+       ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
+                              pdev->name, iproc_i2c);
+       if (ret < 0) {
+               dev_err(iproc_i2c->device, "unable to request irq %i\n", irq);
+               return ret;
+       }
+
+       bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+
+       adap = &iproc_i2c->adapter;
+       i2c_set_adapdata(adap, iproc_i2c);
+       strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name));
+       adap->algo = &bcm_iproc_algo;
+       adap->dev.parent = &pdev->dev;
+       adap->dev.of_node = pdev->dev.of_node;
+
+       ret = i2c_add_adapter(adap);
+       if (ret) {
+               dev_err(iproc_i2c->device, "failed to add adapter\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_remove(struct platform_device *pdev)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
+
+       /* make sure there's no pending interrupt when we remove the adapter */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+       readl(iproc_i2c->base + IE_OFFSET);
+       synchronize_irq(iproc_i2c->irq);
+
+       i2c_del_adapter(&iproc_i2c->adapter);
+       bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+
+       return 0;
+}
+
+static const struct of_device_id bcm_iproc_i2c_of_match[] = {
+       { .compatible = "brcm,iproc-i2c" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match);
+
+static struct platform_driver bcm_iproc_i2c_driver = {
+       .driver = {
+               .name = "bcm-iproc-i2c",
+               .of_match_table = bcm_iproc_i2c_of_match,
+       },
+       .probe = bcm_iproc_i2c_probe,
+       .remove = bcm_iproc_i2c_remove,
+};
+module_platform_driver(bcm_iproc_i2c_driver);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iProc I2C Driver");
+MODULE_LICENSE("GPL v2");
index 626f74ecd4be4ad40a0b45b2c6730c0afd4b0528..7d7a14cdadfb187d5fd4f95918146c8c6555443a 100644 (file)
  * @suspended:         Flag holding the device's PM status
  * @send_count:                Number of bytes still expected to send
  * @recv_count:                Number of bytes still expected to receive
+ * @curr_recv_count:   Number of bytes to be received in current transfer
  * @irq:               IRQ number
  * @input_clk:         Input clock to I2C controller
  * @i2c_clk:           Maximum I2C clock speed
@@ -146,6 +147,7 @@ struct cdns_i2c {
        u8 suspended;
        unsigned int send_count;
        unsigned int recv_count;
+       unsigned int curr_recv_count;
        int irq;
        unsigned long input_clk;
        unsigned int i2c_clk;
@@ -182,14 +184,15 @@ static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id)
  */
 static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
 {
-       unsigned int isr_status, avail_bytes;
-       unsigned int bytes_to_recv, bytes_to_send;
+       unsigned int isr_status, avail_bytes, updatetx;
+       unsigned int bytes_to_send;
        struct cdns_i2c *id = ptr;
        /* Signal completion only after everything is updated */
        int done_flag = 0;
        irqreturn_t status = IRQ_NONE;
 
        isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+       cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
 
        /* Handling nack and arbitration lost interrupt */
        if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
@@ -197,89 +200,112 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
                status = IRQ_HANDLED;
        }
 
-       /* Handling Data interrupt */
-       if ((isr_status & CDNS_I2C_IXR_DATA) &&
-                       (id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) {
-               /* Always read data interrupt threshold bytes */
-               bytes_to_recv = CDNS_I2C_DATA_INTR_DEPTH;
-               id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH;
-               avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
-
-               /*
-                * if the tranfer size register value is zero, then
-                * check for the remaining bytes and update the
-                * transfer size register.
-                */
-               if (!avail_bytes) {
-                       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
-                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                       else
-                               cdns_i2c_writereg(id->recv_count,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-               }
+       /*
+        * Check if transfer size register needs to be updated again for a
+        * large data receive operation.
+        */
+       updatetx = 0;
+       if (id->recv_count > id->curr_recv_count)
+               updatetx = 1;
+
+       /* When receiving, handle data interrupt and completion interrupt */
+       if (id->p_recv_buf &&
+           ((isr_status & CDNS_I2C_IXR_COMP) ||
+            (isr_status & CDNS_I2C_IXR_DATA))) {
+               /* Read data if receive data valid is set */
+               while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
+                      CDNS_I2C_SR_RXDV) {
+                       /*
+                        * Clear hold bit that was set for FIFO control if
+                        * RX data left is less than FIFO depth, unless
+                        * repeated start is selected.
+                        */
+                       if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
+                           !id->bus_hold_flag)
+                               cdns_i2c_clear_bus_hold(id);
 
-               /* Process the data received */
-               while (bytes_to_recv--)
                        *(id->p_recv_buf)++ =
                                cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+                       id->recv_count--;
+                       id->curr_recv_count--;
 
-               if (!id->bus_hold_flag &&
-                               (id->recv_count <= CDNS_I2C_FIFO_DEPTH))
-                       cdns_i2c_clear_bus_hold(id);
+                       if (updatetx &&
+                           (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1))
+                               break;
+               }
 
-               status = IRQ_HANDLED;
-       }
+               /*
+                * The controller sends NACK to the slave when transfer size
+                * register reaches zero without considering the HOLD bit.
+                * This workaround is implemented for large data transfers to
+                * maintain transfer size non-zero while performing a large
+                * receive operation.
+                */
+               if (updatetx &&
+                   (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) {
+                       /* wait while fifo is full */
+                       while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
+                              (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
+                               ;
 
-       /* Handling Transfer Complete interrupt */
-       if (isr_status & CDNS_I2C_IXR_COMP) {
-               if (!id->p_recv_buf) {
                        /*
-                        * If the device is sending data If there is further
-                        * data to be sent. Calculate the available space
-                        * in FIFO and fill the FIFO with that many bytes.
+                        * Check number of bytes to be received against maximum
+                        * transfer size and update register accordingly.
                         */
-                       if (id->send_count) {
-                               avail_bytes = CDNS_I2C_FIFO_DEPTH -
-                                   cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
-                               if (id->send_count > avail_bytes)
-                                       bytes_to_send = avail_bytes;
-                               else
-                                       bytes_to_send = id->send_count;
-
-                               while (bytes_to_send--) {
-                                       cdns_i2c_writereg(
-                                               (*(id->p_send_buf)++),
-                                                CDNS_I2C_DATA_OFFSET);
-                                       id->send_count--;
-                               }
+                       if (((int)(id->recv_count) - CDNS_I2C_FIFO_DEPTH) >
+                           CDNS_I2C_TRANSFER_SIZE) {
+                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+                                                 CDNS_I2C_XFER_SIZE_OFFSET);
+                               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE +
+                                                     CDNS_I2C_FIFO_DEPTH;
                        } else {
-                               /*
-                                * Signal the completion of transaction and
-                                * clear the hold bus bit if there are no
-                                * further messages to be processed.
-                                */
-                               done_flag = 1;
+                               cdns_i2c_writereg(id->recv_count -
+                                                 CDNS_I2C_FIFO_DEPTH,
+                                                 CDNS_I2C_XFER_SIZE_OFFSET);
+                               id->curr_recv_count = id->recv_count;
                        }
-                       if (!id->send_count && !id->bus_hold_flag)
-                               cdns_i2c_clear_bus_hold(id);
-               } else {
+               }
+
+               /* Clear hold (if not repeated start) and signal completion */
+               if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) {
                        if (!id->bus_hold_flag)
                                cdns_i2c_clear_bus_hold(id);
+                       done_flag = 1;
+               }
+
+               status = IRQ_HANDLED;
+       }
+
+       /* When sending, handle transfer complete interrupt */
+       if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) {
+               /*
+                * If there is more data to be sent, calculate the
+                * space available in FIFO and fill with that many bytes.
+                */
+               if (id->send_count) {
+                       avail_bytes = CDNS_I2C_FIFO_DEPTH -
+                           cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+                       if (id->send_count > avail_bytes)
+                               bytes_to_send = avail_bytes;
+                       else
+                               bytes_to_send = id->send_count;
+
+                       while (bytes_to_send--) {
+                               cdns_i2c_writereg(
+                                       (*(id->p_send_buf)++),
+                                        CDNS_I2C_DATA_OFFSET);
+                               id->send_count--;
+                       }
+               } else {
                        /*
-                        * If the device is receiving data, then signal
-                        * the completion of transaction and read the data
-                        * present in the FIFO. Signal the completion of
-                        * transaction.
+                        * Signal the completion of transaction and
+                        * clear the hold bus bit if there are no
+                        * further messages to be processed.
                         */
-                       while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
-                                       CDNS_I2C_SR_RXDV) {
-                               *(id->p_recv_buf)++ =
-                                       cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
-                               id->recv_count--;
-                       }
                        done_flag = 1;
                }
+               if (!id->send_count && !id->bus_hold_flag)
+                       cdns_i2c_clear_bus_hold(id);
 
                status = IRQ_HANDLED;
        }
@@ -289,8 +315,6 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
        if (id->err_status)
                status = IRQ_HANDLED;
 
-       cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
-
        if (done_flag)
                complete(&id->xfer_done);
 
@@ -316,6 +340,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
        if (id->p_msg->flags & I2C_M_RECV_LEN)
                id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
 
+       id->curr_recv_count = id->recv_count;
+
        /*
         * Check for the message size against FIFO depth and set the
         * 'hold bus' bit if it is greater than FIFO depth.
@@ -335,11 +361,14 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
         * receive if it is less than transfer size and transfer size if
         * it is more. Enable the interrupts.
         */
-       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
+       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
                cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
                                  CDNS_I2C_XFER_SIZE_OFFSET);
-       else
+               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
+       } else {
                cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);
+       }
+
        /* Clear the bus hold flag if bytes to receive is less than FIFO size */
        if (!id->bus_hold_flag &&
                ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
@@ -516,6 +545,20 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
         * processed with a repeated start.
         */
        if (num > 1) {
+               /*
+                * This controller does not give completion interrupt after a
+                * master receive message if HOLD bit is set (repeated start),
+                * resulting in SW timeout. Hence, if a receive message is
+                * followed by any other message, an error is returned
+                * indicating that this sequence is not supported.
+                */
+               for (count = 0; count < num - 1; count++) {
+                       if (msgs[count].flags & I2C_M_RD) {
+                               dev_warn(adap->dev.parent,
+                                        "Can't do repeated start after a receive message\n");
+                               return -EOPNOTSUPP;
+                       }
+               }
                id->bus_hold_flag = 1;
                reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
                reg |= CDNS_I2C_CR_HOLD;
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
new file mode 100644 (file)
index 0000000..5f1ff4c
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Intel BayTrail PMIC I2C bus semaphore implementation
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <asm/iosf_mbi.h>
+#include "i2c-designware-core.h"
+
+#define SEMAPHORE_TIMEOUT      100
+#define PUNIT_SEMAPHORE                0x7
+
+static unsigned long acquired;
+
+/* Read the PUNIT side-band semaphore register and return its ownership bit. */
+static int get_sem(struct device *dev, u32 *sem)
+{
+       u32 reg_val;
+       int ret;
+
+       ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
+                           &reg_val);
+       if (ret) {
+               dev_err(dev, "iosf failed to read punit semaphore\n");
+               return ret;
+       }
+
+       /* Bit 0 set means the host currently owns the bus */
+       *sem = reg_val & 0x1;
+
+       return 0;
+}
+
+/* Clear the semaphore ownership bit (bit 0), handing the bus back to PUNIT. */
+static void reset_semaphore(struct device *dev)
+{
+       u32 data;
+
+       if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+                               PUNIT_SEMAPHORE, &data)) {
+               dev_err(dev, "iosf failed to reset punit semaphore during read\n");
+               return;
+       }
+
+       /* Read-modify-write: clear only bit 0, preserve the other bits */
+       data = data & 0xfffffffe;
+       if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+                                PUNIT_SEMAPHORE, data))
+               dev_err(dev, "iosf failed to reset punit semaphore during write\n");
+}
+
+/*
+ * Acquire shared ownership of the I2C bus from the PUNIT.
+ *
+ * Requests the semaphore by writing 0x2 to the side-band register, then
+ * polls until the grant bit (bit 0) is set or SEMAPHORE_TIMEOUT ms elapse.
+ * Returns 0 on success, -ENODEV for bad arguments, a negative iosf error
+ * code on register access failure, or -ETIMEDOUT if never granted.
+ */
+int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
+{
+       u32 sem = 0;
+       int ret;
+       unsigned long start, end;
+
+       if (!dev || !dev->dev)
+               return -ENODEV;
+
+       /* No hardware semaphore on this platform: nothing to do */
+       if (!dev->acquire_lock)
+               return 0;
+
+       /* host driver writes 0x2 to side band semaphore register */
+       ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+                                PUNIT_SEMAPHORE, 0x2);
+       if (ret) {
+               dev_err(dev->dev, "iosf punit semaphore request failed\n");
+               return ret;
+       }
+
+       /* host driver waits for bit 0 to be set in semaphore register */
+       start = jiffies;
+       end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
+       while (!time_after(jiffies, end)) {
+               ret = get_sem(dev->dev, &sem);
+               if (!ret && sem) {
+                       /* Record grant time so release can log the hold time */
+                       acquired = jiffies;
+                       dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
+                               jiffies_to_msecs(jiffies - start));
+                       return 0;
+               }
+
+               usleep_range(1000, 2000);
+       }
+
+       dev_err(dev->dev, "punit semaphore timed out, resetting\n");
+       reset_semaphore(dev->dev);
+
+       /*
+        * Dump the current semaphore value to help debug the timeout.
+        * iosf_mbi_read() returns 0 on success, so the value is only
+        * valid (and only printed) when ret == 0.
+        */
+       ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+               PUNIT_SEMAPHORE, &sem);
+       if (ret)
+               dev_err(dev->dev, "iosf failed to read punit semaphore\n");
+       else
+               dev_err(dev->dev, "PUNIT SEM: %u\n", sem);
+
+       WARN_ON(1);
+
+       return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(baytrail_i2c_acquire);
+
+/* Release bus ownership back to the PUNIT and log how long it was held. */
+void baytrail_i2c_release(struct dw_i2c_dev *dev)
+{
+       if (!dev || !dev->dev)
+               return;
+
+       /* No hardware semaphore on this platform: nothing to release */
+       if (!dev->acquire_lock)
+               return;
+
+       reset_semaphore(dev->dev);
+       dev_dbg(dev->dev, "punit semaphore held for %ums\n",
+               jiffies_to_msecs(jiffies - acquired));
+}
+EXPORT_SYMBOL(baytrail_i2c_release);
+
+/*
+ * Query the ACPI "_SEM" method to learn whether this I2C bus is shared
+ * with the PUNIT.  If shared, install the semaphore acquire/release hooks
+ * and mark runtime PM as disabled for the controller.
+ *
+ * Returns -EPROBE_DEFER when the IOSF MBI driver backing the semaphore
+ * accesses is not available yet, 0 otherwise (including "not shared").
+ */
+int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
+{
+       acpi_status status;
+       unsigned long long shared_host = 0;
+       acpi_handle handle;
+
+       if (!dev || !dev->dev)
+               return 0;
+
+       handle = ACPI_HANDLE(dev->dev);
+       if (!handle)
+               return 0;
+
+       status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
+
+       /* Missing _SEM method simply means the bus is not shared */
+       if (ACPI_FAILURE(status))
+               return 0;
+
+       if (shared_host) {
+               dev_info(dev->dev, "I2C bus managed by PUNIT\n");
+               dev->acquire_lock = baytrail_i2c_acquire;
+               dev->release_lock = baytrail_i2c_release;
+               dev->pm_runtime_disabled = true;
+       }
+
+       /* The hooks above depend on IOSF MBI; defer probe until it loads */
+       if (!iosf_mbi_available())
+               return -EPROBE_DEFER;
+
+       return 0;
+}
+EXPORT_SYMBOL(i2c_dw_eval_lock_support);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
+MODULE_LICENSE("GPL v2");
index 23628b7bfb8d8df208c6e434efb95e887dfad6e6..6e25c010e69037a544ad04f82df46e27e80ca9af 100644 (file)
@@ -170,10 +170,10 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
        u32 value;
 
        if (dev->accessor_flags & ACCESS_16BIT)
-               value = readw(dev->base + offset) |
-                       (readw(dev->base + offset + 2) << 16);
+               value = readw_relaxed(dev->base + offset) |
+                       (readw_relaxed(dev->base + offset + 2) << 16);
        else
-               value = readl(dev->base + offset);
+               value = readl_relaxed(dev->base + offset);
 
        if (dev->accessor_flags & ACCESS_SWAP)
                return swab32(value);
@@ -187,10 +187,10 @@ void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
                b = swab32(b);
 
        if (dev->accessor_flags & ACCESS_16BIT) {
-               writew((u16)b, dev->base + offset);
-               writew((u16)(b >> 16), dev->base + offset + 2);
+               writew_relaxed((u16)b, dev->base + offset);
+               writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
        } else {
-               writel(b, dev->base + offset);
+               writel_relaxed(b, dev->base + offset);
        }
 }
 
@@ -285,6 +285,15 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        u32 hcnt, lcnt;
        u32 reg;
        u32 sda_falling_time, scl_falling_time;
+       int ret;
+
+       if (dev->acquire_lock) {
+               ret = dev->acquire_lock(dev);
+               if (ret) {
+                       dev_err(dev->dev, "couldn't acquire bus ownership\n");
+                       return ret;
+               }
+       }
 
        input_clock_khz = dev->get_clk_rate_khz(dev);
 
@@ -298,6 +307,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        } else if (reg != DW_IC_COMP_TYPE_VALUE) {
                dev_err(dev->dev, "Unknown Synopsys component type: "
                        "0x%08x\n", reg);
+               if (dev->release_lock)
+                       dev->release_lock(dev);
                return -ENODEV;
        }
 
@@ -309,40 +320,39 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
        scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
 
-       /* Standard-mode */
-       hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-                               4000,   /* tHD;STA = tHIGH = 4.0 us */
-                               sda_falling_time,
-                               0,      /* 0: DW default, 1: Ideal */
-                               0);     /* No offset */
-       lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-                               4700,   /* tLOW = 4.7 us */
-                               scl_falling_time,
-                               0);     /* No offset */
-
-       /* Allow platforms to specify the ideal HCNT and LCNT values */
+       /* Set SCL timing parameters for standard-mode */
        if (dev->ss_hcnt && dev->ss_lcnt) {
                hcnt = dev->ss_hcnt;
                lcnt = dev->ss_lcnt;
+       } else {
+               hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+                                       4000,   /* tHD;STA = tHIGH = 4.0 us */
+                                       sda_falling_time,
+                                       0,      /* 0: DW default, 1: Ideal */
+                                       0);     /* No offset */
+               lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+                                       4700,   /* tLOW = 4.7 us */
+                                       scl_falling_time,
+                                       0);     /* No offset */
        }
        dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
        dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
        dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
-       /* Fast-mode */
-       hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-                               600,    /* tHD;STA = tHIGH = 0.6 us */
-                               sda_falling_time,
-                               0,      /* 0: DW default, 1: Ideal */
-                               0);     /* No offset */
-       lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-                               1300,   /* tLOW = 1.3 us */
-                               scl_falling_time,
-                               0);     /* No offset */
-
+       /* Set SCL timing parameters for fast-mode */
        if (dev->fs_hcnt && dev->fs_lcnt) {
                hcnt = dev->fs_hcnt;
                lcnt = dev->fs_lcnt;
+       } else {
+               hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+                                       600,    /* tHD;STA = tHIGH = 0.6 us */
+                                       sda_falling_time,
+                                       0,      /* 0: DW default, 1: Ideal */
+                                       0);     /* No offset */
+               lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+                                       1300,   /* tLOW = 1.3 us */
+                                       scl_falling_time,
+                                       0);     /* No offset */
        }
        dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
        dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
@@ -364,6 +374,9 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 
        /* configure the i2c master */
        dw_writel(dev, dev->master_cfg , DW_IC_CON);
+
+       if (dev->release_lock)
+               dev->release_lock(dev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(i2c_dw_init);
@@ -627,6 +640,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        dev->abort_source = 0;
        dev->rx_outstanding = 0;
 
+       if (dev->acquire_lock) {
+               ret = dev->acquire_lock(dev);
+               if (ret) {
+                       dev_err(dev->dev, "couldn't acquire bus ownership\n");
+                       goto done_nolock;
+               }
+       }
+
        ret = i2c_dw_wait_bus_not_busy(dev);
        if (ret < 0)
                goto done;
@@ -672,6 +693,10 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        ret = -EIO;
 
 done:
+       if (dev->release_lock)
+               dev->release_lock(dev);
+
+done_nolock:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
        mutex_unlock(&dev->lock);
index 5a410ef17abd40c0ab7c7ece3ab3fa539adf1f95..9630222abf32197f48d580e43a14b049821f570f 100644 (file)
@@ -61,6 +61,9 @@
  * @ss_lcnt: standard speed LCNT value
  * @fs_hcnt: fast speed HCNT value
  * @fs_lcnt: fast speed LCNT value
+ * @acquire_lock: function to acquire a hardware lock on the bus
+ * @release_lock: function to release a hardware lock on the bus
+ * @pm_runtime_disabled: true if pm runtime is disabled
  *
  * HCNT and LCNT parameters can be used if the platform knows more accurate
  * values than the one computed based only on the input clock frequency.
@@ -101,6 +104,9 @@ struct dw_i2c_dev {
        u16                     ss_lcnt;
        u16                     fs_hcnt;
        u16                     fs_lcnt;
+       int                     (*acquire_lock)(struct dw_i2c_dev *dev);
+       void                    (*release_lock)(struct dw_i2c_dev *dev);
+       bool                    pm_runtime_disabled;
 };
 
 #define ACCESS_SWAP            0x00000001
@@ -119,3 +125,9 @@ extern void i2c_dw_disable(struct dw_i2c_dev *dev);
 extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
 extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
 extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
+
+#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
+extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
+#else
+static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
+#endif
index acb40f95db78f512c561f6d4d2b5b67479844811..6643d2dc0b250ddbf022c669db4fd2b4b4f848e7 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 2006 Texas Instruments.
  * Copyright (C) 2007 MontaVista Software Inc.
  * Copyright (C) 2009 Provigent Ltd.
- * Copyright (C) 2011 Intel corporation.
+ * Copyright (C) 2011, 2015 Intel Corporation.
  *
  * ----------------------------------------------------------------------------
  *
 #define DRIVER_NAME "i2c-designware-pci"
 
 enum dw_pci_ctl_id_t {
-       moorestown_0,
-       moorestown_1,
-       moorestown_2,
-
        medfield_0,
        medfield_1,
        medfield_2,
@@ -101,28 +97,7 @@ static struct dw_scl_sda_cfg hsw_config = {
        .sda_hold = 0x9,
 };
 
-static struct  dw_pci_controller  dw_pci_controllers[] = {
-       [moorestown_0] = {
-               .bus_num     = 0,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
-       [moorestown_1] = {
-               .bus_num     = 1,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
-       [moorestown_2] = {
-               .bus_num     = 2,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
+static struct dw_pci_controller dw_pci_controllers[] = {
        [medfield_0] = {
                .bus_num     = 0,
                .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
@@ -170,7 +145,6 @@ static struct  dw_pci_controller  dw_pci_controllers[] = {
                .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
                .tx_fifo_depth = 32,
                .rx_fifo_depth = 32,
-               .clk_khz = 100000,
                .functionality = I2C_FUNC_10BIT_ADDR,
                .scl_sda_cfg = &byt_config,
        },
@@ -179,7 +153,6 @@ static struct  dw_pci_controller  dw_pci_controllers[] = {
                .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
                .tx_fifo_depth = 32,
                .rx_fifo_depth = 32,
-               .clk_khz = 100000,
                .functionality = I2C_FUNC_10BIT_ADDR,
                .scl_sda_cfg = &hsw_config,
        },
@@ -259,7 +232,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
        dev->functionality = controller->functionality |
                                DW_DEFAULT_FUNCTIONALITY;
 
-       dev->master_cfg =  controller->bus_cfg;
+       dev->master_cfg = controller->bus_cfg;
        if (controller->scl_sda_cfg) {
                cfg = controller->scl_sda_cfg;
                dev->ss_hcnt = cfg->ss_hcnt;
@@ -325,12 +298,8 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
 MODULE_ALIAS("i2c_designware-pci");
 
 static const struct pci_device_id i2_designware_pci_ids[] = {
-       /* Moorestown */
-       { PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
-       { PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
-       { PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
        /* Medfield */
-       { PCI_VDEVICE(INTEL, 0x0817), medfield_3,},
+       { PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
        { PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
        { PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
        { PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
@@ -348,7 +317,7 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x9c61), haswell },
        { PCI_VDEVICE(INTEL, 0x9c62), haswell },
        /* Braswell / Cherrytrail */
-       { PCI_VDEVICE(INTEL, 0x22C1), baytrail,},
+       { PCI_VDEVICE(INTEL, 0x22C1), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C2), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C3), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C4), baytrail },
index 2b463c313e4e03305282b76c5fdefa8f90a48874..c270f5f9a8f9af3d3712bbd0f99874708875aa18 100644 (file)
@@ -195,6 +195,10 @@ static int dw_i2c_probe(struct platform_device *pdev)
                        clk_freq = pdata->i2c_scl_freq;
        }
 
+       r = i2c_dw_eval_lock_support(dev);
+       if (r)
+               return r;
+
        dev->functionality =
                I2C_FUNC_I2C |
                I2C_FUNC_10BIT_ADDR |
@@ -257,10 +261,14 @@ static int dw_i2c_probe(struct platform_device *pdev)
                return r;
        }
 
-       pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
-       pm_runtime_use_autosuspend(&pdev->dev);
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
+       if (dev->pm_runtime_disabled) {
+               pm_runtime_forbid(&pdev->dev);
+       } else {
+               pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+               pm_runtime_use_autosuspend(&pdev->dev);
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        return 0;
 }
@@ -310,7 +318,9 @@ static int dw_i2c_resume(struct device *dev)
        struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
 
        clk_prepare_enable(i_dev->clk);
-       i2c_dw_init(i_dev);
+
+       if (!i_dev->pm_runtime_disabled)
+               i2c_dw_init(i_dev);
 
        return 0;
 }
index 7f3a9fe9bf4e729a2c446ba905a117a325f17621..d7b26fc6f432005bb02a2394fd4bfb6720271ab1 100644 (file)
@@ -201,7 +201,7 @@ struct imx_i2c_struct {
        void __iomem            *base;
        wait_queue_head_t       queue;
        unsigned long           i2csr;
-       unsigned int            disable_delay;
+       unsigned int            disable_delay;
        int                     stopped;
        unsigned int            ifdr; /* IMX_I2C_IFDR */
        unsigned int            cur_clk;
@@ -295,7 +295,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
        dma->chan_tx = dma_request_slave_channel(dev, "tx");
        if (!dma->chan_tx) {
                dev_dbg(dev, "can't request DMA tx channel\n");
-               ret = -ENODEV;
                goto fail_al;
        }
 
@@ -313,7 +312,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
        dma->chan_rx = dma_request_slave_channel(dev, "rx");
        if (!dma->chan_rx) {
                dev_dbg(dev, "can't request DMA rx channel\n");
-               ret = -ENODEV;
                goto fail_tx;
        }
 
@@ -481,8 +479,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
        i2c_clk_rate = clk_get_rate(i2c_imx->clk);
        if (i2c_imx->cur_clk == i2c_clk_rate)
                return;
-       else
-               i2c_imx->cur_clk = i2c_clk_rate;
+
+       i2c_imx->cur_clk = i2c_clk_rate;
 
        div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
        if (div < i2c_clk_div[0].div)
@@ -490,7 +488,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
        else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
                i = i2c_imx->hwdata->ndivs - 1;
        else
-               for (i = 0; i2c_clk_div[i].div < div; i++);
+               for (i = 0; i2c_clk_div[i].div < div; i++)
+                       ;
 
        /* Store divider value */
        i2c_imx->ifdr = i2c_clk_div[i].val;
@@ -628,9 +627,9 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
        result = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
-       if (result <= 0) {
+       if (result == 0) {
                dmaengine_terminate_all(dma->chan_using);
-               return result ?: -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        /* Waiting for transfer complete. */
@@ -686,9 +685,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
        result = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
-       if (result <= 0) {
+       if (result == 0) {
                dmaengine_terminate_all(dma->chan_using);
-               return result ?: -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        /* waiting for transfer complete. */
@@ -822,6 +821,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
        /* read data */
        for (i = 0; i < msgs->len; i++) {
                u8 len = 0;
+
                result = i2c_imx_trx_complete(i2c_imx);
                if (result)
                        return result;
@@ -917,15 +917,16 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
                /* write/read data */
 #ifdef CONFIG_I2C_DEBUG_BUS
                temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
-               dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, "
-                       "MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__,
+               dev_dbg(&i2c_imx->adapter.dev,
+                       "<%s> CONTROL: IEN=%d, IIEN=%d, MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n",
+                       __func__,
                        (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0),
                        (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0),
                        (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0));
                temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
                dev_dbg(&i2c_imx->adapter.dev,
-                       "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, "
-                       "IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__,
+                       "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n",
+                       __func__,
                        (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0),
                        (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0),
                        (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
@@ -1004,7 +1005,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
        i2c_imx->adapter.owner          = THIS_MODULE;
        i2c_imx->adapter.algo           = &i2c_imx_algo;
        i2c_imx->adapter.dev.parent     = &pdev->dev;
-       i2c_imx->adapter.nr             = pdev->id;
+       i2c_imx->adapter.nr             = pdev->id;
        i2c_imx->adapter.dev.of_node    = pdev->dev.of_node;
        i2c_imx->base                   = base;
 
@@ -1063,7 +1064,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
                i2c_imx->adapter.name);
        dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
 
-       /* Init DMA config if support*/
+       /* Init DMA config if supported */
        i2c_imx_dma_request(i2c_imx, phy_addr);
 
        return 0;   /* Return OK */
index 7249b5b1e5d091bbd9d906d4bc16cd89fbd36d82..abf5db7e441ebab65fc7c8ad99b5f9bca6218b15 100644 (file)
@@ -12,6 +12,7 @@
  * kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -35,7 +36,9 @@ struct ocores_i2c {
        int pos;
        int nmsgs;
        int state; /* see STATE_ */
-       int clock_khz;
+       struct clk *clk;
+       int ip_clock_khz;
+       int bus_clock_khz;
        void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
        u8 (*getreg)(struct ocores_i2c *i2c, int reg);
 };
@@ -215,21 +218,34 @@ static int ocores_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
                return -ETIMEDOUT;
 }
 
-static void ocores_init(struct ocores_i2c *i2c)
+static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
 {
        int prescale;
+       int diff;
        u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL);
 
        /* make sure the device is disabled */
        oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
-       prescale = (i2c->clock_khz / (5*100)) - 1;
+       prescale = (i2c->ip_clock_khz / (5 * i2c->bus_clock_khz)) - 1;
+       prescale = clamp(prescale, 0, 0xffff);
+
+       diff = i2c->ip_clock_khz / (5 * (prescale + 1)) - i2c->bus_clock_khz;
+       if (abs(diff) > i2c->bus_clock_khz / 10) {
+               dev_err(dev,
+                       "Unsupported clock settings: core: %d KHz, bus: %d KHz\n",
+                       i2c->ip_clock_khz, i2c->bus_clock_khz);
+               return -EINVAL;
+       }
+
        oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff);
        oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
 
        /* Init the device */
        oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
        oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN | OCI2C_CTRL_EN);
+
+       return 0;
 }
 
 
@@ -304,6 +320,8 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *match;
        u32 val;
+       u32 clock_frequency;
+       bool clock_frequency_present;
 
        if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
                /* no 'reg-shift', check for deprecated 'regstep' */
@@ -319,12 +337,42 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
                }
        }
 
-       if (of_property_read_u32(np, "clock-frequency", &val)) {
-               dev_err(&pdev->dev,
-                       "Missing required parameter 'clock-frequency'\n");
-               return -ENODEV;
+       clock_frequency_present = !of_property_read_u32(np, "clock-frequency",
+                                                       &clock_frequency);
+       i2c->bus_clock_khz = 100;
+
+       i2c->clk = devm_clk_get(&pdev->dev, NULL);
+
+       if (!IS_ERR(i2c->clk)) {
+               int ret = clk_prepare_enable(i2c->clk);
+
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "clk_prepare_enable failed: %d\n", ret);
+                       return ret;
+               }
+               i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000;
+               if (clock_frequency_present)
+                       i2c->bus_clock_khz = clock_frequency / 1000;
+       }
+
+       if (i2c->ip_clock_khz == 0) {
+               if (of_property_read_u32(np, "opencores,ip-clock-frequency",
+                                               &val)) {
+                       if (!clock_frequency_present) {
+                               dev_err(&pdev->dev,
+                                       "Missing required parameter 'opencores,ip-clock-frequency'\n");
+                               return -ENODEV;
+                       }
+                       i2c->ip_clock_khz = clock_frequency / 1000;
+                       dev_warn(&pdev->dev,
+                                "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n");
+               } else {
+                       i2c->ip_clock_khz = val / 1000;
+                       if (clock_frequency_present)
+                               i2c->bus_clock_khz = clock_frequency / 1000;
+               }
        }
-       i2c->clock_khz = val / 1000;
 
        of_property_read_u32(pdev->dev.of_node, "reg-io-width",
                                &i2c->reg_io_width);
@@ -368,7 +416,8 @@ static int ocores_i2c_probe(struct platform_device *pdev)
        if (pdata) {
                i2c->reg_shift = pdata->reg_shift;
                i2c->reg_io_width = pdata->reg_io_width;
-               i2c->clock_khz = pdata->clock_khz;
+               i2c->ip_clock_khz = pdata->clock_khz;
+               i2c->bus_clock_khz = 100;
        } else {
                ret = ocores_i2c_of_probe(pdev, i2c);
                if (ret)
@@ -402,7 +451,9 @@ static int ocores_i2c_probe(struct platform_device *pdev)
                }
        }
 
-       ocores_init(i2c);
+       ret = ocores_init(&pdev->dev, i2c);
+       if (ret)
+               return ret;
 
        init_waitqueue_head(&i2c->wait);
        ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
@@ -446,6 +497,9 @@ static int ocores_i2c_remove(struct platform_device *pdev)
        /* remove adapter & data */
        i2c_del_adapter(&i2c->adap);
 
+       if (!IS_ERR(i2c->clk))
+               clk_disable_unprepare(i2c->clk);
+
        return 0;
 }
 
@@ -458,6 +512,8 @@ static int ocores_i2c_suspend(struct device *dev)
        /* make sure the device is disabled */
        oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
+       if (!IS_ERR(i2c->clk))
+               clk_disable_unprepare(i2c->clk);
        return 0;
 }
 
@@ -465,9 +521,20 @@ static int ocores_i2c_resume(struct device *dev)
 {
        struct ocores_i2c *i2c = dev_get_drvdata(dev);
 
-       ocores_init(i2c);
+       if (!IS_ERR(i2c->clk)) {
+               unsigned long rate;
+               int ret = clk_prepare_enable(i2c->clk);
 
-       return 0;
+               if (ret) {
+                       dev_err(dev,
+                               "clk_prepare_enable failed: %d\n", ret);
+                       return ret;
+               }
+               rate = clk_get_rate(i2c->clk) / 1000;
+               if (rate)
+                       i2c->ip_clock_khz = rate;
+       }
+       return ocores_init(dev, i2c);
 }
 
 static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
index 44f03eed00dd4f36ae655df26315699556a576a8..d37d9db6681e7b5745a45331ae60737a9b36f99a 100644 (file)
@@ -148,13 +148,6 @@ static inline u32 pmcmsptwi_clock_to_reg(
        return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff);
 }
 
-static inline void pmcmsptwi_reg_to_clock(
-                       u32 reg, struct pmcmsptwi_clock *clock)
-{
-       clock->filter = (reg >> 12) & 0xf;
-       clock->clock = reg & 0x03ff;
-}
-
 static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg)
 {
        return ((cfg->arbf & 0xf) << 12) |
index 92462843db663d09b1106468603b1b9eb65243d2..5f96b1b3e3a5a30e2163098e4afe94fc4a06deeb 100644 (file)
@@ -102,6 +102,9 @@ struct rk3x_i2c {
 
        /* Settings */
        unsigned int scl_frequency;
+       unsigned int scl_rise_ns;
+       unsigned int scl_fall_ns;
+       unsigned int sda_fall_ns;
 
        /* Synchronization & notification */
        spinlock_t lock;
@@ -435,6 +438,9 @@ out:
  *
  * @clk_rate: I2C input clock rate
  * @scl_rate: Desired SCL rate
+ * @scl_rise_ns: How many ns it takes for SCL to rise.
+ * @scl_fall_ns: How many ns it takes for SCL to fall.
+ * @sda_fall_ns: How many ns it takes for SDA to fall.
  * @div_low: Divider output for low
  * @div_high: Divider output for high
  *
@@ -443,11 +449,16 @@ out:
  * too high, we silently use the highest possible rate.
  */
 static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
+                             unsigned long scl_rise_ns,
+                             unsigned long scl_fall_ns,
+                             unsigned long sda_fall_ns,
                              unsigned long *div_low, unsigned long *div_high)
 {
-       unsigned long min_low_ns, min_high_ns;
-       unsigned long max_data_hold_ns;
+       unsigned long spec_min_low_ns, spec_min_high_ns;
+       unsigned long spec_setup_start, spec_max_data_hold_ns;
        unsigned long data_hold_buffer_ns;
+
+       unsigned long min_low_ns, min_high_ns;
        unsigned long max_low_ns, min_total_ns;
 
        unsigned long clk_rate_khz, scl_rate_khz;
@@ -469,29 +480,50 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
                scl_rate = 1000;
 
        /*
-        * min_low_ns:  The minimum number of ns we need to hold low
-        *              to meet i2c spec
-        * min_high_ns: The minimum number of ns we need to hold high
-        *              to meet i2c spec
-        * max_low_ns:  The maximum number of ns we can hold low
-        *              to meet i2c spec
+        * min_low_ns:  The minimum number of ns we need to hold low to
+        *              meet I2C specification, should include fall time.
+        * min_high_ns: The minimum number of ns we need to hold high to
+        *              meet I2C specification, should include rise time.
+        * max_low_ns:  The maximum number of ns we can hold low to meet
+        *              I2C specification.
         *
-        * Note: max_low_ns should be (max data hold time * 2 - buffer)
+        * Note: max_low_ns should be (maximum data hold time * 2 - buffer)
         *       This is because the i2c host on Rockchip holds the data line
         *       for half the low time.
         */
        if (scl_rate <= 100000) {
-               min_low_ns = 4700;
-               min_high_ns = 4000;
-               max_data_hold_ns = 3450;
+               /* Standard-mode */
+               spec_min_low_ns = 4700;
+               spec_setup_start = 4700;
+               spec_min_high_ns = 4000;
+               spec_max_data_hold_ns = 3450;
                data_hold_buffer_ns = 50;
        } else {
-               min_low_ns = 1300;
-               min_high_ns = 600;
-               max_data_hold_ns = 900;
+               /* Fast-mode */
+               spec_min_low_ns = 1300;
+               spec_setup_start = 600;
+               spec_min_high_ns = 600;
+               spec_max_data_hold_ns = 900;
                data_hold_buffer_ns = 50;
        }
-       max_low_ns = max_data_hold_ns * 2 - data_hold_buffer_ns;
+       min_high_ns = scl_rise_ns + spec_min_high_ns;
+
+       /*
+        * Timings for repeated start:
+        * - controller appears to drop SDA at .875x (7/8) programmed clk high.
+        * - controller appears to keep SCL high for 2x programmed clk high.
+        *
+        * We need to account for those rules in picking our "high" time so
+        * we meet tSU;STA and tHD;STA times.
+        */
+       min_high_ns = max(min_high_ns,
+               DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875));
+       min_high_ns = max(min_high_ns,
+               DIV_ROUND_UP((scl_rise_ns + spec_setup_start +
+                             sda_fall_ns + spec_min_high_ns), 2));
+
+       min_low_ns = scl_fall_ns + spec_min_low_ns;
+       max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
        min_total_ns = min_low_ns + min_high_ns;
 
        /* Adjust to avoid overflow */
@@ -510,8 +542,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
        min_div_for_hold = (min_low_div + min_high_div);
 
        /*
-        * This is the maximum divider so we don't go over the max.
-        * We don't round up here (we round down) since this is a max.
+        * This is the maximum divider so we don't go over the maximum.
+        * We don't round up here (we round down) since this is a maximum.
         */
        max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000);
 
@@ -544,7 +576,7 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
                ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns,
                                             scl_rate_khz * 8 * min_total_ns);
 
-               /* Don't allow it to go over the max */
+               /* Don't allow it to go over the maximum */
                if (ideal_low_div > max_low_div)
                        ideal_low_div = max_low_div;
 
@@ -588,9 +620,9 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
        u64 t_low_ns, t_high_ns;
        int ret;
 
-       ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, &div_low,
-                                &div_high);
-
+       ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns,
+                                i2c->scl_fall_ns, i2c->sda_fall_ns,
+                                &div_low, &div_high);
        WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
 
        clk_enable(i2c->clk);
@@ -633,9 +665,10 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
        switch (event) {
        case PRE_RATE_CHANGE:
                if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
-                                     &div_low, &div_high) != 0) {
+                                      i2c->scl_rise_ns, i2c->scl_fall_ns,
+                                      i2c->sda_fall_ns,
+                                      &div_low, &div_high) != 0)
                        return NOTIFY_STOP;
-               }
 
                /* scale up */
                if (ndata->new_rate > ndata->old_rate)
@@ -859,6 +892,24 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
                i2c->scl_frequency = DEFAULT_SCL_RATE;
        }
 
+       /*
+        * Read rise and fall time from device tree. If not available use
+        * the default maximum timing from the specification.
+        */
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns",
+                                &i2c->scl_rise_ns)) {
+               if (i2c->scl_frequency <= 100000)
+                       i2c->scl_rise_ns = 1000;
+               else
+                       i2c->scl_rise_ns = 300;
+       }
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns",
+                                &i2c->scl_fall_ns))
+               i2c->scl_fall_ns = 300;
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
+                                &i2c->sda_fall_ns))
+               i2c->sda_fall_ns = i2c->scl_fall_ns;
+
        strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
        i2c->adap.owner = THIS_MODULE;
        i2c->adap.algo = &rk3x_i2c_algorithm;
index 28b87e683503df42c2bc11ee36c95b505e04f884..29f14331dd9d01fcb5d66f1e74602b61da9593cf 100644 (file)
@@ -286,6 +286,7 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (rx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+               val = cpu_to_le32(val);
                memcpy(buf, &val, buf_remaining);
                buf_remaining = 0;
                rx_fifo_avail--;
@@ -344,6 +345,7 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (tx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                memcpy(&val, buf, buf_remaining);
+               val = le32_to_cpu(val);
 
                /* Again update before writing to FIFO to make sure isr sees. */
                i2c_dev->msg_buf_remaining = 0;
index e9eae57a2b50f77e3d25c4d9fcfa003728464740..210cf4874cb7ea2415df5fb3e1d30ec8065de5d4 100644 (file)
@@ -102,7 +102,7 @@ static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
                struct acpi_resource_i2c_serialbus *sb;
 
                sb = &ares->data.i2c_serial_bus;
-               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+               if (!info->addr && sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
                        info->addr = sb->slave_address;
                        if (sb->access_mode == ACPI_I2C_10BIT_MODE)
                                info->flags |= I2C_CLIENT_TEN;
@@ -698,101 +698,6 @@ static void i2c_device_shutdown(struct device *dev)
                driver->shutdown(client);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
-       struct i2c_client *client = i2c_verify_client(dev);
-       struct i2c_driver *driver;
-
-       if (!client || !dev->driver)
-               return 0;
-       driver = to_i2c_driver(dev->driver);
-       if (!driver->suspend)
-               return 0;
-       return driver->suspend(client, mesg);
-}
-
-static int i2c_legacy_resume(struct device *dev)
-{
-       struct i2c_client *client = i2c_verify_client(dev);
-       struct i2c_driver *driver;
-
-       if (!client || !dev->driver)
-               return 0;
-       driver = to_i2c_driver(dev->driver);
-       if (!driver->resume)
-               return 0;
-       return driver->resume(client);
-}
-
-static int i2c_device_pm_suspend(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_suspend(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_SUSPEND);
-}
-
-static int i2c_device_pm_resume(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_resume(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_freeze(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_freeze(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_FREEZE);
-}
-
-static int i2c_device_pm_thaw(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_thaw(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_poweroff(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_poweroff(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
-}
-
-static int i2c_device_pm_restore(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_restore(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-#else /* !CONFIG_PM_SLEEP */
-#define i2c_device_pm_suspend  NULL
-#define i2c_device_pm_resume   NULL
-#define i2c_device_pm_freeze   NULL
-#define i2c_device_pm_thaw     NULL
-#define i2c_device_pm_poweroff NULL
-#define i2c_device_pm_restore  NULL
-#endif /* !CONFIG_PM_SLEEP */
-
 static void i2c_client_dev_release(struct device *dev)
 {
        kfree(to_i2c_client(dev));
@@ -804,6 +709,7 @@ show_name(struct device *dev, struct device_attribute *attr, char *buf)
        return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
                       to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
 }
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
 static ssize_t
 show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -817,8 +723,6 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
 
        return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
 }
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
 static struct attribute *i2c_dev_attrs[] = {
@@ -827,29 +731,7 @@ static struct attribute *i2c_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL
 };
-
-static struct attribute_group i2c_dev_attr_group = {
-       .attrs          = i2c_dev_attrs,
-};
-
-static const struct attribute_group *i2c_dev_attr_groups[] = {
-       &i2c_dev_attr_group,
-       NULL
-};
-
-static const struct dev_pm_ops i2c_device_pm_ops = {
-       .suspend = i2c_device_pm_suspend,
-       .resume = i2c_device_pm_resume,
-       .freeze = i2c_device_pm_freeze,
-       .thaw = i2c_device_pm_thaw,
-       .poweroff = i2c_device_pm_poweroff,
-       .restore = i2c_device_pm_restore,
-       SET_RUNTIME_PM_OPS(
-               pm_generic_runtime_suspend,
-               pm_generic_runtime_resume,
-               NULL
-       )
-};
+ATTRIBUTE_GROUPS(i2c_dev);
 
 struct bus_type i2c_bus_type = {
        .name           = "i2c",
@@ -857,12 +739,11 @@ struct bus_type i2c_bus_type = {
        .probe          = i2c_device_probe,
        .remove         = i2c_device_remove,
        .shutdown       = i2c_device_shutdown,
-       .pm             = &i2c_device_pm_ops,
 };
 EXPORT_SYMBOL_GPL(i2c_bus_type);
 
 static struct device_type i2c_client_type = {
-       .groups         = i2c_dev_attr_groups,
+       .groups         = i2c_dev_groups,
        .uevent         = i2c_device_uevent,
        .release        = i2c_client_dev_release,
 };
@@ -1261,6 +1142,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
 
        return count;
 }
+static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 
 /*
  * And of course let the users delete the devices they instantiated, if
@@ -1315,8 +1197,6 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
                        "delete_device");
        return res;
 }
-
-static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
                                   i2c_sysfs_delete_device);
 
@@ -1326,18 +1206,10 @@ static struct attribute *i2c_adapter_attrs[] = {
        &dev_attr_delete_device.attr,
        NULL
 };
-
-static struct attribute_group i2c_adapter_attr_group = {
-       .attrs          = i2c_adapter_attrs,
-};
-
-static const struct attribute_group *i2c_adapter_attr_groups[] = {
-       &i2c_adapter_attr_group,
-       NULL
-};
+ATTRIBUTE_GROUPS(i2c_adapter);
 
 struct device_type i2c_adapter_type = {
-       .groups         = i2c_adapter_attr_groups,
+       .groups         = i2c_adapter_groups,
        .release        = i2c_adapter_dev_release,
 };
 EXPORT_SYMBOL_GPL(i2c_adapter_type);
@@ -1419,8 +1291,6 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
        if (of_get_property(node, "wakeup-source", NULL))
                info.flags |= I2C_CLIENT_WAKE;
 
-       request_module("%s%s", I2C_MODULE_PREFIX, info.type);
-
        result = i2c_new_device(adap, &info);
        if (result == NULL) {
                dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
@@ -1796,11 +1666,15 @@ void i2c_del_adapter(struct i2c_adapter *adap)
        /* device name is gone after device_unregister */
        dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
 
-       /* clean up the sysfs representation */
+       /* wait until all references to the device are gone
+        *
+        * FIXME: This is old code and should ideally be replaced by an
+        * alternative which results in decoupling the lifetime of the struct
+        * device from the i2c_adapter, like spi or netdev do. Any solution
+        * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled!
+        */
        init_completion(&adap->dev_released);
        device_unregister(&adap->dev);
-
-       /* wait for sysfs to drop all references */
        wait_for_completion(&adap->dev_released);
 
        /* free bus id */
@@ -1859,14 +1733,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
        if (res)
                return res;
 
-       /* Drivers should switch to dev_pm_ops instead. */
-       if (driver->suspend)
-               pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
-                       driver->driver.name);
-       if (driver->resume)
-               pr_warn("i2c-core: driver [%s] using legacy resume method\n",
-                       driver->driver.name);
-
        pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
        INIT_LIST_HEAD(&driver->clients);
index ec11b404b433737657957ef9dd0e559a955b45aa..3d8f4fe2e47e52eefff7da7967fe41bf0fafe801 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/i2c-mux.h>
 #include <linux/i2c/pca954x.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 
@@ -186,6 +187,8 @@ static int pca954x_probe(struct i2c_client *client,
 {
        struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
        struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
+       struct device_node *of_node = client->dev.of_node;
+       bool idle_disconnect_dt;
        struct gpio_desc *gpio;
        int num, force, class;
        struct pca954x *data;
@@ -217,8 +220,13 @@ static int pca954x_probe(struct i2c_client *client,
        data->type = id->driver_data;
        data->last_chan = 0;               /* force the first selection */
 
+       idle_disconnect_dt = of_node &&
+               of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+
        /* Now create an adapter for each channel */
        for (num = 0; num < chips[data->type].nchans; num++) {
+               bool idle_disconnect_pd = false;
+
                force = 0;                        /* dynamic adap number */
                class = 0;                        /* no class by default */
                if (pdata) {
@@ -229,12 +237,13 @@ static int pca954x_probe(struct i2c_client *client,
                        } else
                                /* discard unconfigured channels */
                                break;
+                       idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
                }
 
                data->virt_adaps[num] =
                        i2c_add_mux_adapter(adap, &client->dev, client,
                                force, num, class, pca954x_select_chan,
-                               (pdata && pdata->modes[num].deselect_on_exit)
+                               (idle_disconnect_pd || idle_disconnect_dt)
                                        ? pca954x_deselect_mux : NULL);
 
                if (data->virt_adaps[num] == NULL) {
index 4132935dc929a5a891b36908efd50978a67f7ea6..4011effe4c05d972959fb8fe9c3db297ee248421 100644 (file)
@@ -21,7 +21,7 @@ config IIO_BUFFER
 if IIO_BUFFER
 
 config IIO_BUFFER_CB
-boolean "IIO callback buffer used for push in-kernel interfaces"
+       bool "IIO callback buffer used for push in-kernel interfaces"
        help
          Should be selected by any drivers that do in-kernel push
          usage.  That is, those where the data is pushed to the consumer.
@@ -43,7 +43,7 @@ config IIO_TRIGGERED_BUFFER
 endif # IIO_BUFFER
 
 config IIO_TRIGGER
-       boolean "Enable triggered sampling support"
+       bool "Enable triggered sampling support"
        help
          Provides IIO core support for triggers.  Currently these
          are used to initialize capture of samples to push into
index 56a4b7ca7ee38ba118796d6d5e3eb770bbebc69a..45d67e9228d75b44d71354d248cf97140dfe37ad 100644 (file)
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
        if (!optlen)
                return -EINVAL;
 
+       memset(&sa_path, 0, sizeof(sa_path));
+       sa_path.vlan_id = 0xffff;
+
        ib_sa_unpack_path(path_data->path_rec, &sa_path);
        ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
        if (ret)
index 6095872549e79fb0dd9c0b3702c4eb01e974fb3f..8b8cc6fa0ab0c1ebf966b2ace4932807d9181bf1 100644 (file)
@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
-       if (likely(!atomic_read(&context->notifier_count)))
+       if (likely(!atomic_read(&context->notifier_count)) ||
+           context->odp_mrs_count == 1)
                umem->odp_data->mn_counters_active = true;
        else
                list_add(&umem->odp_data->no_private_counters,
index 643c08a025a52d015431b8a27be1ddcacbd36845..b716b08156446e186c9ae608f3f4e6343c6f200f 100644 (file)
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
index b7943ff16ed3f2edece8ec4cc3c7931594bd943a..a9f048990dfcd833de09978c0448ad979749e4c9 100644 (file)
@@ -400,6 +400,52 @@ err:
        return ret;
 }
 
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+                                 struct ib_uverbs_query_device_resp *resp,
+                                 struct ib_device_attr *attr)
+{
+       resp->fw_ver            = attr->fw_ver;
+       resp->node_guid         = file->device->ib_dev->node_guid;
+       resp->sys_image_guid    = attr->sys_image_guid;
+       resp->max_mr_size       = attr->max_mr_size;
+       resp->page_size_cap     = attr->page_size_cap;
+       resp->vendor_id         = attr->vendor_id;
+       resp->vendor_part_id    = attr->vendor_part_id;
+       resp->hw_ver            = attr->hw_ver;
+       resp->max_qp            = attr->max_qp;
+       resp->max_qp_wr         = attr->max_qp_wr;
+       resp->device_cap_flags  = attr->device_cap_flags;
+       resp->max_sge           = attr->max_sge;
+       resp->max_sge_rd        = attr->max_sge_rd;
+       resp->max_cq            = attr->max_cq;
+       resp->max_cqe           = attr->max_cqe;
+       resp->max_mr            = attr->max_mr;
+       resp->max_pd            = attr->max_pd;
+       resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
+       resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
+       resp->max_res_rd_atom   = attr->max_res_rd_atom;
+       resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
+       resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
+       resp->atomic_cap                = attr->atomic_cap;
+       resp->max_ee                    = attr->max_ee;
+       resp->max_rdd                   = attr->max_rdd;
+       resp->max_mw                    = attr->max_mw;
+       resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
+       resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
+       resp->max_mcast_grp             = attr->max_mcast_grp;
+       resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
+       resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
+       resp->max_ah                    = attr->max_ah;
+       resp->max_fmr                   = attr->max_fmr;
+       resp->max_map_per_fmr           = attr->max_map_per_fmr;
+       resp->max_srq                   = attr->max_srq;
+       resp->max_srq_wr                = attr->max_srq_wr;
+       resp->max_srq_sge               = attr->max_srq_sge;
+       resp->max_pkeys                 = attr->max_pkeys;
+       resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
+       resp->phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
+}
+
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
@@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                return ret;
 
        memset(&resp, 0, sizeof resp);
-
-       resp.fw_ver                    = attr.fw_ver;
-       resp.node_guid                 = file->device->ib_dev->node_guid;
-       resp.sys_image_guid            = attr.sys_image_guid;
-       resp.max_mr_size               = attr.max_mr_size;
-       resp.page_size_cap             = attr.page_size_cap;
-       resp.vendor_id                 = attr.vendor_id;
-       resp.vendor_part_id            = attr.vendor_part_id;
-       resp.hw_ver                    = attr.hw_ver;
-       resp.max_qp                    = attr.max_qp;
-       resp.max_qp_wr                 = attr.max_qp_wr;
-       resp.device_cap_flags          = attr.device_cap_flags;
-       resp.max_sge                   = attr.max_sge;
-       resp.max_sge_rd                = attr.max_sge_rd;
-       resp.max_cq                    = attr.max_cq;
-       resp.max_cqe                   = attr.max_cqe;
-       resp.max_mr                    = attr.max_mr;
-       resp.max_pd                    = attr.max_pd;
-       resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
-       resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
-       resp.max_res_rd_atom           = attr.max_res_rd_atom;
-       resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
-       resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
-       resp.atomic_cap                = attr.atomic_cap;
-       resp.max_ee                    = attr.max_ee;
-       resp.max_rdd                   = attr.max_rdd;
-       resp.max_mw                    = attr.max_mw;
-       resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
-       resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
-       resp.max_mcast_grp             = attr.max_mcast_grp;
-       resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
-       resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
-       resp.max_ah                    = attr.max_ah;
-       resp.max_fmr                   = attr.max_fmr;
-       resp.max_map_per_fmr           = attr.max_map_per_fmr;
-       resp.max_srq                   = attr.max_srq;
-       resp.max_srq_wr                = attr.max_srq_wr;
-       resp.max_srq_sge               = attr.max_srq_sge;
-       resp.max_pkeys                 = attr.max_pkeys;
-       resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
-       resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
+       copy_query_dev_fields(file, &resp, &attr);
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
@@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
        if (qp->real_qp == qp) {
                ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
                if (ret)
-                       goto out;
+                       goto release_qp;
                ret = qp->device->modify_qp(qp, attr,
                        modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
        } else {
                ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
        }
 
-       put_qp_read(qp);
-
        if (ret)
-               goto out;
+               goto release_qp;
 
        ret = in_len;
 
+release_qp:
+       put_qp_read(qp);
+
 out:
        kfree(attr);
 
@@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+                             struct ib_udata *ucore,
+                             struct ib_udata *uhw)
+{
+       struct ib_uverbs_ex_query_device_resp resp;
+       struct ib_uverbs_ex_query_device  cmd;
+       struct ib_device_attr attr;
+       struct ib_device *device;
+       int err;
+
+       device = file->device->ib_dev;
+       if (ucore->inlen < sizeof(cmd))
+               return -EINVAL;
+
+       err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+       if (err)
+               return err;
+
+       if (cmd.comp_mask)
+               return -EINVAL;
+
+       if (cmd.reserved)
+               return -EINVAL;
+
+       resp.response_length = offsetof(typeof(resp), odp_caps);
+
+       if (ucore->outlen < resp.response_length)
+               return -ENOSPC;
+
+       err = device->query_device(device, &attr);
+       if (err)
+               return err;
+
+       copy_query_dev_fields(file, &resp.base, &attr);
+       resp.comp_mask = 0;
+
+       if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
+               goto end;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+       resp.odp_caps.per_transport_caps.rc_odp_caps =
+               attr.odp_caps.per_transport_caps.rc_odp_caps;
+       resp.odp_caps.per_transport_caps.uc_odp_caps =
+               attr.odp_caps.per_transport_caps.uc_odp_caps;
+       resp.odp_caps.per_transport_caps.ud_odp_caps =
+               attr.odp_caps.per_transport_caps.ud_odp_caps;
+       resp.odp_caps.reserved = 0;
+#else
+       memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
+#endif
+       resp.response_length += sizeof(resp.odp_caps);
+
+end:
+       err = ib_copy_to_udata(ucore, &resp, resp.response_length);
+       if (err)
+               return err;
+
+       return 0;
+}
index 5db1a8cc388da0c5de517bf69b3d8136b94a1bbf..259dcc7779f5e01bc95b66ca90e64d20f7c94087 100644 (file)
@@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
                                    struct ib_udata *uhw) = {
        [IB_USER_VERBS_EX_CMD_CREATE_FLOW]      = ib_uverbs_ex_create_flow,
        [IB_USER_VERBS_EX_CMD_DESTROY_FLOW]     = ib_uverbs_ex_destroy_flow,
+       [IB_USER_VERBS_EX_CMD_QUERY_DEVICE]     = ib_uverbs_ex_query_device,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
index 794555dc86a598a78125edc38a01299157e9caeb..bdfac2ccb704ab43403a5245448ef1184143e738 100644 (file)
@@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
        struct c4iw_cq *chp;
        unsigned long flag;
 
+       spin_lock_irqsave(&dev->lock, flag);
        chp = get_chp(dev, qid);
        if (chp) {
+               atomic_inc(&chp->refcnt);
+               spin_unlock_irqrestore(&dev->lock, flag);
                t4_clear_cq_armed(&chp->cq);
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-       } else
+               if (atomic_dec_and_test(&chp->refcnt))
+                       wake_up(&chp->wait);
+       } else {
                PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+               spin_unlock_irqrestore(&dev->lock, flag);
+       }
        return 0;
 }
index b5678ac97393ab94ba73b6b1f396ae801801c480..d87e1650f6437835f3660c21d3a59ec920fa8f7c 100644 (file)
@@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
        return (int)(rdev->lldi.vr->stag.size >> 5);
 }
 
-#define C4IW_WR_TO (30*HZ)
+#define C4IW_WR_TO (60*HZ)
 
 struct c4iw_wr_wait {
        struct completion completion;
@@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                 u32 hwtid, u32 qpid,
                                 const char *func)
 {
-       unsigned to = C4IW_WR_TO;
        int ret;
 
-       do {
-               ret = wait_for_completion_timeout(&wr_waitp->completion, to);
-               if (!ret) {
-                       printk(KERN_ERR MOD "%s - Device %s not responding - "
-                              "tid %u qpid %u\n", func,
-                              pci_name(rdev->lldi.pdev), hwtid, qpid);
-                       if (c4iw_fatal_error(rdev)) {
-                               wr_waitp->ret = -EIO;
-                               break;
-                       }
-                       to = to << 2;
-               }
-       } while (!ret);
+       if (c4iw_fatal_error(rdev)) {
+               wr_waitp->ret = -EIO;
+               goto out;
+       }
+
+       ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
+       if (!ret) {
+               PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+                    func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+               rdev->flags |= T4_FATAL_ERROR;
+               wr_waitp->ret = -EIO;
+       }
+out:
        if (wr_waitp->ret)
                PDBG("%s: FW reply %d tid %u qpid %u\n",
                     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
index 4977082e081f2303542d34ef230c5d199b2b2f31..33c45dfcbd88cb11a05c637e22159cc34008a718 100644 (file)
@@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name)
        }
 
        spin_lock(&tmp->d_lock);
-       if (!(d_unhashed(tmp) && tmp->d_inode)) {
+       if (!d_unhashed(tmp) && tmp->d_inode) {
                dget_dlock(tmp);
                __d_drop(tmp);
                spin_unlock(&tmp->d_lock);
index 6559af60bffd62fbf162320379ff545ca4c974fc..e08db7020cd4939809dd456882ff3d4e69480480 100644 (file)
@@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
 /* clean up any chip type-specific stuff */
 void ipath_chip_done(void);
 
-/* check to see if we have to force ordering for write combining */
-int ipath_unordered_wc(void);
-
 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
                          unsigned cnt);
 void ipath_cancel_sends(struct ipath_devdata *, int);
index 1d7bd82a1fb1fa3ffe8c4cebf08a3cd1b57529ce..1a7e20a75149ce6a6bab980b466585784595c0f1 100644 (file)
@@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
 {
        return 0;
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int ipath_unordered_wc(void)
-{
-       return 1;
-}
index 3428acb0868c202383304105546fc023d3a0917e..4ad0b932df1fab1c1897f144db9ffc8af35c5f73 100644 (file)
@@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
                dd->ipath_wc_cookie = 0; /* even on failure */
        }
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is ordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering.  Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int ipath_unordered_wc(void)
-{
-       return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
index 56a593e0ae5d1f537db0f3615ee29eb99b0defc2..39a488889fc7a9981213b25567f051dc772bc510 100644 (file)
@@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
                if (*slave < 0) {
                        mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
-                                       gid.global.interface_id);
+                                    be64_to_cpu(gid.global.interface_id));
                        return -ENOENT;
                }
                return 0;
index 543ecdd8667bad824fa3313a5b45be9693a5fc69..0176caa5792c4576276470c2c3f86f0fca16a7bd 100644 (file)
@@ -369,8 +369,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
        int err;
 
        mutex_lock(&cq->resize_mutex);
-
-       if (entries < 1) {
+       if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }
@@ -381,7 +380,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                goto out;
        }
 
-       if (entries > dev->dev->caps.max_cqes) {
+       if (entries > dev->dev->caps.max_cqes + 1) {
                err = -EINVAL;
                goto out;
        }
@@ -394,7 +393,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
-                       err = 0;
+                       err = -EINVAL;
                        goto out;
                }
 
index eb8e215f1613ee95ae7fb6253ba6b06220f1b00c..ac6e2b710ea6fef928271869f0e170f2bbdb158d 100644 (file)
@@ -1269,8 +1269,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_steering *ib_steering = NULL;
-       enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-               MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
        struct mlx4_flow_reg_id reg_id;
 
        if (mdev->dev->caps.steering_mode ==
@@ -1284,8 +1283,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                                    prot, &reg_id.id);
-       if (err)
+       if (err) {
+               pr_err("multicast attach op failed, err %d\n", err);
                goto err_malloc;
+       }
 
        reg_id.mirror = 0;
        if (mlx4_is_bonded(dev)) {
@@ -1348,9 +1349,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
        struct mlx4_flow_reg_id reg_id = {0, 0};
-
-       enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-               MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
 
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
index dfc6ca128a7e355ef737976dceee50a7033e62b7..ed2bd6701f9b131c3dc3261cb2eae21a2d835524 100644 (file)
@@ -1696,8 +1696,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
-                               if (err)
-                                       return -EINVAL;
+                               if (err) {
+                                       err = -EINVAL;
+                                       goto out;
+                               }
                                if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
                                        dev->qp1_proxy[qp->port - 1] = qp;
                        }
index 03bf81211a5401c366522c68213ecf27cdf4b326..cc4ac1e583b29725af01e03e40bebaee758c6e01 100644 (file)
@@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
        struct mlx5_general_caps *gen;
-       int err = 0;
+       int err = -ENOMEM;
        int port;
 
        gen = &dev->mdev->caps.gen;
@@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
+       dev->ib_dev.uverbs_ex_cmd_mask =
+               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
index 32a28bd50b20ae08c41a9a7086b72045f7934c3d..cd9822eeacae3f1ab138731ea9c6a67963974bc2 100644 (file)
@@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
                goto err_2;
        }
        mr->umem = umem;
+       mr->dev = dev;
        mr->live = 1;
        kvfree(in);
 
index b43456ae124bccb99cfe78446b1e1ce347c8f2a6..c9780d919769a6ef9a0020b7e710afa7e7ce2497 100644 (file)
@@ -40,7 +40,7 @@
 #include <be_roce.h>
 #include "ocrdma_sli.h"
 
-#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
 
 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
 #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+#define EQ_INTR_PER_SEC_THRSH_HI 150000
+#define EQ_INTR_PER_SEC_THRSH_LOW 100000
+#define EQ_AIC_MAX_EQD 20
+#define EQ_AIC_MIN_EQD 0
+
+void ocrdma_eqd_set_task(struct work_struct *work);
 
 struct ocrdma_dev_attr {
        u8 fw_ver[32];
        u32 vendor_id;
        u32 device_id;
        u16 max_pd;
+       u16 max_dpp_pds;
        u16 max_cq;
        u16 max_cqe;
        u16 max_qp;
@@ -116,12 +123,19 @@ struct ocrdma_queue_info {
        bool created;
 };
 
+struct ocrdma_aic_obj {         /* Adaptive interrupt coalescing (AIC) info */
+       u32 prev_eqd;
+       u64 eq_intr_cnt;
+       u64 prev_eq_intr_cnt;
+};
+
 struct ocrdma_eq {
        struct ocrdma_queue_info q;
        u32 vector;
        int cq_cnt;
        struct ocrdma_dev *dev;
        char irq_name[32];
+       struct ocrdma_aic_obj aic_obj;
 };
 
 struct ocrdma_mq {
@@ -171,6 +185,21 @@ struct ocrdma_stats {
        struct ocrdma_dev *dev;
 };
 
+struct ocrdma_pd_resource_mgr {
+       u32 pd_norm_start;
+       u16 pd_norm_count;
+       u16 pd_norm_thrsh;
+       u16 max_normal_pd;
+       u32 pd_dpp_start;
+       u16 pd_dpp_count;
+       u16 pd_dpp_thrsh;
+       u16 max_dpp_pd;
+       u16 dpp_page_index;
+       unsigned long *pd_norm_bitmap;
+       unsigned long *pd_dpp_bitmap;
+       bool pd_prealloc_valid;
+};
+
 struct stats_mem {
        struct ocrdma_mqe mqe;
        void *va;
@@ -198,6 +227,7 @@ struct ocrdma_dev {
 
        struct ocrdma_eq *eq_tbl;
        int eq_cnt;
+       struct delayed_work eqd_work;
        u16 base_eqid;
        u16 max_eq;
 
@@ -255,7 +285,12 @@ struct ocrdma_dev {
        struct ocrdma_stats rx_qp_err_stats;
        struct ocrdma_stats tx_dbg_stats;
        struct ocrdma_stats rx_dbg_stats;
+       struct ocrdma_stats driver_stats;
+       struct ocrdma_stats reset_stats;
        struct dentry *dir;
+       atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
+       atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
+       struct ocrdma_pd_resource_mgr *pd_mgr;
 };
 
 struct ocrdma_cq {
@@ -335,7 +370,6 @@ struct ocrdma_srq {
 
 struct ocrdma_qp {
        struct ib_qp ibqp;
-       struct ocrdma_dev *dev;
 
        u8 __iomem *sq_db;
        struct ocrdma_qp_hwq_info sq;
index f3cc8c9e65ae70f9e0b157632189e324970cbea5..d812904f398473d1502bb979d6d822c04b55f2b8 100644 (file)
 #include <net/netevent.h>
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
 
 #include "ocrdma.h"
 #include "ocrdma_verbs.h"
 #include "ocrdma_ah.h"
 #include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
 
 #define OCRDMA_VID_PCP_SHIFT   0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-                       struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
+                       struct ib_ah_attr *attr, union ib_gid *sgid,
+                       int pdid, bool *isvlan)
 {
        int status = 0;
-       u16 vlan_tag; bool vlan_enabled = false;
+       u16 vlan_tag;
        struct ocrdma_eth_vlan eth;
        struct ocrdma_grh grh;
        int eth_sz;
@@ -59,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
                eth.vlan_tag = cpu_to_be16(vlan_tag);
                eth_sz = sizeof(struct ocrdma_eth_vlan);
-               vlan_enabled = true;
+               *isvlan = true;
        } else {
                eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                eth_sz = sizeof(struct ocrdma_eth_basic);
@@ -82,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        /* Eth HDR */
        memcpy(&ah->av->eth_hdr, &eth, eth_sz);
        memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
-       if (vlan_enabled)
+       if (*isvlan)
                ah->av->valid |= OCRDMA_AV_VLAN_VALID;
        ah->av->valid = cpu_to_le32(ah->av->valid);
        return status;
@@ -91,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 {
        u32 *ahid_addr;
+       bool isvlan = false;
        int status;
        struct ocrdma_ah *ah;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
@@ -127,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
                }
        }
 
-       status = set_av_attr(dev, ah, attr, &sgid, pd->id);
+       status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
        if (status)
                goto av_conf_err;
 
        /* if pd is for the user process, pass the ah_id to user space */
        if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
                ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
-               *ahid_addr = ah->id;
+               *ahid_addr = 0;
+               *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+               if (isvlan)
+                       *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
+                                      OCRDMA_AH_VLAN_VALID_SHIFT);
        }
+
        return &ah->ibah;
 
 av_conf_err:
@@ -191,5 +200,20 @@ int ocrdma_process_mad(struct ib_device *ibdev,
                       struct ib_grh *in_grh,
                       struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-       return IB_MAD_RESULT_SUCCESS;
+       int status;
+       struct ocrdma_dev *dev;
+
+       switch (in_mad->mad_hdr.mgmt_class) {
+       case IB_MGMT_CLASS_PERF_MGMT:
+               dev = get_ocrdma_dev(ibdev);
+               if (!ocrdma_pma_counters(dev, out_mad))
+                       status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+               else
+                       status = IB_MAD_RESULT_SUCCESS;
+               break;
+       default:
+               status = IB_MAD_RESULT_SUCCESS;
+               break;
+       }
+       return status;
 }
index 8ac49e7f96d1585c7fb94049dd7db88a0699b7f4..726a87cf22dcb215d2a105f08a054395bb0c6804 100644 (file)
 #ifndef __OCRDMA_AH_H__
 #define __OCRDMA_AH_H__
 
+enum {
+       OCRDMA_AH_ID_MASK               = 0x3FF,
+       OCRDMA_AH_VLAN_VALID_MASK       = 0x01,
+       OCRDMA_AH_VLAN_VALID_SHIFT      = 0x1F
+};
+
 struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
 int ocrdma_destroy_ah(struct ib_ah *);
 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
index 638bff1ffc6c73b95a41a1556ff42a06680d6bca..0c9e95909a64651e931f4768e88f97e266c8379e 100644 (file)
@@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
                break;
        }
 
+       if (type < OCRDMA_MAX_ASYNC_ERRORS)
+               atomic_inc(&dev->async_err_stats[type]);
+
        if (qp_event) {
                if (qp->ibqp.event_handler)
                        qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
        return 0;
 }
 
-static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
-                                      struct ocrdma_cq *cq)
+static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+                               struct ocrdma_cq *cq, bool sq)
 {
-       unsigned long flags;
        struct ocrdma_qp *qp;
-       bool buddy_cq_found = false;
-       /* Go through list of QPs in error state which are using this CQ
-        * and invoke its callback handler to trigger CQE processing for
-        * error/flushed CQE. It is rare to find more than few entries in
-        * this list as most consumers stops after getting error CQE.
-        * List is traversed only once when a matching buddy cq found for a QP.
-        */
-       spin_lock_irqsave(&dev->flush_q_lock, flags);
-       list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+       struct list_head *cur;
+       struct ocrdma_cq *bcq = NULL;
+       struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
+
+       list_for_each(cur, head) {
+               if (sq)
+                       qp = list_entry(cur, struct ocrdma_qp, sq_entry);
+               else
+                       qp = list_entry(cur, struct ocrdma_qp, rq_entry);
+
                if (qp->srq)
                        continue;
                /* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
                 * if completion came on rq, sq's cq is buddy cq.
                 */
                if (qp->sq_cq == cq)
-                       cq = qp->rq_cq;
+                       bcq = qp->rq_cq;
                else
-                       cq = qp->sq_cq;
-               buddy_cq_found = true;
-               break;
+                       bcq = qp->sq_cq;
+               return bcq;
        }
+       return NULL;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+                                      struct ocrdma_cq *cq)
+{
+       unsigned long flags;
+       struct ocrdma_cq *bcq = NULL;
+
+       /* Go through list of QPs in error state which are using this CQ
+        * and invoke its callback handler to trigger CQE processing for
+        * error/flushed CQE. It is rare to find more than few entries in
+        * this list as most consumers stops after getting error CQE.
+        * List is traversed only once when a matching buddy cq found for a QP.
+        */
+       spin_lock_irqsave(&dev->flush_q_lock, flags);
+       /* Check if buddy CQ is present.
+        * true - Check for  SQ CQ
+        * false - Check for RQ CQ
+        */
+       bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
+       if (bcq == NULL)
+               bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
        spin_unlock_irqrestore(&dev->flush_q_lock, flags);
-       if (buddy_cq_found == false)
-               return;
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
-               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+
+       /* if there is valid buddy cq, look for its completion handler */
+       if (bcq && bcq->ibcq.comp_handler) {
+               spin_lock_irqsave(&bcq->comp_handler_lock, flags);
+               (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
+               spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
        }
 }
 
@@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
 
        } while (budget);
 
+       eq->aic_obj.eq_intr_cnt++;
        ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
        return IRQ_HANDLED;
 }
@@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+       attr->max_dpp_pds =
+          (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
        attr->max_qp =
            (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
        return status;
 }
 
+
+static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
+{
+       int status = -ENOMEM;
+       size_t pd_bitmap_size;
+       struct ocrdma_alloc_pd_range *cmd;
+       struct ocrdma_alloc_pd_range_rsp *rsp;
+
+       /* Pre allocate the DPP PDs */
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               return -ENOMEM;
+       cmd->pd_count = dev->attr.max_dpp_pds;
+       cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       if (status)
+               goto mbx_err;
+       rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+       if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
+               dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+                               OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+               dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+                               OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+               dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+               pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+               dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+                                                    GFP_KERNEL);
+       }
+       kfree(cmd);
+
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       if (status)
+               goto mbx_err;
+       rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+       if (rsp->pd_count) {
+               dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
+                                       OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+               dev->pd_mgr->max_normal_pd = rsp->pd_count;
+               pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+               dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
+                                                     GFP_KERNEL);
+       }
+
+       if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
+               /* Enable PD resource manager */
+               dev->pd_mgr->pd_prealloc_valid = true;
+       } else {
+               return -ENOMEM;
+       }
+mbx_err:
+       kfree(cmd);
+       return status;
+}
+
+static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
+{
+       struct ocrdma_dealloc_pd_range *cmd;
+
+       /* return normal PDs to firmware */
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               goto mbx_err;
+
+       if (dev->pd_mgr->max_normal_pd) {
+               cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
+               cmd->pd_count = dev->pd_mgr->max_normal_pd;
+               ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       }
+
+       if (dev->pd_mgr->max_dpp_pd) {
+               kfree(cmd);
+               /* return DPP PDs to firmware */
+               cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
+                                         sizeof(*cmd));
+               if (!cmd)
+                       goto mbx_err;
+
+               cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
+               cmd->pd_count = dev->pd_mgr->max_dpp_pd;
+               ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       }
+mbx_err:
+       kfree(cmd);
+}
+
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
+{
+       int status;
+
+       dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
+                             GFP_KERNEL);
+       if (!dev->pd_mgr) {
+               pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
+               return;
+       }
+       status = ocrdma_mbx_alloc_pd_range(dev);
+       if (status) {
+               pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
+                        __func__, dev->id);
+       }
+}
+
+static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
+{
+       ocrdma_mbx_dealloc_pd_range(dev);
+       kfree(dev->pd_mgr->pd_norm_bitmap);
+       kfree(dev->pd_mgr->pd_dpp_bitmap);
+       kfree(dev->pd_mgr);
+}
+
 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
                               int *num_pages, int *page_size)
 {
@@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 {
        bool found;
        unsigned long flags;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
-       spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+       spin_lock_irqsave(&dev->flush_q_lock, flags);
        found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
        if (!found)
                list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
                if (!found)
                        list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
        }
-       spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+       spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
        int status;
        u32 len, hw_pages, hw_page_size;
        dma_addr_t pa;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_wqe_allocated;
        u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
        int status;
        u32 len, hw_pages, hw_page_size;
        dma_addr_t pa = 0;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
 
@@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
                                        struct ocrdma_qp *qp)
 {
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        dma_addr_t pa = 0;
        int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 {
        int status = -ENOMEM;
        u32 flags = 0;
-       struct ocrdma_dev *dev = qp->dev;
        struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        struct ocrdma_cq *cq;
        struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        union ib_gid sgid, zgid;
        u32 vlan_id;
        u8 mac_addr[6];
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
        if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
                return -EINVAL;
-       if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-               ocrdma_init_service_level(qp->dev);
+       if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+               ocrdma_init_service_level(dev);
        cmd->params.tclass_sq_psn |=
            (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
        cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
        memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
               sizeof(cmd->params.dgid));
-       status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+       status = ocrdma_query_gid(&dev->ibdev, 1,
                        ah_attr->grh.sgid_index, &sgid);
        if (status)
                return status;
@@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 
        qp->sgid_idx = ah_attr->grh.sgid_index;
        memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-       ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+       status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
+       if (status)
+               return status;
        cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
                                (mac_addr[2] << 16) | (mac_addr[3] << 24);
        /* convert them to LE format. */
@@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
                cmd->params.rnt_rc_sl_fl |=
-                       (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+                       (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
        }
        return 0;
 }
@@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                                struct ib_qp_attr *attrs, int attr_mask)
 {
        int status = 0;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
        if (attr_mask & IB_QP_PKEY_INDEX) {
                cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                        return status;
        } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
                /* set the default mac address for UD, GSI QPs */
-               cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-                       (qp->dev->nic_info.mac_addr[1] << 8) |
-                       (qp->dev->nic_info.mac_addr[2] << 16) |
-                       (qp->dev->nic_info.mac_addr[3] << 24);
-               cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-                                       (qp->dev->nic_info.mac_addr[5] << 8);
+               cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+                       (dev->nic_info.mac_addr[1] << 8) |
+                       (dev->nic_info.mac_addr[2] << 16) |
+                       (dev->nic_info.mac_addr[3] << 24);
+               cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+                                       (dev->nic_info.mac_addr[5] << 8);
        }
        if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
            attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
        }
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-               if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+               if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
                        status = -EINVAL;
                        goto pmtu_err;
                }
@@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
        }
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-               if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+               if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
                        status = -EINVAL;
                        goto pmtu_err;
                }
@@ -2870,6 +3023,82 @@ done:
        return status;
 }
 
+static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+                                int num)
+{
+       int i, status = -ENOMEM;
+       struct ocrdma_modify_eqd_req *cmd;
+
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
+       if (!cmd)
+               return status;
+
+       ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
+                       OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+       cmd->cmd.num_eq = num;
+       for (i = 0; i < num; i++) {
+               cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
+               cmd->cmd.set_eqd[i].phase = 0;
+               cmd->cmd.set_eqd[i].delay_multiplier =
+                               (eq[i].aic_obj.prev_eqd * 65)/100;
+       }
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       if (status)
+               goto mbx_err;
+mbx_err:
+       kfree(cmd);
+       return status;
+}
+
+static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+                            int num)
+{
+       int num_eqs, i = 0;
+       if (num > 8) {
+               while (num) {
+                       num_eqs = min(num, 8);
+                       ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
+                       i += num_eqs;
+                       num -= num_eqs;
+               }
+       } else {
+               ocrdma_mbx_modify_eqd(dev, eq, num);
+       }
+       return 0;
+}
+
+void ocrdma_eqd_set_task(struct work_struct *work)
+{
+       struct ocrdma_dev *dev =
+               container_of(work, struct ocrdma_dev, eqd_work.work);
+       struct ocrdma_eq *eq = 0;
+       int i, num = 0, status = -EINVAL;
+       u64 eq_intr;
+
+       for (i = 0; i < dev->eq_cnt; i++) {
+               eq = &dev->eq_tbl[i];
+               if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
+                       eq_intr = eq->aic_obj.eq_intr_cnt -
+                                 eq->aic_obj.prev_eq_intr_cnt;
+                       if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
+                           (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
+                               eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
+                               num++;
+                       } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
+                                  (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
+                               eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
+                               num++;
+                       }
+               }
+               eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
+       }
+
+       if (num)
+               status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+       schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+}
+
 int ocrdma_init_hw(struct ocrdma_dev *dev)
 {
        int status;
@@ -2915,6 +3144,7 @@ qpeq_err:
 
 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
 {
+       ocrdma_free_pd_pool(dev);
        ocrdma_mbx_delete_ah_tbl(dev);
 
        /* cleanup the eqs */
index 6eed8f191322a134fc0dcd1438cf771525c06a18..e905972fceb7d48ff882800390c1330367815caf 100644 (file)
@@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
 int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
 char *port_speed_string(struct ocrdma_dev *dev);
 void ocrdma_init_service_level(struct ocrdma_dev *);
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
+void ocrdma_free_pd_range(struct ocrdma_dev *dev);
 
 #endif                         /* __OCRDMA_HW_H__ */
index b0b2257b8e0430738cc7b5f5f36145868c7d4dc3..7a2b59aca004bfac1eae4fc258fcb08d077bf449 100644 (file)
@@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
 
        dev->ibdev.node_type = RDMA_NODE_IB_CA;
        dev->ibdev.phys_port_cnt = 1;
-       dev->ibdev.num_comp_vectors = 1;
+       dev->ibdev.num_comp_vectors = dev->eq_cnt;
 
        /* mandatory verbs. */
        dev->ibdev.query_device = ocrdma_query_device;
@@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
        if (dev->stag_arr == NULL)
                goto alloc_err;
 
+       ocrdma_alloc_pd_pool(dev);
+
        spin_lock_init(&dev->av_tbl.lock);
        spin_lock_init(&dev->flush_q_lock);
        return 0;
@@ -491,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
        spin_unlock(&ocrdma_devlist_lock);
        /* Init stats */
        ocrdma_add_port_stats(dev);
+       /* Interrupt Moderation */
+       INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
+       schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
 
        pr_info("%s %s: %s \"%s\" port %d\n",
                dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
@@ -528,11 +533,12 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
        /* first unregister with stack to stop all the active traffic
         * of the registered clients.
         */
-       ocrdma_rem_port_stats(dev);
+       cancel_delayed_work_sync(&dev->eqd_work);
        ocrdma_remove_sysfiles(dev);
-
        ib_unregister_device(&dev->ibdev);
 
+       ocrdma_rem_port_stats(dev);
+
        spin_lock(&ocrdma_devlist_lock);
        list_del_rcu(&dev->entry);
        spin_unlock(&ocrdma_devlist_lock);
index 4e036480c1a8fa7d9f8d1be9e2267ea3147b2b5a..243c87c8bd65d09026f46ee12e3ee3b9109ce155 100644 (file)
@@ -75,6 +75,8 @@ enum {
        OCRDMA_CMD_DESTROY_RBQ = 26,
 
        OCRDMA_CMD_GET_RDMA_STATS = 27,
+       OCRDMA_CMD_ALLOC_PD_RANGE = 28,
+       OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
 
        OCRDMA_CMD_MAX
 };
@@ -87,6 +89,7 @@ enum {
        OCRDMA_CMD_CREATE_MQ            = 21,
        OCRDMA_CMD_GET_CTRL_ATTRIBUTES  = 32,
        OCRDMA_CMD_GET_FW_VER           = 35,
+       OCRDMA_CMD_MODIFY_EQ_DELAY      = 41,
        OCRDMA_CMD_DELETE_MQ            = 53,
        OCRDMA_CMD_DELETE_CQ            = 54,
        OCRDMA_CMD_DELETE_EQ            = 55,
@@ -101,7 +104,7 @@ enum {
        QTYPE_MCCQ      = 3
 };
 
-#define OCRDMA_MAX_SGID                8
+#define OCRDMA_MAX_SGID                16
 
 #define OCRDMA_MAX_QP    2048
 #define OCRDMA_MAX_CQ    2048
@@ -314,6 +317,29 @@ struct ocrdma_create_eq_rsp {
 
 #define OCRDMA_EQ_MINOR_OTHER  0x1
 
+struct ocrmda_set_eqd {
+       u32 eq_id;
+       u32 phase;
+       u32 delay_multiplier;
+};
+
+struct ocrdma_modify_eqd_cmd {
+       struct ocrdma_mbx_hdr req;
+       u32 num_eq;
+       struct ocrmda_set_eqd set_eqd[8];
+} __packed;
+
+struct ocrdma_modify_eqd_req {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_modify_eqd_cmd cmd;
+};
+
+
+struct ocrdma_modify_eq_delay_rsp {
+       struct ocrdma_mbx_rsp hdr;
+       u32 rsvd0;
+} __packed;
+
 enum {
        OCRDMA_MCQE_STATUS_SHIFT        = 0,
        OCRDMA_MCQE_STATUS_MASK         = 0xFFFF,
@@ -441,7 +467,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
        OCRDMA_DEVICE_FATAL_EVENT       = 0x08,
        OCRDMA_SRQCAT_ERROR             = 0x0E,
        OCRDMA_SRQ_LIMIT_EVENT          = 0x0F,
-       OCRDMA_QP_LAST_WQE_EVENT        = 0x10
+       OCRDMA_QP_LAST_WQE_EVENT        = 0x10,
+
+       OCRDMA_MAX_ASYNC_ERRORS
 };
 
 /* mailbox command request and responses */
@@ -1297,6 +1325,37 @@ struct ocrdma_dealloc_pd_rsp {
        struct ocrdma_mbx_rsp rsp;
 };
 
+struct ocrdma_alloc_pd_range {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 enable_dpp_rsvd;
+       u32 pd_count;
+};
+
+struct ocrdma_alloc_pd_range_rsp {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_rsp rsp;
+       u32 dpp_page_pdid;
+       u32 pd_count;
+};
+
+enum {
+       OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
+};
+
+struct ocrdma_dealloc_pd_range {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 start_pd_id;
+       u32 pd_count;
+};
+
+struct ocrdma_dealloc_pd_range_rsp {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 rsvd;
+};
+
 enum {
        OCRDMA_ADDR_CHECK_ENABLE        = 1,
        OCRDMA_ADDR_CHECK_DISABLE       = 0
@@ -1597,7 +1656,9 @@ enum OCRDMA_CQE_STATUS {
        OCRDMA_CQE_INV_EEC_STATE_ERR,
        OCRDMA_CQE_FATAL_ERR,
        OCRDMA_CQE_RESP_TIMEOUT_ERR,
-       OCRDMA_CQE_GENERAL_ERR
+       OCRDMA_CQE_GENERAL_ERR,
+
+       OCRDMA_MAX_CQE_ERR
 };
 
 enum {
@@ -1673,6 +1734,7 @@ enum {
        OCRDMA_FLAG_FENCE_R     = 0x8,
        OCRDMA_FLAG_SOLICIT     = 0x10,
        OCRDMA_FLAG_IMM         = 0x20,
+       OCRDMA_FLAG_AH_VLAN_PR  = 0x40,
 
        /* Stag flags */
        OCRDMA_LKEY_FLAG_LOCAL_WR       = 0x1,
index 41a9aec9998d103f81dc325129cdab47bd20e2ff..48d7ef51aa0c209678e0ed4bbe97bc5ff9a881d5 100644 (file)
@@ -26,6 +26,7 @@
  *******************************************************************/
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_pma.h>
 #include "ocrdma_stats.h"
 
 static struct dentry *ocrdma_dbgfs_dir;
@@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
        return stats;
 }
 
+static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+       return convert_to_64bit(rx_stats->roce_frames_lo,
+               rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
+               + (u64)rx_stats->roce_frame_payload_len_drops;
+}
+
+static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+       return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+               rx_stats->roce_frame_bytes_hi))/4;
+}
+
 static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
 {
        char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
        return stats;
 }
 
+static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+       return (convert_to_64bit(tx_stats->send_pkts_lo,
+                                tx_stats->send_pkts_hi) +
+       convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
+       convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
+       convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+                        tx_stats->read_rsp_pkts_hi) +
+       convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
+}
+
+static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+       return (convert_to_64bit(tx_stats->send_bytes_lo,
+                                tx_stats->send_bytes_hi) +
+               convert_to_64bit(tx_stats->write_bytes_lo,
+                                tx_stats->write_bytes_hi) +
+               convert_to_64bit(tx_stats->read_req_bytes_lo,
+                                tx_stats->read_req_bytes_hi) +
+               convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+                                tx_stats->read_rsp_bytes_hi))/4;
+}
+
 static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
 {
        char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -432,10 +485,118 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
        return dev->stats_mem.debugfs_mem;
 }
 
+static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
+{
+       char *stats = dev->stats_mem.debugfs_mem, *pcur;
+
+
+       memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+       pcur = stats;
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
+                               (u64)(dev->async_err_stats
+                               [OCRDMA_CQ_ERROR].counter));
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_CQ_OVERRUN_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_CQ_QPCAT_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_ACCESS_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_COMM_EST_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SQ_DRAINED_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_DEVICE_FATAL_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SRQCAT_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SRQ_LIMIT_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_LAST_WQE_EVENT].counter);
+
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_LEN_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_QP_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_PROT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_WR_FLUSH_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_MW_BIND_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_BAD_RESP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_ACCESS_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_INV_REQ_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_ACCESS_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RETRY_EXC_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_ABORT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_INV_EECN_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_FATAL_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_GENERAL_ERR].counter);
+       return stats;
+}
+
 static void ocrdma_update_stats(struct ocrdma_dev *dev)
 {
        ulong now = jiffies, secs;
        int status = 0;
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+                     (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
 
        secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
        if (secs) {
@@ -444,10 +605,74 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev)
                if (status)
                        pr_err("%s: stats mbox failed with status = %d\n",
                               __func__, status);
+               /* Update PD counters from PD resource manager */
+               if (dev->pd_mgr->pd_prealloc_valid) {
+                       rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
+                       rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
+                       /* Threshold stata*/
+                       rsrc_stats = &rdma_stats->th_rsrc_stats;
+                       rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
+                       rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
+               }
                dev->last_stats_time = jiffies;
        }
 }
 
+static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
+                                       const char __user *buffer,
+                                       size_t count, loff_t *ppos)
+{
+       char tmp_str[32];
+       long reset;
+       int status = 0;
+       struct ocrdma_stats *pstats = filp->private_data;
+       struct ocrdma_dev *dev = pstats->dev;
+
+       if (count > 32)
+               goto err;
+
+       if (copy_from_user(tmp_str, buffer, count))
+               goto err;
+
+       tmp_str[count-1] = '\0';
+       if (kstrtol(tmp_str, 10, &reset))
+               goto err;
+
+       switch (pstats->type) {
+       case OCRDMA_RESET_STATS:
+               if (reset) {
+                       status = ocrdma_mbx_rdma_stats(dev, true);
+                       if (status) {
+                               pr_err("Failed to reset stats = %d", status);
+                               goto err;
+                       }
+               }
+               break;
+       default:
+               goto err;
+       }
+
+       return count;
+err:
+       return -EFAULT;
+}
+
+int ocrdma_pma_counters(struct ocrdma_dev *dev,
+                       struct ib_mad *out_mad)
+{
+       struct ib_pma_portcounters *pma_cnt;
+
+       memset(out_mad->data, 0, sizeof out_mad->data);
+       pma_cnt = (void *)(out_mad->data + 40);
+       ocrdma_update_stats(dev);
+
+       pma_cnt->port_xmit_data    = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
+       pma_cnt->port_rcv_data     = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
+       pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
+       pma_cnt->port_rcv_packets  = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
+       return 0;
+}
+
 static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
                                        size_t usr_buf_len, loff_t *ppos)
 {
@@ -492,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
        case OCRDMA_RX_DBG_STATS:
                data = ocrdma_rx_dbg_stats(dev);
                break;
+       case OCRDMA_DRV_STATS:
+               data = ocrdma_driver_dbg_stats(dev);
+               break;
 
        default:
                status = -EFAULT;
@@ -514,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = ocrdma_dbgfs_ops_read,
+       .write = ocrdma_dbgfs_ops_write,
 };
 
 void ocrdma_add_port_stats(struct ocrdma_dev *dev)
@@ -582,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
                                 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
                goto err;
 
+       dev->driver_stats.type = OCRDMA_DRV_STATS;
+       dev->driver_stats.dev = dev;
+       if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
+                                       &dev->driver_stats, &ocrdma_dbg_ops))
+               goto err;
+
+       dev->reset_stats.type = OCRDMA_RESET_STATS;
+       dev->reset_stats.dev = dev;
+       if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+                               &dev->reset_stats, &ocrdma_dbg_ops))
+               goto err;
+
        /* Now create dma_mem for stats mbx command */
        if (!ocrdma_alloc_stats_mem(dev))
                goto err;
index 5f5e20c46d7ccdc9fc02aa76084b971ad4bad4c7..091edd68a8a34678e5283b2374c58274c84580b3 100644 (file)
@@ -43,12 +43,16 @@ enum OCRDMA_STATS_TYPE {
        OCRDMA_RXQP_ERRSTATS,
        OCRDMA_TXQP_ERRSTATS,
        OCRDMA_TX_DBG_STATS,
-       OCRDMA_RX_DBG_STATS
+       OCRDMA_RX_DBG_STATS,
+       OCRDMA_DRV_STATS,
+       OCRDMA_RESET_STATS
 };
 
 void ocrdma_rem_debugfs(void);
 void ocrdma_init_debugfs(void);
 void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
 void ocrdma_add_port_stats(struct ocrdma_dev *dev);
+int ocrdma_pma_counters(struct ocrdma_dev *dev,
+                       struct ib_mad *out_mad);
 
 #endif /* __OCRDMA_STATS_H__ */
index fb8d8c4dfbb97d2b36abdf69888793741aba182a..877175563634df79a889ed8a428405258b9df1e4 100644 (file)
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
 
        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
-       if (index > OCRDMA_MAX_SGID)
+       if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;
 
        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
        return found;
 }
 
+
+static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
+{
+       u16 pd_bitmap_idx = 0;
+       const unsigned long *pd_bitmap;
+
+       if (dpp_pool) {
+               pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
+               pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+                                                   dev->pd_mgr->max_dpp_pd);
+               __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+               dev->pd_mgr->pd_dpp_count++;
+               if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
+                       dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
+       } else {
+               pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
+               pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+                                                   dev->pd_mgr->max_normal_pd);
+               __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+               dev->pd_mgr->pd_norm_count++;
+               if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
+                       dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
+       }
+       return pd_bitmap_idx;
+}
+
+static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
+                                       bool dpp_pool)
+{
+       u16 pd_count;
+       u16 pd_bit_index;
+
+       pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
+                             dev->pd_mgr->pd_norm_count;
+       if (pd_count == 0)
+               return -EINVAL;
+
+       if (dpp_pool) {
+               pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
+               if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
+                       return -EINVAL;
+               } else {
+                       __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
+                       dev->pd_mgr->pd_dpp_count--;
+               }
+       } else {
+               pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
+               if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
+                       return -EINVAL;
+               } else {
+                       __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
+                       dev->pd_mgr->pd_norm_count--;
+               }
+       }
+
+       return 0;
+}
+
+static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+                                  bool dpp_pool)
+{
+       int status;
+
+       mutex_lock(&dev->dev_lock);
+       status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
+       mutex_unlock(&dev->dev_lock);
+       return status;
+}
+
+static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+       u16 pd_idx = 0;
+       int status = 0;
+
+       mutex_lock(&dev->dev_lock);
+       if (pd->dpp_enabled) {
+               /* try allocating DPP PD, if not available then normal PD */
+               if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
+                       pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
+                       pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
+               } else if (dev->pd_mgr->pd_norm_count <
+                          dev->pd_mgr->max_normal_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+                       pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+                       pd->dpp_enabled = false;
+               } else {
+                       status = -EINVAL;
+               }
+       } else {
+               if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+                       pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+               } else {
+                       status = -EINVAL;
+               }
+       }
+       mutex_unlock(&dev->dev_lock);
+       return status;
+}
+
 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                          struct ocrdma_ucontext *uctx,
                                          struct ib_udata *udata)
@@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                           dev->attr.wqe_size) : 0;
        }
 
+       if (dev->pd_mgr->pd_prealloc_valid) {
+               status = ocrdma_get_pd_num(dev, pd);
+               return (status == 0) ? pd : ERR_PTR(status);
+       }
+
 retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
@@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
 {
        int status = 0;
 
-       status = ocrdma_mbx_dealloc_pd(dev, pd);
+       if (dev->pd_mgr->pd_prealloc_valid)
+               status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+       else
+               status = ocrdma_mbx_dealloc_pd(dev, pd);
+
        kfree(pd);
        return status;
 }
@@ -325,7 +435,6 @@ err:
 
 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
-       int status = 0;
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
@@ -334,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
                       __func__, dev->id, pd->id);
        }
        uctx->cntxt_pd = NULL;
-       status = _ocrdma_dealloc_pd(dev, pd);
-       return status;
+       (void)_ocrdma_dealloc_pd(dev, pd);
+       return 0;
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -569,7 +678,7 @@ err:
        if (is_uctx_pd) {
                ocrdma_release_ucontext_pd(uctx);
        } else {
-               status = ocrdma_mbx_dealloc_pd(dev, pd);
+               status = _ocrdma_dealloc_pd(dev, pd);
                kfree(pd);
        }
 exit:
@@ -837,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 {
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
-       int status;
 
-       status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+       (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
 
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
 
@@ -850,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 
        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
-               status = 0;
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
-       return status;
+       return 0;
 }
 
 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -986,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
 
 int ocrdma_destroy_cq(struct ib_cq *ibcq)
 {
-       int status;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1003,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);
 
-       status = ocrdma_mbx_destroy_cq(dev, cq);
+       (void)ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1014,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
        }
 
        kfree(cq);
-       return status;
+       return 0;
 }
 
 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1113,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
        int status = 0;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
-       struct ocrdma_dev *dev = qp->dev;
        struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
@@ -1253,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                status = -ENOMEM;
                goto gen_err;
        }
-       qp->dev = dev;
        ocrdma_set_qp_init_params(qp, pd, attrs);
        if (udata == NULL)
                qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1312,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        enum ib_qp_state old_qps;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
        if (attr_mask & IB_QP_STATE)
                status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
        /* if new and previous states are same hw doesn't need to
@@ -1335,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        enum ib_qp_state old_qps, new_qps;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
 
        /* syncronize with multiple context trying to change, retrive qps */
        mutex_lock(&dev->dev_lock);
@@ -1402,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
        u32 qp_state;
        struct ocrdma_qp_params params;
        struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
 
        memset(&params, 0, sizeof(params));
        mutex_lock(&dev->dev_lock);
@@ -1412,8 +1517,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
                goto mbx_err;
        if (qp->qp_type == IB_QPT_UD)
                qp_attr->qkey = params.qkey;
-       qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
-       qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
        qp_attr->path_mtu =
                ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
                                OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@@ -1468,6 +1571,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
        memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
        qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
                    OCRDMA_QP_PARAMS_STATE_SHIFT;
+       qp_attr->qp_state = get_ibqp_state(qp_state);
+       qp_attr->cur_qp_state = qp_attr->qp_state;
        qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
        qp_attr->max_dest_rd_atomic =
            params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@@ -1475,19 +1580,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
            params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
        qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
                                OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
+       /* Sync driver QP state with FW */
+       ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
 mbx_err:
        return status;
 }
 
-static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
+static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
 {
-       int i = idx / 32;
-       unsigned int mask = (1 << (idx % 32));
+       unsigned int i = idx / 32;
+       u32 mask = (1U << (idx % 32));
 
-       if (srq->idx_bit_fields[i] & mask)
-               srq->idx_bit_fields[i] &= ~mask;
-       else
-               srq->idx_bit_fields[i] |= mask;
+       srq->idx_bit_fields[i] ^= mask;
 }
 
 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
@@ -1596,7 +1700,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 {
        int found = false;
        unsigned long flags;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        /* sync with any active CQ poll */
 
        spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1613,7 +1717,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 
 int ocrdma_destroy_qp(struct ib_qp *ibqp)
 {
-       int status;
        struct ocrdma_pd *pd;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
@@ -1622,7 +1725,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        unsigned long flags;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
 
        attrs.qp_state = IB_QPS_ERR;
        pd = qp->pd;
@@ -1635,7 +1738,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
         * discarded until the old CQEs are discarded.
         */
        mutex_lock(&dev->dev_lock);
-       status = ocrdma_mbx_destroy_qp(dev, qp);
+       (void) ocrdma_mbx_destroy_qp(dev, qp);
 
        /*
         * acquire CQ lock while destroy is in progress, in order to
@@ -1670,7 +1773,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
-       return status;
+       return 0;
 }
 
 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@@ -1831,6 +1934,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
        else
                ud_hdr->qkey = wr->wr.ud.remote_qkey;
        ud_hdr->rsvd_ahid = ah->id;
+       if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
+               hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
 }
 
 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@@ -2007,11 +2112,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
        u64 fbo;
        struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
        struct ocrdma_mr *mr;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
 
        wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
 
-       if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
+       if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
                return -EINVAL;
 
        hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2039,7 +2145,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
        fast_reg->size_sge =
                get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
        mr = (struct ocrdma_mr *) (unsigned long)
-               qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
+               dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
        build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
        return 0;
 }
@@ -2112,8 +2218,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
                        status = ocrdma_build_write(qp, hdr, wr);
                        break;
-               case IB_WR_RDMA_READ_WITH_INV:
-                       hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
                case IB_WR_RDMA_READ:
                        ocrdma_build_read(qp, hdr, wr);
                        break;
@@ -2484,8 +2588,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
                                 bool *polled, bool *stop)
 {
        bool expand;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+       if (status < OCRDMA_MAX_CQE_ERR)
+               atomic_inc(&dev->cqe_err_stats[status]);
 
        /* when hw sq is empty, but rq is not empty, so we continue
         * to keep the cqe in order to get the cq event again.
@@ -2604,6 +2711,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                                int status)
 {
        bool expand;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+
+       if (status < OCRDMA_MAX_CQE_ERR)
+               atomic_inc(&dev->cqe_err_stats[status]);
 
        /* when hw_rq is empty, but wq is not empty, so continue
         * to keep the cqe to get the cq event again.
index c00ae093b6f881870867b8dac16af33efca4091b..ffd48bfc4923457e5383345acfa3620fa5f6a52f 100644 (file)
@@ -1082,12 +1082,6 @@ struct qib_devdata {
        /* control high-level access to EEPROM */
        struct mutex eep_lock;
        uint64_t traffic_wds;
-       /* active time is kept in seconds, but logged in hours */
-       atomic_t active_time;
-       /* Below are nominal shadow of EEPROM, new since last EEPROM update */
-       uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
-       uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
-       uint16_t eep_hrs;
        /*
         * masks for which bits of errs, hwerrs that cause
         * each of the counters to increment.
@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
 int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
                    const void *buffer, int len);
 void qib_get_eeprom_info(struct qib_devdata *);
-int qib_update_eeprom_log(struct qib_devdata *dd);
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+#define qib_inc_eeprom_err(dd, eidx, incr)
 void qib_dump_lookup_output_queue(struct qib_devdata *);
 void qib_force_pio_avail_update(struct qib_devdata *);
 void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
  * Flush write combining store buffers (if present) and perform a write
  * barrier.
  */
+static inline void qib_flush_wc(void)
+{
 #if defined(CONFIG_X86_64)
-#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+       asm volatile("sfence" : : : "memory");
 #else
-#define qib_flush_wc() wmb() /* no reorder around wc flush */
+       wmb(); /* no reorder around wc flush */
 #endif
+}
 
 /* global module parameter variables */
 extern unsigned qib_ibmtu;
index 5670ace27c639adb351b9928c65ed081a599a910..4fb78abd8ba1ad69629a9752878d4aa30a4f2229 100644 (file)
@@ -257,7 +257,7 @@ struct qib_base_info {
 
        /* shared memory page for send buffer disarm status */
        __u64 spi_sendbuf_status;
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /*
  * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@ struct qib_user_info {
         */
        __u64 spu_base_info;
 
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /* User commands. */
 
index 6abd3ed3cd51ecf2c0a21927b0cf4894f7c48941..5e75b43c596b608adfacfeb48ea955ffa914082d 100644 (file)
@@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
        DEBUGFS_FILE_CREATE(opcode_stats);
        DEBUGFS_FILE_CREATE(ctx_stats);
        DEBUGFS_FILE_CREATE(qp_stats);
-       return;
 }
 
 void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
index 5dfda4c5cc9c3b1fde36d02c55cd387b7e366542..8c34b23e5bf670b92b3d1cec4d7b9c9e93ba9a20 100644 (file)
@@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd)
                client_pool = dc->next;
        else
                /* None in pool, alloc and init */
-               dc = kmalloc(sizeof *dc, GFP_KERNEL);
+               dc = kmalloc(sizeof(*dc), GFP_KERNEL);
 
        if (dc) {
                dc->next = NULL;
@@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        if (dd->userbase) {
                /* If user regs mapped, they are after send, so set limit. */
                u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+
                if (!dd->piovl15base)
                        snd_lim = dd->uregbase;
                krb32 = (u32 __iomem *)dd->userbase;
@@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        snd_bottom = dd->pio2k_bufbase;
        if (snd_lim == 0) {
                u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+
                snd_lim = snd_bottom + tot2k;
        }
        /* If 4k buffers exist, account for them by bumping
@@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
        /* not very efficient, but it works for now */
        while (reg_addr < reg_end) {
                u64 data;
+
                if (copy_from_user(&data, uaddr, sizeof(data))) {
                        ret = -EFAULT;
                        goto bail;
@@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd,
 
        if (!dd || !op)
                return -EINVAL;
-       olp = vmalloc(sizeof *olp);
+       olp = vmalloc(sizeof(*olp));
        if (!olp) {
                pr_err("vmalloc for observer failed\n");
                return -ENOMEM;
@@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
                op = diag_get_observer(dd, *off);
                if (op) {
                        u32 offset = *off;
+
                        ret = op->hook(dd, op, offset, &data64, 0, use_32);
                }
                /*
@@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
                if (count == 4 || count == 8) {
                        u64 data64;
                        u32 offset = *off;
+
                        ret = copy_from_user(&data64, data, count);
                        if (ret) {
                                ret = -EFAULT;
index 5bee08f16d7438d2eba1668bdccee64265c9972d..f58fdc3d25a29a71909a22c772217c62a975b785 100644 (file)
@@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit)
 {
        static char iname[16];
 
-       snprintf(iname, sizeof iname, "infinipath%u", unit);
+       snprintf(iname, sizeof(iname), "infinipath%u", unit);
        return iname;
 }
 
@@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
                qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
                if (qp_num != QIB_MULTICAST_QPN) {
                        int ruc_res;
+
                        qp = qib_lookup_qpn(ibp, qp_num);
                        if (!qp)
                                goto drop;
@@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
        rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
        if (dd->flags & QIB_NODMA_RTAIL) {
                u32 seq = qib_hdrget_seq(rhf_addr);
+
                if (seq != rcd->seq_cnt)
                        goto bail;
                hdrqtail = 0;
@@ -651,6 +653,7 @@ bail:
 int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
 {
        struct qib_devdata *dd = ppd->dd;
+
        ppd->lid = lid;
        ppd->lmc = lmc;
 
index 4d5d71aaa2b4e53e319b3c9cc57be37291f3cdd7..311ee6c3dd5e8b2e39382ef1c5d2741040a47636 100644 (file)
@@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
 
        if (t && dd0->nguid > 1 && t <= dd0->nguid) {
                u8 oguid;
+
                dd->base_guid = dd0->base_guid;
                bguid = (u8 *) &dd->base_guid;
 
@@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
                 * This board has a Serial-prefix, which is stored
                 * elsewhere for backward-compatibility.
                 */
-               memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
-               snp[sizeof ifp->if_sprefix] = '\0';
+               memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
+               snp[sizeof(ifp->if_sprefix)] = '\0';
                len = strlen(snp);
                snp += len;
-               len = (sizeof dd->serial) - len;
-               if (len > sizeof ifp->if_serial)
-                       len = sizeof ifp->if_serial;
+               len = sizeof(dd->serial) - len;
+               if (len > sizeof(ifp->if_serial))
+                       len = sizeof(ifp->if_serial);
                memcpy(snp, ifp->if_serial, len);
-       } else
-               memcpy(dd->serial, ifp->if_serial,
-                      sizeof ifp->if_serial);
+       } else {
+               memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
+       }
        if (!strstr(ifp->if_comment, "Tested successfully"))
                qib_dev_err(dd,
                        "Board SN %s did not pass functional test: %s\n",
                        dd->serial, ifp->if_comment);
 
-       memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
-       /*
-        * Power-on (actually "active") hours are kept as little-endian value
-        * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-        * atomic_t while running.
-        */
-       atomic_set(&dd->active_time, 0);
-       dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
 done:
        vfree(buf);
 
 bail:;
 }
 
-/**
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the qlogic_ib device
- *
- * Although the time is kept as seconds in the qib_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct qib_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-int qib_update_eeprom_log(struct qib_devdata *dd)
-{
-       void *buf;
-       struct qib_flash *ifp;
-       int len, hi_water;
-       uint32_t new_time, new_hrs;
-       u8 csum;
-       int ret, idx;
-       unsigned long flags;
-
-       /* first, check if we actually need to do anything. */
-       ret = 0;
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               if (dd->eep_st_new_errs[idx]) {
-                       ret = 1;
-                       break;
-               }
-       }
-       new_time = atomic_read(&dd->active_time);
-
-       if (ret == 0 && new_time < 3600)
-               goto bail;
-
-       /*
-        * The quick-check above determined that there is something worthy
-        * of logging, so get current contents and do a more detailed idea.
-        * read full flash, not just currently used part, since it may have
-        * been written with a newer definition
-        */
-       len = sizeof(struct qib_flash);
-       buf = vmalloc(len);
-       ret = 1;
-       if (!buf) {
-               qib_dev_err(dd,
-                       "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
-                       len);
-               goto bail;
-       }
-
-       /* Grab semaphore and read current EEPROM. If we get an
-        * error, let go, but if not, keep it until we finish write.
-        */
-       ret = mutex_lock_interruptible(&dd->eep_lock);
-       if (ret) {
-               qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-               goto free_bail;
-       }
-       ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
-       if (ret) {
-               mutex_unlock(&dd->eep_lock);
-               qib_dev_err(dd, "Unable read EEPROM for logging\n");
-               goto free_bail;
-       }
-       ifp = (struct qib_flash *)buf;
-
-       csum = flash_csum(ifp, 0);
-       if (csum != ifp->if_csum) {
-               mutex_unlock(&dd->eep_lock);
-               qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-                           csum, ifp->if_csum);
-               ret = 1;
-               goto free_bail;
-       }
-       hi_water = 0;
-       spin_lock_irqsave(&dd->eep_st_lock, flags);
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               int new_val = dd->eep_st_new_errs[idx];
-               if (new_val) {
-                       /*
-                        * If we have seen any errors, add to EEPROM values
-                        * We need to saturate at 0xFF (255) and we also
-                        * would need to adjust the checksum if we were
-                        * trying to minimize EEPROM traffic
-                        * Note that we add to actual current count in EEPROM,
-                        * in case it was altered while we were running.
-                        */
-                       new_val += ifp->if_errcntp[idx];
-                       if (new_val > 0xFF)
-                               new_val = 0xFF;
-                       if (ifp->if_errcntp[idx] != new_val) {
-                               ifp->if_errcntp[idx] = new_val;
-                               hi_water = offsetof(struct qib_flash,
-                                                   if_errcntp) + idx;
-                       }
-                       /*
-                        * update our shadow (used to minimize EEPROM
-                        * traffic), to match what we are about to write.
-                        */
-                       dd->eep_st_errs[idx] = new_val;
-                       dd->eep_st_new_errs[idx] = 0;
-               }
-       }
-       /*
-        * Now update active-time. We would like to round to the nearest hour
-        * but unless atomic_t are sure to be proper signed ints we cannot,
-        * because we need to account for what we "transfer" to EEPROM and
-        * if we log an hour at 31 minutes, then we would need to set
-        * active_time to -29 to accurately count the _next_ hour.
-        */
-       if (new_time >= 3600) {
-               new_hrs = new_time / 3600;
-               atomic_sub((new_hrs * 3600), &dd->active_time);
-               new_hrs += dd->eep_hrs;
-               if (new_hrs > 0xFFFF)
-                       new_hrs = 0xFFFF;
-               dd->eep_hrs = new_hrs;
-               if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-                       ifp->if_powerhour[0] = new_hrs & 0xFF;
-                       hi_water = offsetof(struct qib_flash, if_powerhour);
-               }
-               if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-                       ifp->if_powerhour[1] = new_hrs >> 8;
-                       hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
-               }
-       }
-       /*
-        * There is a tiny possibility that we could somehow fail to write
-        * the EEPROM after updating our shadows, but problems from holding
-        * the spinlock too long are a much bigger issue.
-        */
-       spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-       if (hi_water) {
-               /* we made some change to the data, uopdate cksum and write */
-               csum = flash_csum(ifp, 1);
-               ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
-       }
-       mutex_unlock(&dd->eep_lock);
-       if (ret)
-               qib_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-       vfree(buf);
-bail:
-       return ret;
-}
-
-/**
- * qib_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the qlogic_ib device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
-{
-       uint new_val;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->eep_st_lock, flags);
-       new_val = dd->eep_st_new_errs[eidx] + incr;
-       if (new_val > 255)
-               new_val = 255;
-       dd->eep_st_new_errs[eidx] = new_val;
-       spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-}
index b15e34eeef685d510c781d25d08433e4b3e7c717..41937c6f888af13deadb6c7b25678cfc34596cf8 100644 (file)
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
                 * unless perhaps the user has mpin'ed the pages
                 * themselves.
                 */
-               qib_devinfo(dd->pcidev,
-                        "Failed to lock addr %p, %u pages: "
-                        "errno %d\n", (void *) vaddr, cnt, -ret);
+               qib_devinfo(
+                       dd->pcidev,
+                       "Failed to lock addr %p, %u pages: errno %d\n",
+                       (void *) vaddr, cnt, -ret);
                goto done;
        }
        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -437,7 +438,7 @@ cleanup:
                        goto cleanup;
                }
                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-                                tidmap, sizeof tidmap)) {
+                                tidmap, sizeof(tidmap))) {
                        ret = -EFAULT;
                        goto cleanup;
                }
@@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
        }
 
        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-                          sizeof tidmap)) {
+                          sizeof(tidmap))) {
                ret = -EFAULT;
                goto done;
        }
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
                /* rcvegrbufs are read-only on the slave */
                if (vma->vm_flags & VM_WRITE) {
                        qib_devinfo(dd->pcidev,
-                                "Can't map eager buffers as "
-                                "writable (flags=%lx)\n", vma->vm_flags);
+                                "Can't map eager buffers as writable (flags=%lx)\n",
+                                vma->vm_flags);
                        ret = -EPERM;
                        goto bail;
                }
@@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
         */
        if (weight >= qib_cpulist_count) {
                int cpu;
+
                cpu = find_first_zero_bit(qib_cpulist,
                                          qib_cpulist_count);
                if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
        if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
                uinfo->spu_userversion & 0xffff)) {
                qib_devinfo(dd->pcidev,
-                        "Mismatched user version (%d.%d) and driver "
-                        "version (%d.%d) while context sharing. Ensure "
-                        "that driver and library are from the same "
-                        "release.\n",
+                        "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
                         (int) (uinfo->spu_userversion >> 16),
                         (int) (uinfo->spu_userversion & 0xffff),
                         QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
        }
        if (!ppd) {
                u32 pidx = ctxt % dd->num_pports;
+
                if (usable(dd->pport + pidx))
                        ppd = dd->pport + pidx;
                else {
@@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 
        if (alg == QIB_PORT_ALG_ACROSS) {
                unsigned inuse = ~0U;
+
                /* find device (with ACTIVE ports) with fewest ctxts in use */
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
                        unsigned cused = 0, cfree = 0, pusable = 0;
+
                        if (!dd)
                                continue;
                        if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
        } else {
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
+
                        if (dd) {
                                ret = choose_port_ctxt(fp, dd, port, uinfo);
                                if (!ret)
@@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
        }
        for (ndev = 0; ndev < devmax; ndev++) {
                struct qib_devdata *dd = qib_lookup(ndev);
+
                if (dd) {
                        if (pcibus_to_node(dd->pcidev->bus) < 0) {
                                ret = -EINVAL;
index 81854586c081fee37e0b3933a26d08e5e8b4e815..650897a8591e872f338d03994da3ed18e5a51bf7 100644 (file)
@@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf,
 {
        qib_stats.sps_ints = qib_sps_ints();
        return simple_read_from_buffer(buf, count, ppos, &qib_stats,
-                                      sizeof qib_stats);
+                                      sizeof(qib_stats));
 }
 
 /*
@@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        return simple_read_from_buffer(buf, count, ppos, qib_statnames,
-               sizeof qib_statnames - 1); /* no null */
+               sizeof(qib_statnames) - 1); /* no null */
 }
 
 static const struct file_operations driver_ops[] = {
@@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
        int ret, i;
 
        /* create the per-unit directory */
-       snprintf(unit, sizeof unit, "%u", dd->unit);
+       snprintf(unit, sizeof(unit), "%u", dd->unit);
        ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
                          &simple_dir_operations, dd);
        if (ret) {
@@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name)
        }
 
        spin_lock(&tmp->d_lock);
-       if (!(d_unhashed(tmp) && tmp->d_inode)) {
+       if (!d_unhashed(tmp) && tmp->d_inode) {
                __d_drop(tmp);
                spin_unlock(&tmp->d_lock);
                simple_unlink(parent->d_inode, tmp);
@@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb,
 
        root = dget(sb->s_root);
        mutex_lock(&root->d_inode->i_mutex);
-       snprintf(unit, sizeof unit, "%u", dd->unit);
+       snprintf(unit, sizeof(unit), "%u", dd->unit);
        dir = lookup_one_len(unit, root, strlen(unit));
 
        if (IS_ERR(dir)) {
@@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
                        const char *dev_name, void *data)
 {
        struct dentry *ret;
+
        ret = mount_single(fs_type, flags, data, qibfs_fill_super);
        if (!IS_ERR(ret))
                qib_super = ret->d_sb;
index d68266ac7619b896c49e500256c263ec1c3619c1..0d2ba59af30af66bce01ef8132c8182cc6e44a33 100644 (file)
@@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
@@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }
 
        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
@@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
 
        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
-               qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
         */
        mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
                ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
-       qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+       qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
@@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
                }
                if (crcs) {
                        u32 cntr = dd->cspec->lli_counter;
+
                        cntr += crcs;
                        if (cntr) {
                                if (cntr > dd->cspec->lli_thresh) {
@@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
                        "irq is 0, BIOS error?  Interrupts won't work\n");
        else {
                int ret;
+
                ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
                                  QIB_DRV_NAME, dd);
                if (ret)
@@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
        spin_lock_irqsave(&dd->eep_st_lock, flags);
        traffic_wds -= dd->traffic_wds;
        dd->traffic_wds += traffic_wds;
-       if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->active_time); /* S/B #define */
        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 
        qib_chk_6120_errormask(dd);
@@ -2929,6 +2930,7 @@ bail:
 static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
 {
        int ret = 0;
+
        if (!strncmp(what, "ibc", 3)) {
                ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
 static void set_6120_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
        dd->cspec->cregbase = (u64 __iomem *)
                ((char __iomem *) dd->kregbase + cregbase);
index 7dec89fdc1248dc69c3cab38069e5e2f32986ce3..22affda8af88eacbd11f21abab55ba299dfb0e0b 100644 (file)
@@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
        errs &= QLOGIC_IB_E_SDMAERRS;
 
        msg = dd->cspec->sdmamsgbuf;
-       qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+       qib_decode_7220_sdma_errs(ppd, errs, msg,
+               sizeof(dd->cspec->sdmamsgbuf));
        spin_lock_irqsave(&ppd->sdma_lock, flags);
 
        if (errs & ERR_MASK(SendBufMisuseErr)) {
@@ -1043,6 +1044,7 @@ done:
 static void reenable_7220_chase(unsigned long opaque)
 {
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
 
        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
-               qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
                ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
                ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
 
-       qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+       qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
@@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }
@@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
 
        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
@@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
        spin_lock_irqsave(&dd->eep_st_lock, flags);
        traffic_wds -= dd->traffic_wds;
        dd->traffic_wds += traffic_wds;
-       if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->active_time); /* S/B #define */
        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 done:
        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
index a7eb32517a04bced55f9d1d5720b5d060d96e92b..ef97b71c8f7dd713a77401f593c6a10320a046e6 100644 (file)
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
 
 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation, \
+MODULE_PARM_DESC(long_attenuation,
                 "attenuation cutoff (dB) for long copper cable setup");
 
 static ushort qib_singleport;
@@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = {
 static int  setup_txselect(const char *, struct kernel_param *);
 module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect, \
+MODULE_PARM_DESC(txselect,
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");
 
 #define BOARD_QME7342 5
 #define BOARD_QMH7342 6
+#define BOARD_QMH7360 9
 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
@@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
@@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
        /* do these first, they are most important */
        if (errs & QIB_E_HARDWARE) {
                *msg = '\0';
-               qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        } else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
        mask = QIB_E_HARDWARE;
        *msg = '\0';
 
-       err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+       err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
                   qib_7322error_msgs);
 
        /*
@@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
        *msg = '\0';
 
        if (errs & ~QIB_E_P_BITSEXTANT) {
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
                if (!*msg)
-                       snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+                       snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
                                 "no others");
                qib_dev_porterr(dd, ppd->port,
                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                /* determine cause, then write to clear */
                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
                           hdrchk_msgs);
                *msg = '\0';
                /* senderrbuf cleared in SPKTERRS below */
@@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                         * isn't valid.  We don't want to confuse people, so
                         * we just don't print them, except at debug
                         */
-                       err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+                       err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                                   (errs & QIB_E_P_LINK_PKTERRS),
                                   qib_7322p_error_msgs);
                        *msg = '\0';
@@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                 * valid.  We don't want to confuse people, so we just
                 * don't print them, except at debug
                 */
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
                           qib_7322p_error_msgs);
                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
                *msg = '\0';
@@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
                if (dd->cspec->num_msix_entries) {
                        /* and same for MSIx */
                        u64 val = qib_read_kreg64(dd, kr_intgranted);
+
                        if (val)
                                qib_write_kreg(dd, kr_intgranted, val);
                }
@@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
                int err;
                unsigned long flags;
                struct qib_pportdata *ppd = dd->pport;
+
                for (; pidx < dd->num_pports; ++pidx, ppd++) {
                        err = 0;
                        if (pidx == 0 && (hwerrs &
@@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                qib_update_rhdrq_dca(rcd, cpu);
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                qib_update_sdma_dca(ppd, cpu);
        }
 }
@@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref)
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                dd = rcd->dd;
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                dd = ppd->dd;
        }
        qib_devinfo(dd->pcidev,
@@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                struct qib_pportdata *ppd;
                struct qib_qsfp_data *qd;
                u32 mask;
+
                if (!dd->pport[pidx].link_speed_supported)
                        continue;
                mask = QSFP_GPIO_MOD_PRS_N;
@@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
                if (gpiostatus & dd->cspec->gpio_mask & mask) {
                        u64 pins;
+
                        qd = &ppd->cpspec->qsfp_data;
                        gpiostatus &= ~mask;
                        pins = qib_read_kreg64(dd, kr_extstatus);
@@ -3442,7 +3452,7 @@ try_intx:
        }
 
        /* Try to get MSIx interrupts */
-       memset(redirect, 0, sizeof redirect);
+       memset(redirect, 0, sizeof(redirect));
        mask = ~0ULL;
        msixnum = 0;
        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
                n = "InfiniPath_QME7362";
                dd->flags |= QIB_HAS_QSFP;
                break;
+       case BOARD_QMH7360:
+               n = "Intel IB QDR 1P FLR-QSFP Adptr";
+               dd->flags |= QIB_HAS_QSFP;
+               break;
        case 15:
                n = "InfiniPath_QLE7342_TEST";
                dd->flags |= QIB_HAS_QSFP;
@@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
         */
        for (i = 0; i < msix_entries; i++) {
                u64 vecaddr, vecdata;
+
                vecaddr = qib_read_kreg64(dd, 2 * i +
                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
                traffic_wds -= ppd->dd->traffic_wds;
                ppd->dd->traffic_wds += traffic_wds;
-               if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-                       atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
                                                QIB_IB_QDR) &&
@@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
 {
        u64 newctrlb;
+
        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
                                    IBA7322_IBC_IBTA_1_2_MASK |
                                    IBA7322_IBC_MAX_SPEED_MASK);
@@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd)
 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
 
        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
        struct qib_devdata *dd;
        unsigned long val;
        char *n;
+
        if (strlen(str) >= MAX_ATTEN_LEN) {
                pr_info("txselect_values string too long\n");
                return -ENOSPC;
@@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
        val = TIDFLOW_ERRBITS; /* these are W1C */
        for (i = 0; i < dd->cfgctxts; i++) {
                int flow;
+
                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
        }
@@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 
        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
                struct qib_chippport_specific *cp = ppd->cpspec;
+
                ppd->link_speed_supported = features & PORT_SPD_CAP;
                features >>=  PORT_SPD_CAP_SHIFT;
                if (!ppd->link_speed_supported) {
@@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
                                ppd->vls_supported = IB_VL_VL0_7;
                        else {
                                qib_devinfo(dd->pcidev,
-                                           "Invalid num_vls %u for MTU %d "
-                                           ", using 4 VLs\n",
+                                           "Invalid num_vls %u for MTU %d , using 4 VLs\n",
                                            qib_num_cfg_vls, mtu);
                                ppd->vls_supported = IB_VL_VL0_3;
                                qib_num_cfg_vls = 4;
@@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
        int ret = 0;
+
        if (ppd->dd->cspec->r1)
                ret = serdes_7322_init_old(ppd);
        else
@@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd)
 
 static int qib_r_grab(struct qib_devdata *dd)
 {
-       u64 val;
-       val = SJA_EN;
+       u64 val = SJA_EN;
+
        qib_write_kreg(dd, kr_r_access, val);
        qib_read_kreg32(dd, kr_scratch);
        return 0;
@@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd)
 {
        u64 val;
        int timeout;
+
        for (timeout = 0; timeout < 100 ; ++timeout) {
                val = qib_read_kreg32(dd, kr_r_access);
                if (val & R_RDY)
@@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten,
                }
                if (inp) {
                        int tdi = inp[pos >> 3] >> (pos & 7);
+
                        val |= ((tdi & 1) << R_TDI_LSB);
                }
                qib_write_kreg(dd, kr_r_access, val);
index 729da39c49ed3fac472db472c3ac58b7766f8ada..2ee36953e234c46ff704bc6e5dfbed8339c09974 100644 (file)
@@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
         * Allocate full ctxtcnt array, rather than just cfgctxts, because
         * cleanup iterates across all possible ctxts.
         */
-       dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+       dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
        if (!dd->rcd) {
                qib_dev_err(dd,
                        "Unable to allocate ctxtdata array, failing\n");
@@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
                        u8 hw_pidx, u8 port)
 {
        int size;
+
        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */
@@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd)
                ppd = dd->pport + pidx;
                if (!ppd->qib_wq) {
                        char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+
                        snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
                                dd->unit, pidx);
                        ppd->qib_wq =
@@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit)
 
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                int mtu;
+
                if (lastfail)
                        ret = lastfail;
                ppd = dd->pport + pidx;
@@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
                qib_free_pportdata(ppd);
        }
 
-       qib_update_eeprom_log(dd);
 }
 
 /**
@@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
        addr = vmalloc(cnt);
        if (!addr) {
                qib_devinfo(dd->pcidev,
-                        "Couldn't get memory for checking PIO perf,"
-                        " skipping\n");
+                        "Couldn't get memory for checking PIO perf, skipping\n");
                goto done;
        }
 
@@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
 
        if (!qib_cpulist_count) {
                u32 count = num_online_cpus();
+
                qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
                                      sizeof(long), GFP_KERNEL);
                if (qib_cpulist)
@@ -1179,7 +1181,7 @@ bail:
        if (!list_empty(&dd->list))
                list_del_init(&dd->list);
        ib_dealloc_device(&dd->verbs_dev.ibdev);
-       return ERR_PTR(ret);;
+       return ERR_PTR(ret);
 }
 
 /*
index f4918f2165ec72616d51a3bd9c461b33f6ed8548..086616d071b988e38cc813b09a9659d787195c62 100644 (file)
@@ -168,7 +168,6 @@ skip_ibchange:
        ppd->lastibcstat = ibcs;
        if (ev)
                signal_ib_event(ppd, ev);
-       return;
 }
 
 void qib_clear_symerror_on_linkup(unsigned long opaque)
index 3b9afccaaade824370f5c0ea0d6d6ceb519e6090..ad843c786e7212d0c89f90264bacb5cb6b8346a0 100644 (file)
@@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr)
        if (!mr->lkey_published)
                goto out;
        if (lkey == 0)
-               rcu_assign_pointer(dev->dma_mr, NULL);
+               RCU_INIT_POINTER(dev->dma_mr, NULL);
        else {
                r = lkey >> (32 - ib_qib_lkey_table_size);
-               rcu_assign_pointer(rkt->table[r], NULL);
+               RCU_INIT_POINTER(rkt->table[r], NULL);
        }
        qib_put_mr(mr);
        mr->lkey_published = 0;
index 636be117b57859e690fdb0704be6fe10b429511f..395f4046dba2054633ad41f4a0f67a4dbee57c63 100644 (file)
@@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_257_258.lid1 = lid1;
        data.details.ntc_257_258.lid2 = lid2;
        data.details.ntc_257_258.key = cpu_to_be32(key);
        data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
        data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
        data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_256.lid = data.issuer_lid;
        data.details.ntc_256.method = smp->method;
        data.details.ntc_256.attr_id = smp->attr_id;
@@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
                       hop_cnt);
        }
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_145.lid = data.issuer_lid;
        data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.local_changes = 1;
        data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 static int subn_get_nodedescription(struct ib_smp *smp,
index 8b73a11d571c1671ba6678565c5e34754a3ab411..146cf29a2e1db19a8293f2ecbf3f8348ce1732c2 100644 (file)
@@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
                                           void *obj) {
        struct qib_mmap_info *ip;
 
-       ip = kmalloc(sizeof *ip, GFP_KERNEL);
+       ip = kmalloc(sizeof(*ip), GFP_KERNEL);
        if (!ip)
                goto bail;
 
index a77fb4fb14e43c255e23a41b7c86877835a78ea3..c4473db46699b5f367a0fd1b9d9df92bb2723d50 100644 (file)
@@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
 
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
        for (; i < m; i++) {
-               mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
+               mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
                if (!mr->map[i])
                        goto bail;
        }
@@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
                goto bail;
        }
 
-       mr = kzalloc(sizeof *mr, GFP_KERNEL);
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
 
        /* Allocate struct plus pointers to first level page tables. */
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
-       mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+       mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
        if (!mr)
                goto bail;
 
@@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
        if (size > PAGE_SIZE)
                return ERR_PTR(-EINVAL);
 
-       pl = kzalloc(sizeof *pl, GFP_KERNEL);
+       pl = kzalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return ERR_PTR(-ENOMEM);
 
@@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 
        /* Allocate struct plus pointers to first level page tables. */
        m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
-       fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+       fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
        if (!fmr)
                goto bail;
 
index 61a0046efb76ff9310b9a15f79dfe9dd11354c05..4758a3801ae8f916b6a96988b6c88ce5c344c588 100644 (file)
@@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
        /* We can't pass qib_msix_entry array to qib_msix_setup
         * so use a dummy msix_entry array and copy the allocated
         * irq back to the qib_msix_entry array. */
-       msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
+       msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
        if (!msix_entry)
                goto do_intx;
 
@@ -234,8 +234,10 @@ free_msix_entry:
        kfree(msix_entry);
 
 do_intx:
-       qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
-                       "falling back to INTx\n", nvec, ret);
+       qib_dev_err(
+               dd,
+               "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
+               nvec, ret);
        *msixcnt = 0;
        qib_enable_intx(dd->pcidev);
 }
@@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
 void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
 {
        int r;
+
        r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
                                   dd->pcibar0);
        if (r)
@@ -696,6 +699,7 @@ static void
 qib_pci_resume(struct pci_dev *pdev)
 {
        struct qib_devdata *dd = pci_get_drvdata(pdev);
+
        qib_devinfo(pdev, "QIB resume function called\n");
        pci_cleanup_aer_uncorrect_error_status(pdev);
        /*
index 6ddc0264aad2779ef327161c14a2ee9aef2e543f..4fa88ba2963e6ba21186ae5eb095ea531b741e97 100644 (file)
@@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 
        if (rcu_dereference_protected(ibp->qp0,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               rcu_assign_pointer(ibp->qp0, NULL);
+               RCU_INIT_POINTER(ibp->qp0, NULL);
        } else if (rcu_dereference_protected(ibp->qp1,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               rcu_assign_pointer(ibp->qp1, NULL);
+               RCU_INIT_POINTER(ibp->qp1, NULL);
        } else {
                struct qib_qp *q;
                struct qib_qp __rcu **qpp;
@@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
                                lockdep_is_held(&dev->qpt_lock))) != NULL;
                                qpp = &q->next)
                        if (q == qp) {
-                               rcu_assign_pointer(*qpp,
+                               RCU_INIT_POINTER(*qpp,
                                        rcu_dereference_protected(qp->next,
                                         lockdep_is_held(&dev->qpt_lock)));
                                removed = 1;
@@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
        for (n = 0; n < dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(dev->qp_table[n],
                        lockdep_is_held(&dev->qpt_lock));
-               rcu_assign_pointer(dev->qp_table[n], NULL);
+               RCU_INIT_POINTER(dev->qp_table[n], NULL);
 
                for (; qp; qp = rcu_dereference_protected(qp->next,
                                        lockdep_is_held(&dev->qpt_lock)))
index fa71b1e666c5414fbba2357fe531e75cabc7986c..5e27f76805e28af0c0e0acdc9864360f8cf09b41 100644 (file)
@@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
         * Module could take up to 2 Msec to respond to MOD_SEL, and there
         * is no way to tell if it is ready, so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
@@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
@@ -139,7 +140,7 @@ deselect:
        else if (pass)
                qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
 
-       msleep(2);
+       msleep(20);
 
 bail:
        mutex_unlock(&dd->eep_lock);
@@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
         * Module could take up to 2 Msec to respond to MOD_SEL,
         * and there is no way to tell if it is ready, so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
@@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
@@ -234,7 +236,7 @@ deselect:
         * going away, and there is no way to tell if it is ready.
         * so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
 bail:
        mutex_unlock(&dd->eep_lock);
@@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
                 * set the page to zero, Even if it already appears to be zero.
                 */
                u8 poke = 0;
+
                ret = qib_qsfp_write(ppd, 127, &poke, 1);
                udelay(50);
                if (ret != 1) {
@@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
        udelay(20); /* Generous RST dwell */
 
        dd->f_gpio_mod(dd, mask, mask, mask);
-       return;
 }
 
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
 
        while (bidx < QSFP_DEFAULT_HDR_CNT) {
                int iidx;
+
                ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
                if (ret < 0)
                        goto bail;
index 2f2501890c4ea2b26a9ebd7a82755688605443e3..4544d6f88ad77c7f7c69fd6e6d4a138188f493b3 100644 (file)
@@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
+                       memset(&wc, 0, sizeof(wc));
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
+                       memset(&wc, 0, sizeof(wc));
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
index 4c07a8b34ffe27e2bfa89a19883fa25ef6aa64e7..f42bd0f47577a4557f47cac58de4a16b678b923d 100644 (file)
@@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
                struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 
                return ppd->guid;
-       } else
-               return ibp->guids[index - 1];
+       }
+       return ibp->guids[index - 1];
 }
 
 static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@@ -420,7 +420,7 @@ again:
                goto serr;
        }
 
-       memset(&wc, 0, sizeof wc);
+       memset(&wc, 0, sizeof(wc));
        send_status = IB_WC_SUCCESS;
 
        release = 1;
@@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;
 
-               memset(&wc, 0, sizeof wc);
+               memset(&wc, 0, sizeof(wc));
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
index 911205d3d5a0bf255fae65126740ab47eaa4a8ef..c72775f2721226868604b3770b1dbe23b7456dc0 100644 (file)
@@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
                 * it again during startup.
                 */
                u64 val;
+
                rst_val &= ~(1ULL);
                qib_write_kreg(dd, kr_hwerrmask,
                               dd->cspec->hwerrmask &
@@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
                 * Both should be clear
                 */
                u64 newval = 0;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
@@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
                /* Need to claim */
                u64 pollval;
                u64 newval = EPB_ACC_REQ | oct_sel;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
@@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
                        if (!sofar) {
                                /* Only set address at start of chunk */
                                int addrbyte = (addr + sofar) >> 8;
+
                                transval = csbit | EPB_MADDRH | addrbyte;
                                tries = epb_trans(dd, trans, transval,
                                                  &transval);
@@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
  * IRQ not set up at this point in init, so we poll.
  */
 #define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (30)
+#define TRIM_TMO (15)
 
 static int qib_sd_trimdone_poll(struct qib_devdata *dd)
 {
@@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
                        ret = 1;
                        break;
                }
-               msleep(10);
+               msleep(20);
        }
        if (trim_tmo >= TRIM_TMO) {
                qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd)
                dds_reg_map >>= 4;
                for (midx = 0; midx < DDS_ROWS; ++midx) {
                        u64 __iomem *daddr = taddr + ((midx << 4) + idx);
+
                        data = dds_init_vals[midx].reg_vals[idx];
                        writeq(data, daddr);
                        mmiowb();
index 3c8e4e3caca6240175bbddb35fb304107179920e..81f56cdff2bc280c7a64f816a215b10b703d0ec4 100644 (file)
@@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device,
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);
 
-       buf[sizeof dd->serial] = '\0';
-       memcpy(buf, dd->serial, sizeof dd->serial);
+       buf[sizeof(dd->serial)] = '\0';
+       memcpy(buf, dd->serial, sizeof(dd->serial));
        strcat(buf, "\n");
        return strlen(buf);
 }
@@ -611,28 +611,6 @@ bail:
        return ret < 0 ? ret : count;
 }
 
-static ssize_t show_logged_errs(struct device *device,
-                               struct device_attribute *attr, char *buf)
-{
-       struct qib_ibdev *dev =
-               container_of(device, struct qib_ibdev, ibdev.dev);
-       struct qib_devdata *dd = dd_from_dev(dev);
-       int idx, count;
-
-       /* force consistency with actual EEPROM */
-       if (qib_update_eeprom_log(dd) != 0)
-               return -ENXIO;
-
-       count = 0;
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-                                  dd->eep_st_errs[idx],
-                                  idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
-       }
-
-       return count;
-}
-
 /*
  * Dump tempsense regs. in decimal, to ease shell-scripts.
  */
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
 static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
 static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
        &dev_attr_nfreectxts,
        &dev_attr_serial,
        &dev_attr_boardversion,
-       &dev_attr_logged_errors,
        &dev_attr_tempsense,
        &dev_attr_localbus_info,
        &dev_attr_chip_reset,
index 647f7beb1b0a1669a841c8180a9d6f4f1722c4c9..f5698664419b430ac932fb396f7a35c11ca80816 100644 (file)
@@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit)
                udelay(2);
        else {
                int rise_usec;
+
                for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
                        if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
                                break;
@@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd)
 static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
 {
        int ret = 1;
+
        if (flags & QIB_TWSI_START)
                start_seq(dd);
 
@@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
        int sub_len;
        const u8 *bp = buffer;
        int max_wait_time, i;
-       int ret;
-       ret = 1;
+       int ret = 1;
 
        while (len > 0) {
                if (dev == QIB_TWSI_NO_DEV) {
index 31d3561400a49f6056eb6be5c082b05cc0edb0e4..eface3b3dacf6a3d49c6d67b6702de7d38abc8ab 100644 (file)
@@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
 
        for (i = 0; i < cnt; i++) {
                int which;
+
                if (!test_bit(i, mask))
                        continue;
                /*
index aaf7039f8ed2112041174d1b320a3d8205348c08..26243b722b5e979c1324471b6e17871d3ef22540 100644 (file)
@@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
         * present on the wire.
         */
        length = swqe->length;
-       memset(&wc, 0, sizeof wc);
+       memset(&wc, 0, sizeof(wc));
        wc.byte_len = length + sizeof(struct ib_grh);
 
        if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
index d2806cae234c254ef7e71ee58e03df5d2c1efdc9..3e0677c512768a7bd79a612fae251f0af70db512 100644 (file)
@@ -50,7 +50,7 @@
 /* expected size of headers (for dma_pool) */
 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
 /* attempt to drain the queue for 5secs */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
 
 /*
  * track how many times a process open this driver.
@@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
                sdma_rb_node->refcount++;
        } else {
                int ret;
+
                sdma_rb_node = kmalloc(sizeof(
                        struct qib_user_sdma_rb_node), GFP_KERNEL);
                if (!sdma_rb_node)
@@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 
                        if (tiddma) {
                                char *tidsm = (char *)pkt + pktsize;
+
                                cfur = copy_from_user(tidsm,
                                        iov[idx].iov_base, tidsmsize);
                                if (cfur) {
@@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
-               msleep(10);
+               msleep(20);
        }
 
        if (pq->num_pending || pq->num_sending) {
@@ -1316,8 +1318,6 @@ retry:
 
        if (nfree && !list_empty(pktlist))
                goto retry;
-
-       return;
 }
 
 /* pq->lock must be held, get packets on the wire... */
index 9bcfbd8429804e237b23555a54bbd2b1d0fc5a27..4a3599890ea5f114655a34e472bee9197d73bd2c 100644 (file)
@@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
 done:
        if (dd->flags & QIB_USE_SPCL_TRIG) {
                u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
                qib_flush_wc();
                __raw_writel(0xaebecede, piobuf_orig + spcl_off);
        }
@@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
         * we allow allocations of more than we report for this value.
         */
 
-       pd = kmalloc(sizeof *pd, GFP_KERNEL);
+       pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd,
                goto bail;
        }
 
-       ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+       ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
        struct ib_ah *ah = ERR_PTR(-EINVAL);
        struct qib_qp *qp0;
 
-       memset(&attr, 0, sizeof attr);
+       memset(&attr, 0, sizeof(attr));
        attr.dlid = dlid;
        attr.port_num = ppd_from_ibp(ibp)->port;
        rcu_read_lock();
@@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
        struct qib_ucontext *context;
        struct ib_ucontext *ret;
 
-       context = kmalloc(sizeof *context, GFP_KERNEL);
+       context = kmalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
 
        dev->qp_table_size = ib_qib_qp_table_size;
        get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
-       dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
+       dev->qp_table = kmalloc_array(
+                               dev->qp_table_size,
+                               sizeof(*dev->qp_table),
                                GFP_KERNEL);
        if (!dev->qp_table) {
                ret = -ENOMEM;
@@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
        for (i = 0; i < ppd->sdma_descq_cnt; i++) {
                struct qib_verbs_txreq *tx;
 
-               tx = kzalloc(sizeof *tx, GFP_KERNEL);
+               tx = kzalloc(sizeof(*tx), GFP_KERNEL);
                if (!tx) {
                        ret = -ENOMEM;
                        goto err_tx;
index dabb697b1c2a6025ae72927740582a1cb277f796..f8ea069a3eafca4078ab70848c3804867609143c 100644 (file)
@@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
 {
        struct qib_mcast_qp *mqp;
 
-       mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+       mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
        if (!mqp)
                goto bail;
 
@@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
 {
        struct qib_mcast *mcast;
 
-       mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+       mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
        if (!mcast)
                goto bail;
 
index 1d7281c5a02eaf633fed9d4b71d4bec2b9433915..81b225f2300aed34eab9b88077bd4c4097dceae2 100644 (file)
@@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
        if (dd->piobcnt2k && dd->piobcnt4k) {
                /* 2 sizes for chip */
                unsigned long pio2kbase, pio4kbase;
+
                pio2kbase = dd->piobufbase & 0xffffffffUL;
                pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
                if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
        }
 
        for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-               /* do nothing */ ;
+               ; /* do nothing */
 
        if (piolen != (1ULL << bits)) {
                piolen >>= bits;
@@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
                piolen = 1ULL << (bits + 1);
        }
        if (pioaddr & (piolen - 1)) {
-               u64 atmp;
-               atmp = pioaddr & ~(piolen - 1);
+               u64 atmp = pioaddr & ~(piolen - 1);
+
                if (atmp < addr || (atmp + piolen) > (addr + len)) {
                        qib_dev_err(dd,
                                "No way to align address/size (%llx/%llx), no WC mtrr\n",
index 5ce26817e7e1d9b8d43126518188b3769e0c7f44..b47aea1094b2d9f7e434cf8a442f4b83b1a28f3c 100644 (file)
@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                           enum dma_data_direction dma_dir);
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-                             struct iser_data_buf *data);
+                             struct iser_data_buf *data,
+                             enum dma_data_direction dir);
+
 int  iser_initialize_task_headers(struct iscsi_task *task,
                        struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
index 3821633f1065b15a9fcc1b1c36a74eda619aeae3..20e859a6f1a63b2fede51dcb2b02b9b6f638a36e 100644 (file)
@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
 
-       if (!iser_conn->rx_descs)
-               goto free_login_buf;
-
        if (device->iser_free_rdma_reg_res)
                device->iser_free_rdma_reg_res(ib_conn);
 
@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
        /* make sure we never redo any unmapping */
        iser_conn->rx_descs = NULL;
 
-free_login_buf:
        iser_free_login_buf(iser_conn);
 }
 
@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->data[ISER_DIR_IN]);
+                                                &iser_task->data[ISER_DIR_IN],
+                                                DMA_FROM_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->prot[ISER_DIR_IN]);
+                                                &iser_task->prot[ISER_DIR_IN],
+                                                DMA_FROM_DEVICE);
        }
 
        if (iser_task->dir[ISER_DIR_OUT]) {
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->data[ISER_DIR_OUT]);
+                                                &iser_task->data[ISER_DIR_OUT],
+                                                DMA_TO_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->prot[ISER_DIR_OUT]);
+                                                &iser_task->prot[ISER_DIR_OUT],
+                                                DMA_TO_DEVICE);
        }
 }
index abce9339333f0a8551a2e52600d2409edd39461c..341040bf09849d41e4e01c613e0d0e01683fceda 100644 (file)
@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 }
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-                             struct iser_data_buf *data)
+                             struct iser_data_buf *data,
+                             enum dma_data_direction dir)
 {
        struct ib_device *dev;
 
        dev = iser_task->iser_conn->ib_conn.device->ib_device;
-       ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+       ib_dma_unmap_sg(dev, data->buf, data->size, dir);
 }
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
                iser_data_buf_dump(mem, ibdev);
 
        /* unmap the command data before accessing it */
-       iser_dma_unmap_task_data(iser_task, mem);
+       iser_dma_unmap_task_data(iser_task, mem,
+                                (cmd_dir == ISER_DIR_OUT) ?
+                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        /* allocate copy buf, if we are writing, copy the */
        /* unaligned scatterlist, dma map the copy        */
index 695a2704bd4380acafbc04c4ae7f0ecd96bc95a0..4065abe28829f78356c5ee7cd76ffa7b7330b3d5 100644 (file)
@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
 /**
  * iser_free_ib_conn_res - release IB related resources
  * @iser_conn: iser connection struct
- * @destroy_device: indicator if we need to try to release
- *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
- *     will use this.
+ * @destroy: indicator if we need to try to release the
+ *     iser device and memory regions pool (only iscsi
+ *     shutdown and DEVICE_REMOVAL will use this).
  *
  * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
  * be invoked multiple times.
  */
 static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
-                                 bool destroy_device)
+                                 bool destroy)
 {
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
        iser_info("freeing conn %p cma_id %p qp %p\n",
                  iser_conn, ib_conn->cma_id, ib_conn->qp);
 
-       iser_free_rx_descriptors(iser_conn);
-
        if (ib_conn->qp != NULL) {
                ib_conn->comp->active_qps--;
                rdma_destroy_qp(ib_conn->cma_id);
                ib_conn->qp = NULL;
        }
 
-       if (destroy_device && device != NULL) {
-               iser_device_try_release(device);
-               ib_conn->device = NULL;
+       if (destroy) {
+               if (iser_conn->rx_descs)
+                       iser_free_rx_descriptors(iser_conn);
+
+               if (device != NULL) {
+                       iser_device_try_release(device);
+                       ib_conn->device = NULL;
+               }
        }
 }
 
@@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
        mutex_unlock(&ig.connlist_mutex);
 
        mutex_lock(&iser_conn->state_mutex);
+       /* In case we end up here without ep_disconnect being invoked. */
        if (iser_conn->state != ISER_CONN_DOWN) {
                iser_warn("iser conn %p state %d, expected state down.\n",
                          iser_conn, iser_conn->state);
+               iscsi_destroy_endpoint(iser_conn->ep);
                iser_conn->state = ISER_CONN_DOWN;
        }
        /*
@@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 }
 
 static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
-                                bool destroy_device)
+                                bool destroy)
 {
        struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
 
@@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
         * and flush errors.
         */
        iser_disconnected_handler(cma_id);
-       iser_free_ib_conn_res(iser_conn, destroy_device);
+       iser_free_ib_conn_res(iser_conn, destroy);
        complete(&iser_conn->ib_completion);
 };
 
index dafb3c531f96f7ae61e70ff9f37d9324bee687ff..075b19cc78e89d11d73ba19bcffcd68c18323907 100644 (file)
@@ -38,7 +38,7 @@
 #define ISER_MAX_CQ_LEN                (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
                                 ISERT_MAX_CONN)
 
-int isert_debug_level = 0;
+static int isert_debug_level;
 module_param_named(debug_level, isert_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
 
@@ -949,7 +949,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
                isert_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
-               isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
+               isert_dbg("Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
@@ -1351,17 +1351,19 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
        struct iscsi_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
-       unsigned char *text_in;
+       unsigned char *text_in = NULL;
 
        rc = iscsit_setup_text_cmd(conn, cmd, hdr);
        if (rc < 0)
                return rc;
 
-       text_in = kzalloc(payload_length, GFP_KERNEL);
-       if (!text_in) {
-               isert_err("Unable to allocate text_in of payload_length: %u\n",
-                         payload_length);
-               return -ENOMEM;
+       if (payload_length) {
+               text_in = kzalloc(payload_length, GFP_KERNEL);
+               if (!text_in) {
+                       isert_err("Unable to allocate text_in of payload_length: %u\n",
+                                 payload_length);
+                       return -ENOMEM;
+               }
        }
        cmd->text_in_ptr = text_in;
 
@@ -1434,9 +1436,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                break;
        case ISCSI_OP_TEXT:
-               cmd = isert_allocate_cmd(conn);
-               if (!cmd)
-                       break;
+               if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+                       cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+                       if (!cmd)
+                               break;
+               } else {
+                       cmd = isert_allocate_cmd(conn);
+                       if (!cmd)
+                               break;
+               }
 
                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1658,6 +1666,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct iscsi_conn *conn = isert_conn->conn;
        struct isert_device *device = isert_conn->conn_device;
+       struct iscsi_text_rsp *hdr;
 
        isert_dbg("Cmd %p\n", isert_cmd);
 
@@ -1698,6 +1707,11 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
        case ISCSI_OP_REJECT:
        case ISCSI_OP_NOOP_OUT:
        case ISCSI_OP_TEXT:
+               hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+               /* If the continue bit is on, keep the command alive */
+               if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
+                       break;
+
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
@@ -1709,8 +1723,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
-                       isert_dbg("Calling transport_generic_free_cmd from"
-                                " isert_put_cmd for 0x%02x\n",
+                       isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
                                 cmd->iscsi_opcode);
                        transport_generic_free_cmd(&cmd->se_cmd, 0);
                        break;
@@ -2275,7 +2288,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        }
        isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       isert_dbg("conn %p Text Reject\n", isert_conn);
+       isert_dbg("conn %p Text Response\n", isert_conn);
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -3136,7 +3149,7 @@ accept_wait:
        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               isert_dbg("np_thread_state %d for isert_accept_np\n",
+               isert_dbg("np_thread_state %d\n",
                         np->np_thread_state);
                /**
                 * No point in stalling here when np_thread
@@ -3320,7 +3333,8 @@ static int __init isert_init(void)
 {
        int ret;
 
-       isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+       isert_comp_wq = alloc_workqueue("isert_comp_wq",
+                                       WQ_UNBOUND | WQ_HIGHPRI, 0);
        if (!isert_comp_wq) {
                isert_err("Unable to allocate isert_comp_wq\n");
                ret = -ENOMEM;
index eb694ddad79fe069b6d2f796004a5e8acc77e833..6e0a477681e90b0efe53330de1b9118000bcc85a 100644 (file)
@@ -3518,7 +3518,7 @@ static void srpt_close_session(struct se_session *se_sess)
        DECLARE_COMPLETION_ONSTACK(release_done);
        struct srpt_rdma_ch *ch;
        struct srpt_device *sdev;
-       int res;
+       unsigned long res;
 
        ch = se_sess->fabric_sess_ptr;
        WARN_ON(ch->sess != se_sess);
@@ -3533,7 +3533,7 @@ static void srpt_close_session(struct se_session *se_sess)
        spin_unlock_irq(&sdev->spinlock);
 
        res = wait_for_completion_timeout(&release_done, 60 * HZ);
-       WARN_ON(res <= 0);
+       WARN_ON(res == 0);
 }
 
 /**
index b78425765d3eb12ccd02fd1c3dc644a1956110a1..d09cefa379316a302df754394e342cc6fe4b088c 100644 (file)
@@ -535,8 +535,7 @@ static int adi_connect(struct gameport *gameport, struct gameport_driver *drv)
                }
        }
  fail2:        for (i = 0; i < 2; i++)
-               if (port->adi[i].dev)
-                       input_free_device(port->adi[i].dev);
+               input_free_device(port->adi[i].dev);
        gameport_close(gameport);
  fail1:        gameport_set_drvdata(gameport, NULL);
        kfree(port);
index a89488aa1aa4d0ea0fefcd796646f04dbe851c22..fcef5d1365e2a3034e3a9544f7e403a0680a5058 100644 (file)
@@ -345,13 +345,11 @@ static int pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
 {
        const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
        struct input_dev *input_dev = keypad->input_dev;
-       const struct matrix_keymap_data *keymap_data =
-                               pdata ? pdata->matrix_keymap_data : NULL;
        unsigned short keycode;
        int i;
        int error;
 
-       error = matrix_keypad_build_keymap(keymap_data, NULL,
+       error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL,
                                           pdata->matrix_key_rows,
                                           pdata->matrix_key_cols,
                                           keypad->keycodes, input_dev);
index 3f4351579372ebb87aafe5af7df505ac2f485361..a0fc18fdfc0c62263c21537a2f8105a3820846e3 100644 (file)
@@ -7,29 +7,37 @@
 
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/input.h>
 #include <linux/slab.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 #include <asm/portmux.h>
-#include <asm/bfin_rotary.h>
 
-static const u16 per_cnt[] = {
-       P_CNT_CUD,
-       P_CNT_CDG,
-       P_CNT_CZM,
-       0
-};
+#define CNT_CONFIG_OFF         0       /* CNT Config Offset */
+#define CNT_IMASK_OFF          4       /* CNT Interrupt Mask Offset */
+#define CNT_STATUS_OFF         8       /* CNT Status Offset */
+#define CNT_COMMAND_OFF                12      /* CNT Command Offset */
+#define CNT_DEBOUNCE_OFF       16      /* CNT Debounce Offset */
+#define CNT_COUNTER_OFF                20      /* CNT Counter Offset */
+#define CNT_MAX_OFF            24      /* CNT Maximum Count Offset */
+#define CNT_MIN_OFF            28      /* CNT Minimum Count Offset */
 
 struct bfin_rot {
        struct input_dev *input;
+       void __iomem *base;
        int irq;
        unsigned int up_key;
        unsigned int down_key;
        unsigned int button_key;
        unsigned int rel_code;
+
+       unsigned short mode;
+       unsigned short debounce;
+
        unsigned short cnt_config;
        unsigned short cnt_imask;
        unsigned short cnt_debounce;
@@ -59,18 +67,17 @@ static void report_rotary_event(struct bfin_rot *rotary, int delta)
 
 static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
 {
-       struct platform_device *pdev = dev_id;
-       struct bfin_rot *rotary = platform_get_drvdata(pdev);
+       struct bfin_rot *rotary = dev_id;
        int delta;
 
-       switch (bfin_read_CNT_STATUS()) {
+       switch (readw(rotary->base + CNT_STATUS_OFF)) {
 
        case ICII:
                break;
 
        case UCII:
        case DCII:
-               delta = bfin_read_CNT_COUNTER();
+               delta = readl(rotary->base + CNT_COUNTER_OFF);
                if (delta)
                        report_rotary_event(rotary, delta);
                break;
@@ -83,16 +90,52 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
                break;
        }
 
-       bfin_write_CNT_COMMAND(W1LCNT_ZERO);    /* Clear COUNTER */
-       bfin_write_CNT_STATUS(-1);      /* Clear STATUS */
+       writew(W1LCNT_ZERO, rotary->base + CNT_COMMAND_OFF); /* Clear COUNTER */
+       writew(-1, rotary->base + CNT_STATUS_OFF); /* Clear STATUS */
 
        return IRQ_HANDLED;
 }
 
+static int bfin_rotary_open(struct input_dev *input)
+{
+       struct bfin_rot *rotary = input_get_drvdata(input);
+       unsigned short val;
+
+       if (rotary->mode & ROT_DEBE)
+               writew(rotary->debounce & DPRESCALE,
+                       rotary->base + CNT_DEBOUNCE_OFF);
+
+       writew(rotary->mode & ~CNTE, rotary->base + CNT_CONFIG_OFF);
+
+       val = UCIE | DCIE;
+       if (rotary->button_key)
+               val |= CZMIE;
+       writew(val, rotary->base + CNT_IMASK_OFF);
+
+       writew(rotary->mode | CNTE, rotary->base + CNT_CONFIG_OFF);
+
+       return 0;
+}
+
+static void bfin_rotary_close(struct input_dev *input)
+{
+       struct bfin_rot *rotary = input_get_drvdata(input);
+
+       writew(0, rotary->base + CNT_CONFIG_OFF);
+       writew(0, rotary->base + CNT_IMASK_OFF);
+}
+
+static void bfin_rotary_free_action(void *data)
+{
+       peripheral_free_list(data);
+}
+
 static int bfin_rotary_probe(struct platform_device *pdev)
 {
-       struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev);
+       struct device *dev = &pdev->dev;
+       const struct bfin_rotary_platform_data *pdata = dev_get_platdata(dev);
        struct bfin_rot *rotary;
+       struct resource *res;
        struct input_dev *input;
        int error;
 
@@ -102,18 +145,37 @@ static int bfin_rotary_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       error = peripheral_request_list(per_cnt, dev_name(&pdev->dev));
-       if (error) {
-               dev_err(&pdev->dev, "requesting peripherals failed\n");
-               return error;
+       if (pdata->pin_list) {
+               error = peripheral_request_list(pdata->pin_list,
+                                               dev_name(&pdev->dev));
+               if (error) {
+                       dev_err(dev, "requesting peripherals failed: %d\n",
+                               error);
+                       return error;
+               }
+
+               error = devm_add_action(dev, bfin_rotary_free_action,
+                                       pdata->pin_list);
+               if (error) {
+                       dev_err(dev, "setting cleanup action failed: %d\n",
+                               error);
+                       peripheral_free_list(pdata->pin_list);
+                       return error;
+               }
        }
 
-       rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL);
-       input = input_allocate_device();
-       if (!rotary || !input) {
-               error = -ENOMEM;
-               goto out1;
-       }
+       rotary = devm_kzalloc(dev, sizeof(struct bfin_rot), GFP_KERNEL);
+       if (!rotary)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       rotary->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(rotary->base))
+               return PTR_ERR(rotary->base);
+
+       input = devm_input_allocate_device(dev);
+       if (!input)
+               return -ENOMEM;
 
        rotary->input = input;
 
@@ -122,9 +184,8 @@ static int bfin_rotary_probe(struct platform_device *pdev)
        rotary->button_key = pdata->rotary_button_key;
        rotary->rel_code = pdata->rotary_rel_code;
 
-       error = rotary->irq = platform_get_irq(pdev, 0);
-       if (error < 0)
-               goto out1;
+       rotary->mode = pdata->mode;
+       rotary->debounce = pdata->debounce;
 
        input->name = pdev->name;
        input->phys = "bfin-rotary/input0";
@@ -137,6 +198,9 @@ static int bfin_rotary_probe(struct platform_device *pdev)
        input->id.product = 0x0001;
        input->id.version = 0x0100;
 
+       input->open = bfin_rotary_open;
+       input->close = bfin_rotary_close;
+
        if (rotary->up_key) {
                __set_bit(EV_KEY, input->evbit);
                __set_bit(rotary->up_key, input->keybit);
@@ -151,75 +215,43 @@ static int bfin_rotary_probe(struct platform_device *pdev)
                __set_bit(rotary->button_key, input->keybit);
        }
 
-       error = request_irq(rotary->irq, bfin_rotary_isr,
-                           0, dev_name(&pdev->dev), pdev);
+       /* Quiesce the device before requesting irq */
+       bfin_rotary_close(input);
+
+       rotary->irq = platform_get_irq(pdev, 0);
+       if (rotary->irq < 0) {
+               dev_err(dev, "No rotary IRQ specified\n");
+               return -ENOENT;
+       }
+
+       error = devm_request_irq(dev, rotary->irq, bfin_rotary_isr,
+                                0, dev_name(dev), rotary);
        if (error) {
-               dev_err(&pdev->dev,
-                       "unable to claim irq %d; error %d\n",
+               dev_err(dev, "unable to claim irq %d; error %d\n",
                        rotary->irq, error);
-               goto out1;
+               return error;
        }
 
        error = input_register_device(input);
        if (error) {
-               dev_err(&pdev->dev,
-                       "unable to register input device (%d)\n", error);
-               goto out2;
+               dev_err(dev, "unable to register input device (%d)\n", error);
+               return error;
        }
 
-       if (pdata->rotary_button_key)
-               bfin_write_CNT_IMASK(CZMIE);
-
-       if (pdata->mode & ROT_DEBE)
-               bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE);
-
-       if (pdata->mode)
-               bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() |
-                                       (pdata->mode & ~CNTE));
-
-       bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE);
-       bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE);
-
        platform_set_drvdata(pdev, rotary);
        device_init_wakeup(&pdev->dev, 1);
 
        return 0;
-
-out2:
-       free_irq(rotary->irq, pdev);
-out1:
-       input_free_device(input);
-       kfree(rotary);
-       peripheral_free_list(per_cnt);
-
-       return error;
 }
 
-static int bfin_rotary_remove(struct platform_device *pdev)
-{
-       struct bfin_rot *rotary = platform_get_drvdata(pdev);
-
-       bfin_write_CNT_CONFIG(0);
-       bfin_write_CNT_IMASK(0);
-
-       free_irq(rotary->irq, pdev);
-       input_unregister_device(rotary->input);
-       peripheral_free_list(per_cnt);
-
-       kfree(rotary);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int bfin_rotary_suspend(struct device *dev)
+static int __maybe_unused bfin_rotary_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct bfin_rot *rotary = platform_get_drvdata(pdev);
 
-       rotary->cnt_config = bfin_read_CNT_CONFIG();
-       rotary->cnt_imask = bfin_read_CNT_IMASK();
-       rotary->cnt_debounce = bfin_read_CNT_DEBOUNCE();
+       rotary->cnt_config = readw(rotary->base + CNT_CONFIG_OFF);
+       rotary->cnt_imask = readw(rotary->base + CNT_IMASK_OFF);
+       rotary->cnt_debounce = readw(rotary->base + CNT_DEBOUNCE_OFF);
 
        if (device_may_wakeup(&pdev->dev))
                enable_irq_wake(rotary->irq);
@@ -227,38 +259,32 @@ static int bfin_rotary_suspend(struct device *dev)
        return 0;
 }
 
-static int bfin_rotary_resume(struct device *dev)
+static int __maybe_unused bfin_rotary_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct bfin_rot *rotary = platform_get_drvdata(pdev);
 
-       bfin_write_CNT_DEBOUNCE(rotary->cnt_debounce);
-       bfin_write_CNT_IMASK(rotary->cnt_imask);
-       bfin_write_CNT_CONFIG(rotary->cnt_config & ~CNTE);
+       writew(rotary->cnt_debounce, rotary->base + CNT_DEBOUNCE_OFF);
+       writew(rotary->cnt_imask, rotary->base + CNT_IMASK_OFF);
+       writew(rotary->cnt_config & ~CNTE, rotary->base + CNT_CONFIG_OFF);
 
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(rotary->irq);
 
        if (rotary->cnt_config & CNTE)
-               bfin_write_CNT_CONFIG(rotary->cnt_config);
+               writew(rotary->cnt_config, rotary->base + CNT_CONFIG_OFF);
 
        return 0;
 }
 
-static const struct dev_pm_ops bfin_rotary_pm_ops = {
-       .suspend        = bfin_rotary_suspend,
-       .resume         = bfin_rotary_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(bfin_rotary_pm_ops,
+                        bfin_rotary_suspend, bfin_rotary_resume);
 
 static struct platform_driver bfin_rotary_device_driver = {
        .probe          = bfin_rotary_probe,
-       .remove         = bfin_rotary_remove,
        .driver         = {
                .name   = "bfin-rotary",
-#ifdef CONFIG_PM
                .pm     = &bfin_rotary_pm_ops,
-#endif
        },
 };
 module_platform_driver(bfin_rotary_device_driver);
index 79cc0f79896fcb47e9ed31faeed02339ffe5ccb4..e8e010a85484ae3d42003bc3c0742f909b8946f6 100644 (file)
@@ -195,7 +195,7 @@ static int soc_button_probe(struct platform_device *pdev)
 
 static struct soc_button_info soc_button_PNP0C40[] = {
        { "power", 0, EV_KEY, KEY_POWER, false, true },
-       { "home", 1, EV_KEY, KEY_HOME, false, true },
+       { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
        { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
        { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
        { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
index f205b8be2ce4ecd2395c75dc8fc990513f3fff48..d28726a0ef858e252948d2e5f2fd009bfd6c5506 100644 (file)
@@ -99,36 +99,58 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS      0x40    /* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED   0x80    /* 3-byte PS/2 packet interleaved with
                                           6-byte ALPS packet */
-#define ALPS_IS_RUSHMORE       0x100   /* device is a rushmore */
 #define ALPS_BUTTONPAD         0x200   /* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
-       { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* Toshiba Salellite Pro M10 */
-       { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 },                           /* UMAX-530T */
-       { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },                           /* HP ze1115 */
-       { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },                /* Fujitsu Siemens S6010 */
-       { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL },                  /* Toshiba Satellite S2400-103 */
-       { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 },                /* NEC Versa L320 */
-       { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* Dell Latitude D800 */
-       { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT },              /* ThinkPad R61 8918-5QG */
-       { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },                /* Ahtec Laptop */
-       { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* XXX */
-       { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
-       { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT },  /* Dell Latitude D600 */
+       { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },      /* Toshiba Salellite Pro M10 */
+       { { 0x33, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V1, 0x88, 0xf8, 0 } },                               /* UMAX-530T */
+       { { 0x53, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x53, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x60, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },                               /* HP ze1115 */
+       { { 0x63, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x02, 0x28 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } },            /* Fujitsu Siemens S6010 */
+       { { 0x63, 0x02, 0x3c }, 0x00, { ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL } },              /* Toshiba Satellite S2400-103 */
+       { { 0x63, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 } },            /* NEC Versa L320 */
+       { { 0x63, 0x02, 0x64 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },      /* Dell Latitude D800 */
+       { { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } },          /* ThinkPad R61 8918-5QG */
+       { { 0x73, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } },            /* Ahtec Laptop */
+
+       /*
+        * XXX This entry is suspicious. First byte has zero lower nibble,
+        * which is what a normal mouse would report. Also, the value 0x0e
+        * isn't valid per PS/2 spec.
+        */
+       { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
+
+       { { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
+       { { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } },      /* Dell Latitude D600 */
        /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
-       { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
-               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
-       { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT },              /* Dell XT2 */
-       { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },           /* Dell Vostro 1400 */
-       { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
-               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },                            /* Toshiba Tecra A11-11L */
-       { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
+       { { 0x62, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf,
+               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },
+       { { 0x73, 0x00, 0x14 }, 0x00, { ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT } },          /* Dell XT2 */
+       { { 0x73, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS } },       /* Dell Vostro 1400 */
+       { { 0x52, 0x01, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff,
+               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },                          /* Toshiba Tecra A11-11L */
+       { { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } },
+};
+
+static const struct alps_protocol_info alps_v3_protocol_data = {
+       ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT
+};
+
+static const struct alps_protocol_info alps_v3_rushmore_data = {
+       ALPS_PROTO_V3_RUSHMORE, 0x8f, 0x8f, ALPS_DUALPOINT
+};
+
+static const struct alps_protocol_info alps_v5_protocol_data = {
+       ALPS_PROTO_V5, 0xc8, 0xd8, 0
+};
+
+static const struct alps_protocol_info alps_v7_protocol_data = {
+       ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT
 };
 
 static void alps_set_abs_params_st(struct alps_data *priv,
@@ -136,12 +158,6 @@ static void alps_set_abs_params_st(struct alps_data *priv,
 static void alps_set_abs_params_mt(struct alps_data *priv,
                                   struct input_dev *dev1);
 
-/*
- * XXX - this entry is suspicious. First byte has zero lower nibble,
- * which is what a normal mouse would report. Also, the value 0x0e
- * isn't valid per PS/2 spec.
- */
-
 /* Packet formats are described in Documentation/input/alps.txt */
 
 static bool alps_is_valid_first_byte(struct alps_data *priv,
@@ -150,8 +166,7 @@ static bool alps_is_valid_first_byte(struct alps_data *priv,
        return (data & priv->mask0) == priv->byte0;
 }
 
-static void alps_report_buttons(struct psmouse *psmouse,
-                               struct input_dev *dev1, struct input_dev *dev2,
+static void alps_report_buttons(struct input_dev *dev1, struct input_dev *dev2,
                                int left, int right, int middle)
 {
        struct input_dev *dev;
@@ -161,20 +176,21 @@ static void alps_report_buttons(struct psmouse *psmouse,
         * other device (dev2) then this event should be also
         * sent through that device.
         */
-       dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_LEFT, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_LEFT, left);
 
-       dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_RIGHT, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_RIGHT, right);
 
-       dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_MIDDLE, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_MIDDLE, middle);
 
        /*
         * Sync the _other_ device now, we'll do the first
         * device later once we report the rest of the events.
         */
-       input_sync(dev2);
+       if (dev2)
+               input_sync(dev2);
 }
 
 static void alps_process_packet_v1_v2(struct psmouse *psmouse)
@@ -221,13 +237,13 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                input_report_rel(dev2, REL_X,  (x > 383 ? (x - 768) : x));
                input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
 
-               alps_report_buttons(psmouse, dev2, dev, left, right, middle);
+               alps_report_buttons(dev2, dev, left, right, middle);
 
                input_sync(dev2);
                return;
        }
 
-       alps_report_buttons(psmouse, dev, dev2, left, right, middle);
+       alps_report_buttons(dev, dev2, left, right, middle);
 
        /* Convert hardware tap to a reasonable Z value */
        if (ges && !fin)
@@ -412,7 +428,7 @@ static int alps_process_bitmap(struct alps_data *priv,
                (2 * (priv->y_bits - 1));
 
        /* y-bitmap order is reversed, except on rushmore */
-       if (!(priv->flags & ALPS_IS_RUSHMORE)) {
+       if (priv->proto_version != ALPS_PROTO_V3_RUSHMORE) {
                fields->mt[0].y = priv->y_max - fields->mt[0].y;
                fields->mt[1].y = priv->y_max - fields->mt[1].y;
        }
@@ -648,7 +664,8 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
                 */
                if (f->is_mp) {
                        fingers = f->fingers;
-                       if (priv->proto_version == ALPS_PROTO_V3) {
+                       if (priv->proto_version == ALPS_PROTO_V3 ||
+                           priv->proto_version == ALPS_PROTO_V3_RUSHMORE) {
                                if (alps_process_bitmap(priv, f) == 0)
                                        fingers = 0; /* Use st data */
 
@@ -892,34 +909,6 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
                                          unsigned char *pkt,
                                          unsigned char pkt_id)
 {
-       /*
-        *       packet-fmt    b7   b6    b5   b4   b3   b2   b1   b0
-        * Byte0 TWO & MULTI    L    1     R    M    1 Y0-2 Y0-1 Y0-0
-        * Byte0 NEW            L    1  X1-5    1    1 Y0-2 Y0-1 Y0-0
-        * Byte1            Y0-10 Y0-9  Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
-        * Byte2            X0-11    1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
-        * Byte3            X1-11    1  X0-4 X0-3    1 X0-2 X0-1 X0-0
-        * Byte4 TWO        X1-10  TWO  X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
-        * Byte4 MULTI      X1-10  TWO  X1-9 X1-8 X1-7 X1-6 Y1-5    1
-        * Byte4 NEW        X1-10  TWO  X1-9 X1-8 X1-7 X1-6    0    0
-        * Byte5 TWO & NEW  Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
-        * Byte5 MULTI      Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6  F-1  F-0
-        * L:         Left button
-        * R / M:     Non-clickpads: Right / Middle button
-        *            Clickpads: When > 2 fingers are down, and some fingers
-        *            are in the button area, then the 2 coordinates reported
-        *            are for fingers outside the button area and these report
-        *            extra fingers being present in the right / left button
-        *            area. Note these fingers are not added to the F field!
-        *            so if a TWO packet is received and R = 1 then there are
-        *            3 fingers down, etc.
-        * TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
-        *            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
-        *               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
-        *               in NEW fmt
-        * F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
-        */
-
        mt[0].x = ((pkt[2] & 0x80) << 4);
        mt[0].x |= ((pkt[2] & 0x3F) << 5);
        mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -1044,17 +1033,6 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
                return;
        }
 
-       /*
-        *        b7 b6 b5 b4 b3 b2 b1 b0
-        * Byte0   0  1  0  0  1  0  0  0
-        * Byte1   1  1  *  *  1  M  R  L
-        * Byte2  X7  1 X5 X4 X3 X2 X1 X0
-        * Byte3  Z6  1 Y6 X6  1 Y2 Y1 Y0
-        * Byte4  Y7  0 Y5 Y4 Y3  1  1  0
-        * Byte5 T&P  0 Z5 Z4 Z3 Z2 Z1 Z0
-        * M / R / L: Middle / Right / Left button
-        */
-
        x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2);
        y = (packet[3] & 0x07) | (packet[4] & 0xb8) |
            ((packet[3] & 0x20) << 1);
@@ -1107,23 +1085,89 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
                alps_process_touchpad_packet_v7(psmouse);
 }
 
-static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+static DEFINE_MUTEX(alps_mutex);
+
+static void alps_register_bare_ps2_mouse(struct work_struct *work)
+{
+       struct alps_data *priv =
+               container_of(work, struct alps_data, dev3_register_work.work);
+       struct psmouse *psmouse = priv->psmouse;
+       struct input_dev *dev3;
+       int error = 0;
+
+       mutex_lock(&alps_mutex);
+
+       if (priv->dev3)
+               goto out;
+
+       dev3 = input_allocate_device();
+       if (!dev3) {
+               psmouse_err(psmouse, "failed to allocate secondary device\n");
+               error = -ENOMEM;
+               goto out;
+       }
+
+       snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
+                psmouse->ps2dev.serio->phys,
+                (priv->dev2 ? "input2" : "input1"));
+       dev3->phys = priv->phys3;
+
+       /*
+        * format of input device name is: "protocol vendor name"
+        * see function psmouse_switch_protocol() in psmouse-base.c
+        */
+       dev3->name = "PS/2 ALPS Mouse";
+
+       dev3->id.bustype = BUS_I8042;
+       dev3->id.vendor  = 0x0002;
+       dev3->id.product = PSMOUSE_PS2;
+       dev3->id.version = 0x0000;
+       dev3->dev.parent = &psmouse->ps2dev.serio->dev;
+
+       input_set_capability(dev3, EV_REL, REL_X);
+       input_set_capability(dev3, EV_REL, REL_Y);
+       input_set_capability(dev3, EV_KEY, BTN_LEFT);
+       input_set_capability(dev3, EV_KEY, BTN_RIGHT);
+       input_set_capability(dev3, EV_KEY, BTN_MIDDLE);
+
+       __set_bit(INPUT_PROP_POINTER, dev3->propbit);
+
+       error = input_register_device(dev3);
+       if (error) {
+               psmouse_err(psmouse,
+                           "failed to register secondary device: %d\n",
+                           error);
+               input_free_device(dev3);
+               goto out;
+       }
+
+       priv->dev3 = dev3;
+
+out:
+       /*
+        * Save the error code so that we can detect that we
+        * already tried to create the device.
+        */
+       if (error)
+               priv->dev3 = ERR_PTR(error);
+
+       mutex_unlock(&alps_mutex);
+}
+
+static void alps_report_bare_ps2_packet(struct input_dev *dev,
                                        unsigned char packet[],
                                        bool report_buttons)
 {
-       struct alps_data *priv = psmouse->private;
-       struct input_dev *dev2 = priv->dev2;
-
        if (report_buttons)
-               alps_report_buttons(psmouse, dev2, psmouse->dev,
+               alps_report_buttons(dev, NULL,
                                packet[0] & 1, packet[0] & 2, packet[0] & 4);
 
-       input_report_rel(dev2, REL_X,
+       input_report_rel(dev, REL_X,
                packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
-       input_report_rel(dev2, REL_Y,
+       input_report_rel(dev, REL_Y,
                packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
 
-       input_sync(dev2);
+       input_sync(dev);
 }
 
 static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
@@ -1188,8 +1232,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
                 * de-synchronization.
                 */
 
-               alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
-                                           false);
+               alps_report_bare_ps2_packet(priv->dev2,
+                                           &psmouse->packet[3], false);
 
                /*
                 * Continue with the standard ALPS protocol handling,
@@ -1245,9 +1289,18 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
         * properly we only do this if the device is fully synchronized.
         */
        if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
+
+               /* Register dev3 mouse if we received a PS/2 packet for the first time */
+               if (unlikely(!priv->dev3))
+                       psmouse_queue_work(psmouse,
+                                          &priv->dev3_register_work, 0);
+
                if (psmouse->pktcnt == 3) {
-                       alps_report_bare_ps2_packet(psmouse, psmouse->packet,
-                                                   true);
+                       /* Once dev3 mouse device is registered report data */
+                       if (likely(!IS_ERR_OR_NULL(priv->dev3)))
+                               alps_report_bare_ps2_packet(priv->dev3,
+                                                           psmouse->packet,
+                                                           true);
                        return PSMOUSE_FULL_PACKET;
                }
                return PSMOUSE_GOOD_DATA;
@@ -1275,7 +1328,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
                            psmouse->pktcnt - 1,
                            psmouse->packet[psmouse->pktcnt - 1]);
 
-               if (priv->proto_version == ALPS_PROTO_V3 &&
+               if (priv->proto_version == ALPS_PROTO_V3_RUSHMORE &&
                    psmouse->pktcnt == psmouse->pktsize) {
                        /*
                         * Some Dell boxes, such as Latitude E6440 or E7440
@@ -1780,7 +1833,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
         * all.
         */
        if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
-               psmouse_warn(psmouse, "trackstick E7 report failed\n");
+               psmouse_warn(psmouse, "Failed to initialize trackstick (E7 report failed)\n");
                ret = -ENODEV;
        } else {
                psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param);
@@ -1945,8 +1998,6 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
                                                   ALPS_REG_BASE_RUSHMORE);
                if (reg_val == -EIO)
                        goto error;
-               if (reg_val == -ENODEV)
-                       priv->flags &= ~ALPS_DUALPOINT;
        }
 
        if (alps_enter_command_mode(psmouse) ||
@@ -2162,11 +2213,18 @@ error:
        return ret;
 }
 
-static void alps_set_defaults(struct alps_data *priv)
+static int alps_set_protocol(struct psmouse *psmouse,
+                            struct alps_data *priv,
+                            const struct alps_protocol_info *protocol)
 {
-       priv->byte0 = 0x8f;
-       priv->mask0 = 0x8f;
-       priv->flags = ALPS_DUALPOINT;
+       psmouse->private = priv;
+
+       setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
+
+       priv->proto_version = protocol->version;
+       priv->byte0 = protocol->byte0;
+       priv->mask0 = protocol->mask0;
+       priv->flags = protocol->flags;
 
        priv->x_max = 2000;
        priv->y_max = 1400;
@@ -2182,6 +2240,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->x_max = 1023;
                priv->y_max = 767;
                break;
+
        case ALPS_PROTO_V3:
                priv->hw_init = alps_hw_init_v3;
                priv->process_packet = alps_process_packet_v3;
@@ -2190,6 +2249,23 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
                break;
+
+       case ALPS_PROTO_V3_RUSHMORE:
+               priv->hw_init = alps_hw_init_rushmore_v3;
+               priv->process_packet = alps_process_packet_v3;
+               priv->set_abs_params = alps_set_abs_params_mt;
+               priv->decode_fields = alps_decode_rushmore;
+               priv->nibble_commands = alps_v3_nibble_commands;
+               priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+               priv->x_bits = 16;
+               priv->y_bits = 12;
+
+               if (alps_probe_trackstick_v3(psmouse,
+                                            ALPS_REG_BASE_RUSHMORE) < 0)
+                       priv->flags &= ~ALPS_DUALPOINT;
+
+               break;
+
        case ALPS_PROTO_V4:
                priv->hw_init = alps_hw_init_v4;
                priv->process_packet = alps_process_packet_v4;
@@ -2197,6 +2273,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->nibble_commands = alps_v4_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_DISABLE;
                break;
+
        case ALPS_PROTO_V5:
                priv->hw_init = alps_hw_init_dolphin_v1;
                priv->process_packet = alps_process_touchpad_packet_v3_v5;
@@ -2204,14 +2281,12 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->byte0 = 0xc8;
-               priv->mask0 = 0xd8;
-               priv->flags = 0;
                priv->x_max = 1360;
                priv->y_max = 660;
                priv->x_bits = 23;
                priv->y_bits = 12;
                break;
+
        case ALPS_PROTO_V6:
                priv->hw_init = alps_hw_init_v6;
                priv->process_packet = alps_process_packet_v6;
@@ -2220,6 +2295,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->x_max = 2047;
                priv->y_max = 1535;
                break;
+
        case ALPS_PROTO_V7:
                priv->hw_init = alps_hw_init_v7;
                priv->process_packet = alps_process_packet_v7;
@@ -2227,19 +2303,21 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->x_max = 0xfff;
-               priv->y_max = 0x7ff;
-               priv->byte0 = 0x48;
-               priv->mask0 = 0x48;
+
+               if (alps_dolphin_get_device_area(psmouse, priv))
+                       return -EIO;
 
                if (priv->fw_ver[1] != 0xba)
                        priv->flags |= ALPS_BUTTONPAD;
+
                break;
        }
+
+       return 0;
 }
 
-static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
-                           unsigned char *e7, unsigned char *ec)
+static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
+                                                        unsigned char *ec)
 {
        const struct alps_model_info *model;
        int i;
@@ -2251,23 +2329,18 @@ static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
                    (!model->command_mode_resp ||
                     model->command_mode_resp == ec[2])) {
 
-                       priv->proto_version = model->proto_version;
-                       alps_set_defaults(priv);
-
-                       priv->flags = model->flags;
-                       priv->byte0 = model->byte0;
-                       priv->mask0 = model->mask0;
-
-                       return 0;
+                       return &model->protocol_info;
                }
        }
 
-       return -EINVAL;
+       return NULL;
 }
 
 static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
 {
+       const struct alps_protocol_info *protocol;
        unsigned char e6[4], e7[4], ec[4];
+       int error;
 
        /*
         * First try "E6 report".
@@ -2293,54 +2366,35 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
            alps_exit_command_mode(psmouse))
                return -EIO;
 
-       /* Save the Firmware version */
-       memcpy(priv->fw_ver, ec, 3);
-
-       if (alps_match_table(psmouse, priv, e7, ec) == 0) {
-               return 0;
-       } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
-                  ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
-               priv->proto_version = ALPS_PROTO_V5;
-               alps_set_defaults(priv);
-               if (alps_dolphin_get_device_area(psmouse, priv))
-                       return -EIO;
-               else
-                       return 0;
-       } else if (ec[0] == 0x88 &&
-                  ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
-               priv->proto_version = ALPS_PROTO_V7;
-               alps_set_defaults(priv);
-
-               return 0;
-       } else if (ec[0] == 0x88 && ec[1] == 0x08) {
-               priv->proto_version = ALPS_PROTO_V3;
-               alps_set_defaults(priv);
-
-               priv->hw_init = alps_hw_init_rushmore_v3;
-               priv->decode_fields = alps_decode_rushmore;
-               priv->x_bits = 16;
-               priv->y_bits = 12;
-               priv->flags |= ALPS_IS_RUSHMORE;
-
-               /* hack to make addr_command, nibble_command available */
-               psmouse->private = priv;
-
-               if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
-                       priv->flags &= ~ALPS_DUALPOINT;
-
-               return 0;
-       } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
-                  ec[2] >= 0x90 && ec[2] <= 0x9d) {
-               priv->proto_version = ALPS_PROTO_V3;
-               alps_set_defaults(priv);
-
-               return 0;
+       protocol = alps_match_table(e7, ec);
+       if (!protocol) {
+               if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
+                          ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
+                       protocol = &alps_v5_protocol_data;
+               } else if (ec[0] == 0x88 &&
+                          ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
+                       protocol = &alps_v7_protocol_data;
+               } else if (ec[0] == 0x88 && ec[1] == 0x08) {
+                       protocol = &alps_v3_rushmore_data;
+               } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
+                          ec[2] >= 0x90 && ec[2] <= 0x9d) {
+                       protocol = &alps_v3_protocol_data;
+               } else {
+                       psmouse_dbg(psmouse,
+                                   "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+                       return -EINVAL;
+               }
        }
 
-       psmouse_dbg(psmouse,
-                   "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+       if (priv) {
+               /* Save the Firmware version */
+               memcpy(priv->fw_ver, ec, 3);
+               error = alps_set_protocol(psmouse, priv, protocol);
+               if (error)
+                       return error;
+       }
 
-       return -EINVAL;
+       return 0;
 }
 
 static int alps_reconnect(struct psmouse *psmouse)
@@ -2361,7 +2415,10 @@ static void alps_disconnect(struct psmouse *psmouse)
 
        psmouse_reset(psmouse);
        del_timer_sync(&priv->timer);
-       input_unregister_device(priv->dev2);
+       if (priv->dev2)
+               input_unregister_device(priv->dev2);
+       if (!IS_ERR_OR_NULL(priv->dev3))
+               input_unregister_device(priv->dev3);
        kfree(priv);
 }
 
@@ -2394,25 +2451,12 @@ static void alps_set_abs_params_mt(struct alps_data *priv,
 
 int alps_init(struct psmouse *psmouse)
 {
-       struct alps_data *priv;
-       struct input_dev *dev1 = psmouse->dev, *dev2;
-
-       priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
-       dev2 = input_allocate_device();
-       if (!priv || !dev2)
-               goto init_fail;
-
-       priv->dev2 = dev2;
-       setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
-
-       psmouse->private = priv;
-
-       psmouse_reset(psmouse);
-
-       if (alps_identify(psmouse, priv) < 0)
-               goto init_fail;
+       struct alps_data *priv = psmouse->private;
+       struct input_dev *dev1 = psmouse->dev;
+       int error;
 
-       if (priv->hw_init(psmouse))
+       error = priv->hw_init(psmouse);
+       if (error)
                goto init_fail;
 
        /*
@@ -2462,36 +2506,57 @@ int alps_init(struct psmouse *psmouse)
        }
 
        if (priv->flags & ALPS_DUALPOINT) {
+               struct input_dev *dev2;
+
+               dev2 = input_allocate_device();
+               if (!dev2) {
+                       psmouse_err(psmouse,
+                                   "failed to allocate trackstick device\n");
+                       error = -ENOMEM;
+                       goto init_fail;
+               }
+
+               snprintf(priv->phys2, sizeof(priv->phys2), "%s/input1",
+                        psmouse->ps2dev.serio->phys);
+               dev2->phys = priv->phys2;
+
                /*
                 * format of input device name is: "protocol vendor name"
                 * see function psmouse_switch_protocol() in psmouse-base.c
                 */
                dev2->name = "AlpsPS/2 ALPS DualPoint Stick";
+
+               dev2->id.bustype = BUS_I8042;
+               dev2->id.vendor  = 0x0002;
                dev2->id.product = PSMOUSE_ALPS;
                dev2->id.version = priv->proto_version;
-       } else {
-               dev2->name = "PS/2 ALPS Mouse";
-               dev2->id.product = PSMOUSE_PS2;
-               dev2->id.version = 0x0000;
-       }
+               dev2->dev.parent = &psmouse->ps2dev.serio->dev;
 
-       snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
-       dev2->phys = priv->phys;
-       dev2->id.bustype = BUS_I8042;
-       dev2->id.vendor  = 0x0002;
-       dev2->dev.parent = &psmouse->ps2dev.serio->dev;
+               input_set_capability(dev2, EV_REL, REL_X);
+               input_set_capability(dev2, EV_REL, REL_Y);
+               input_set_capability(dev2, EV_KEY, BTN_LEFT);
+               input_set_capability(dev2, EV_KEY, BTN_RIGHT);
+               input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
 
-       dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
-       dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-       dev2->keybit[BIT_WORD(BTN_LEFT)] =
-               BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
-
-       __set_bit(INPUT_PROP_POINTER, dev2->propbit);
-       if (priv->flags & ALPS_DUALPOINT)
+               __set_bit(INPUT_PROP_POINTER, dev2->propbit);
                __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
 
-       if (input_register_device(priv->dev2))
-               goto init_fail;
+               error = input_register_device(dev2);
+               if (error) {
+                       psmouse_err(psmouse,
+                                   "failed to register trackstick device: %d\n",
+                                   error);
+                       input_free_device(dev2);
+                       goto init_fail;
+               }
+
+               priv->dev2 = dev2;
+       }
+
+       priv->psmouse = psmouse;
+
+       INIT_DELAYED_WORK(&priv->dev3_register_work,
+                         alps_register_bare_ps2_mouse);
 
        psmouse->protocol_handler = alps_process_byte;
        psmouse->poll = alps_poll;
@@ -2509,25 +2574,56 @@ int alps_init(struct psmouse *psmouse)
 
 init_fail:
        psmouse_reset(psmouse);
-       input_free_device(dev2);
-       kfree(priv);
+       /*
+        * Even though we did not allocate psmouse->private we do free
+        * it here.
+        */
+       kfree(psmouse->private);
        psmouse->private = NULL;
-       return -1;
+       return error;
 }
 
 int alps_detect(struct psmouse *psmouse, bool set_properties)
 {
-       struct alps_data dummy;
+       struct alps_data *priv;
+       int error;
 
-       if (alps_identify(psmouse, &dummy) < 0)
-               return -1;
+       error = alps_identify(psmouse, NULL);
+       if (error)
+               return error;
+
+       /*
+        * Reset the device to make sure it is fully operational:
+        * on some laptops, like certain Dell Latitudes, we may
+        * fail to properly detect presence of trackstick if device
+        * has not been reset.
+        */
+       psmouse_reset(psmouse);
+
+       priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       error = alps_identify(psmouse, priv);
+       if (error)
+               return error;
 
        if (set_properties) {
                psmouse->vendor = "ALPS";
-               psmouse->name = dummy.flags & ALPS_DUALPOINT ?
+               psmouse->name = priv->flags & ALPS_DUALPOINT ?
                                "DualPoint TouchPad" : "GlidePoint";
-               psmouse->model = dummy.proto_version << 8;
+               psmouse->model = priv->proto_version;
+       } else {
+               /*
+                * Destroy alps_data structure we allocated earlier since
+                * this was just a "trial run". Otherwise we'll keep it
+                * to be used by alps_init() which has to be called if
+                * we succeed and set_properties is true.
+                */
+               kfree(priv);
+               psmouse->private = NULL;
        }
+
        return 0;
 }
 
index 66240b47819a9569c975cedd1d5443b2b59f6b86..02513c0502fc1309a9f8285ea2f4e3237fd7464a 100644 (file)
 
 #include <linux/input/mt.h>
 
-#define ALPS_PROTO_V1  1
-#define ALPS_PROTO_V2  2
-#define ALPS_PROTO_V3  3
-#define ALPS_PROTO_V4  4
-#define ALPS_PROTO_V5  5
-#define ALPS_PROTO_V6  6
-#define ALPS_PROTO_V7  7       /* t3btl t4s */
+#define ALPS_PROTO_V1          0x100
+#define ALPS_PROTO_V2          0x200
+#define ALPS_PROTO_V3          0x300
+#define ALPS_PROTO_V3_RUSHMORE 0x310
+#define ALPS_PROTO_V4          0x400
+#define ALPS_PROTO_V5          0x500
+#define ALPS_PROTO_V6          0x600
+#define ALPS_PROTO_V7          0x700   /* t3btl t4s */
 
 #define MAX_TOUCHES    2
 
@@ -45,6 +46,21 @@ enum V7_PACKET_ID {
         V7_PACKET_ID_UNKNOWN,
 };
 
+/**
+ * struct alps_protocol_info - information about protocol used by a device
+ * @version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ *   known format for this model.  The first byte of the report, ANDed with
+ *   mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ */
+struct alps_protocol_info {
+       u16 version;
+       u8 byte0, mask0;
+       unsigned int flags;
+};
+
 /**
  * struct alps_model_info - touchpad ID table
  * @signature: E7 response string to match.
@@ -52,23 +68,16 @@ enum V7_PACKET_ID {
  *   (aka command mode response) identifies the firmware minor version.  This
  *   can be used to distinguish different hardware models which are not
  *   uniquely identifiable through their E7 responses.
- * @proto_version: Indicates V1/V2/V3/...
- * @byte0: Helps figure out whether a position report packet matches the
- *   known format for this model.  The first byte of the report, ANDed with
- *   mask0, should match byte0.
- * @mask0: The mask used to check the first byte of the report.
- * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ * @protocol_info: information about protocol used by the device.
  *
  * Many (but not all) ALPS touchpads can be identified by looking at the
  * values returned in the "E7 report" and/or the "EC report."  This table
  * lists a number of such touchpads.
  */
 struct alps_model_info {
-       unsigned char signature[3];
-       unsigned char command_mode_resp;
-       unsigned char proto_version;
-       unsigned char byte0, mask0;
-       int flags;
+       u8 signature[3];
+       u8 command_mode_resp;
+       struct alps_protocol_info protocol_info;
 };
 
 /**
@@ -132,8 +141,12 @@ struct alps_fields {
 
 /**
  * struct alps_data - private data structure for the ALPS driver
- * @dev2: "Relative" device used to report trackstick or mouse activity.
- * @phys: Physical path for the relative device.
+ * @psmouse: Pointer to parent psmouse device
+ * @dev2: Trackstick device (can be NULL).
+ * @dev3: Generic PS/2 mouse (can be NULL, delayed registering).
+ * @phys2: Physical path for the trackstick device.
+ * @phys3: Physical path for the generic PS/2 mouse.
+ * @dev3_register_work: Delayed work for registering PS/2 mouse.
  * @nibble_commands: Command mapping used for touchpad register accesses.
  * @addr_command: Command used to tell the touchpad that a register address
  *   follows.
@@ -160,15 +173,19 @@ struct alps_fields {
  * @timer: Timer for flushing out the final report packet in the stream.
  */
 struct alps_data {
+       struct psmouse *psmouse;
        struct input_dev *dev2;
-       char phys[32];
+       struct input_dev *dev3;
+       char phys2[32];
+       char phys3[32];
+       struct delayed_work dev3_register_work;
 
        /* these are autodetected when the device is identified */
        const struct alps_nibble_commands *nibble_commands;
        int addr_command;
-       unsigned char proto_version;
-       unsigned char byte0, mask0;
-       unsigned char fw_ver[3];
+       u16 proto_version;
+       u8 byte0, mask0;
+       u8 fw_ver[3];
        int flags;
        int x_max;
        int y_max;
index 9118a1861a45cf0629f6380112eafeb2cc9b13b9..28dcfc822bf647f4386239d49487e111020a2272 100644 (file)
@@ -710,8 +710,3 @@ err_exit:
 
        return -1;
 }
-
-bool cypress_supported(void)
-{
-       return true;
-}
index 4720f21d2d70cfe2dbf20a626a5133010ba3e320..81f68aaed7c8567d3cfbfb4afc56008815ead34d 100644 (file)
@@ -172,7 +172,6 @@ struct cytp_data {
 #ifdef CONFIG_MOUSE_PS2_CYPRESS
 int cypress_detect(struct psmouse *psmouse, bool set_properties);
 int cypress_init(struct psmouse *psmouse);
-bool cypress_supported(void);
 #else
 inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
 {
@@ -182,10 +181,6 @@ inline int cypress_init(struct psmouse *psmouse)
 {
        return -ENOSYS;
 }
-inline bool cypress_supported(void)
-{
-       return 0;
-}
 #endif /* CONFIG_MOUSE_PS2_CYPRESS */
 
 #endif  /* _CYPRESS_PS2_H */
index fca38ba63bbe7f73f6e01195bd2e20395d683dd5..757f78a94aeccb1be6b80819f75752a09e705bf6 100644 (file)
@@ -424,11 +424,6 @@ fail:
        return error;
 }
 
-bool focaltech_supported(void)
-{
-       return true;
-}
-
 #else /* CONFIG_MOUSE_PS2_FOCALTECH */
 
 int focaltech_init(struct psmouse *psmouse)
@@ -438,9 +433,4 @@ int focaltech_init(struct psmouse *psmouse)
        return 0;
 }
 
-bool focaltech_supported(void)
-{
-       return false;
-}
-
 #endif /* CONFIG_MOUSE_PS2_FOCALTECH */
index 71870a9b548a8cd0a695fb9327b88e75daa83eb8..ca61ebff373e99a194011c1d8554d8ffb0766981 100644 (file)
@@ -19,6 +19,5 @@
 
 int focaltech_detect(struct psmouse *psmouse, bool set_properties);
 int focaltech_init(struct psmouse *psmouse);
-bool focaltech_supported(void);
 
 #endif
index 68469feda470d9d8b34c249cbcd02426795f94c8..4ccd01d7a48de9639a637db4a757c2c09c6c6836 100644 (file)
@@ -727,7 +727,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
        if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) {
                if (max_proto > PSMOUSE_IMEX) {
                        if (!set_properties || focaltech_init(psmouse) == 0) {
-                               if (focaltech_supported())
+                               if (IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH))
                                        return PSMOUSE_FOCALTECH;
                                /*
                                 * Note that we need to also restrict
@@ -776,7 +776,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  * Try activating protocol, but check if support is enabled first, since
  * we try detecting Synaptics even when protocol is disabled.
  */
-                       if (synaptics_supported() &&
+                       if (IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS) &&
                            (!set_properties || synaptics_init(psmouse) == 0)) {
                                return PSMOUSE_SYNAPTICS;
                        }
@@ -801,7 +801,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  */
        if (max_proto > PSMOUSE_IMEX &&
                        cypress_detect(psmouse, set_properties) == 0) {
-               if (cypress_supported()) {
+               if (IS_ENABLED(CONFIG_MOUSE_PS2_CYPRESS)) {
                        if (cypress_init(psmouse) == 0)
                                return PSMOUSE_CYPRESS;
 
index 7e705ee90b86cf0c86729aacbf6c7a1d981b0a12..f2cceb6493a0aea304c838043735ac3889041438 100644 (file)
@@ -1454,11 +1454,6 @@ int synaptics_init_relative(struct psmouse *psmouse)
        return __synaptics_init(psmouse, false);
 }
 
-bool synaptics_supported(void)
-{
-       return true;
-}
-
 #else /* CONFIG_MOUSE_PS2_SYNAPTICS */
 
 void __init synaptics_module_init(void)
@@ -1470,9 +1465,4 @@ int synaptics_init(struct psmouse *psmouse)
        return -ENOSYS;
 }
 
-bool synaptics_supported(void)
-{
-       return false;
-}
-
 #endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
index 6faf9bb7c117d46f40af90a9232fc2810c921155..aedc3299b14e2b753c1e9d51da95b0953ea081cb 100644 (file)
@@ -175,6 +175,5 @@ int synaptics_detect(struct psmouse *psmouse, bool set_properties);
 int synaptics_init(struct psmouse *psmouse);
 int synaptics_init_relative(struct psmouse *psmouse);
 void synaptics_reset(struct psmouse *psmouse);
-bool synaptics_supported(void);
 
 #endif /* _SYNAPTICS_H */
index 1daa7ca04577de0854f77b1ff4d31faf241e0baa..9acdc080e7ecd21b2cd256c18e31c9c27337c7d1 100644 (file)
@@ -192,14 +192,6 @@ static bool gic_local_irq_is_routable(int intr)
        }
 }
 
-unsigned int gic_get_timer_pending(void)
-{
-       unsigned int vpe_pending;
-
-       vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
-       return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
-}
-
 static void gic_bind_eic_interrupt(int irq, int set)
 {
        /* Convert irq vector # to hw int # */
index b8611e3e5e7451501fb1df22b27ef252c99dee21..09df54fc1fef2162bf06228dddc431f2f7a9feeb 100644 (file)
@@ -24,7 +24,7 @@ config MISDN_HFCMULTI
           * HFC-E1 (E1 interface for 2Mbit ISDN)
 
 config MISDN_HFCMULTI_8xx
-       boolean "Support for XHFC embedded board in HFC multiport driver"
+       bool "Support for XHFC embedded board in HFC multiport driver"
        depends on MISDN
        depends on MISDN_HFCMULTI
        depends on 8xx
index 3c92780bda09e17843f3cea5c7c35161e103c25c..ff48da61c94c849bf06cbb9ab9cb149515dcd626 100644 (file)
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
                enable_hwirq(hc);
                spin_unlock_irqrestore(&hc->lock, flags);
                /* Timeout 80ms */
-               current->state = TASK_UNINTERRUPTIBLE;
+               set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout((80 * HZ) / 1000);
                printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
                       hc->irq, hc->irqcnt);
index c4197503900ed3a1457e27d3a1c05207952036be..16f52ee739942b7bb23ec00d1a90e3f367e11e19 100644 (file)
@@ -1,6 +1,3 @@
-# Guest requires the device configuration and probing code.
-obj-$(CONFIG_LGUEST_GUEST) += lguest_device.o
-
 # Host requires the other files, which can be a module.
 obj-$(CONFIG_LGUEST)   += lg.o
 lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \
index 6590558d1d31c600b23c8d50f48b4e3b1c461326..7dc93aa004c86cfa988993d53164ea1d665aff97 100644 (file)
@@ -208,6 +208,14 @@ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
  */
 int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 {
+       /* If the launcher asked for a register with LHREQ_GETREG */
+       if (cpu->reg_read) {
+               if (put_user(*cpu->reg_read, user))
+                       return -EFAULT;
+               cpu->reg_read = NULL;
+               return sizeof(*cpu->reg_read);
+       }
+
        /* We stop running once the Guest is dead. */
        while (!cpu->lg->dead) {
                unsigned int irq;
@@ -217,21 +225,12 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
                if (cpu->hcall)
                        do_hypercalls(cpu);
 
-               /*
-                * It's possible the Guest did a NOTIFY hypercall to the
-                * Launcher.
-                */
-               if (cpu->pending_notify) {
-                       /*
-                        * Does it just needs to write to a registered
-                        * eventfd (ie. the appropriate virtqueue thread)?
-                        */
-                       if (!send_notify_to_eventfd(cpu)) {
-                               /* OK, we tell the main Launcher. */
-                               if (put_user(cpu->pending_notify, user))
-                                       return -EFAULT;
-                               return sizeof(cpu->pending_notify);
-                       }
+               /* Do we have to tell the Launcher about a trap? */
+               if (cpu->pending.trap) {
+                       if (copy_to_user(user, &cpu->pending,
+                                        sizeof(cpu->pending)))
+                               return -EFAULT;
+                       return sizeof(cpu->pending);
                }
 
                /*
index 83511eb0923d2f908a9e884d6470d493548e5cc0..1219af493c0f186bf0ccc71c5bac6b8c88564dbb 100644 (file)
@@ -117,9 +117,6 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
                /* Similarly, this sets the halted flag for run_guest(). */
                cpu->halted = 1;
                break;
-       case LHCALL_NOTIFY:
-               cpu->pending_notify = args->arg1;
-               break;
        default:
                /* It should be an architecture-specific hypercall. */
                if (lguest_arch_do_hcall(cpu, args))
@@ -189,7 +186,7 @@ static void do_async_hcalls(struct lg_cpu *cpu)
                 * Stop doing hypercalls if they want to notify the Launcher:
                 * it needs to service this first.
                 */
-               if (cpu->pending_notify)
+               if (cpu->pending.trap)
                        break;
        }
 }
@@ -280,7 +277,7 @@ void do_hypercalls(struct lg_cpu *cpu)
         * NOTIFY to the Launcher, we want to return now.  Otherwise we do
         * the hypercall.
         */
-       if (!cpu->pending_notify) {
+       if (!cpu->pending.trap) {
                do_hcall(cpu, cpu->hcall);
                /*
                 * Tricky point: we reset the hcall pointer to mark the
index 2eef40be4c041047aef345b464cd5c102c24a468..307e8b39e7d1dd2b0bdbe69bbefb6ca36b7b5567 100644 (file)
@@ -50,7 +50,10 @@ struct lg_cpu {
        /* Bitmap of what has changed: see CHANGED_* above. */
        int changed;
 
-       unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */
+       /* Pending operation. */
+       struct lguest_pending pending;
+
+       unsigned long *reg_read; /* register from LHREQ_GETREG */
 
        /* At end of a page shared mapped over lguest_pages in guest. */
        unsigned long regs_page;
@@ -78,24 +81,18 @@ struct lg_cpu {
        struct lg_cpu_arch arch;
 };
 
-struct lg_eventfd {
-       unsigned long addr;
-       struct eventfd_ctx *event;
-};
-
-struct lg_eventfd_map {
-       unsigned int num;
-       struct lg_eventfd map[];
-};
-
 /* The private info the thread maintains about the guest. */
 struct lguest {
        struct lguest_data __user *lguest_data;
        struct lg_cpu cpus[NR_CPUS];
        unsigned int nr_cpus;
 
+       /* Valid guest memory pages must be < this. */
        u32 pfn_limit;
 
+       /* Device memory is >= pfn_limit and < device_limit. */
+       u32 device_limit;
+
        /*
         * This provides the offset to the base of guest-physical memory in the
         * Launcher.
@@ -110,8 +107,6 @@ struct lguest {
        unsigned int stack_pages;
        u32 tsc_khz;
 
-       struct lg_eventfd_map *eventfds;
-
        /* Dead? */
        const char *dead;
 };
@@ -197,8 +192,10 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu);
 void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
                   unsigned long vaddr, pte_t val);
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
-bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
+bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode,
+                unsigned long *iomem);
 void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
+bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr);
 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
 void page_table_guest_data_init(struct lg_cpu *cpu);
 
@@ -210,6 +207,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu);
 int lguest_arch_init_hypercalls(struct lg_cpu *cpu);
 int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args);
 void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start);
+unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any);
 
 /* <arch>/switcher.S: */
 extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
deleted file mode 100644 (file)
index 89088d6..0000000
+++ /dev/null
@@ -1,540 +0,0 @@
-/*P:050
- * Lguest guests use a very simple method to describe devices.  It's a
- * series of device descriptors contained just above the top of normal Guest
- * memory.
- *
- * We use the standard "virtio" device infrastructure, which provides us with a
- * console, a network and a block driver.  Each one expects some configuration
- * information and a "virtqueue" or two to send and receive data.
-:*/
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/lguest_launcher.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/interrupt.h>
-#include <linux/virtio_ring.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <asm/paravirt.h>
-#include <asm/lguest_hcall.h>
-
-/* The pointer to our (page) of device descriptions. */
-static void *lguest_devices;
-
-/*
- * For Guests, device memory can be used as normal memory, so we cast away the
- * __iomem to quieten sparse.
- */
-static inline void *lguest_map(unsigned long phys_addr, unsigned long pages)
-{
-       return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages);
-}
-
-static inline void lguest_unmap(void *addr)
-{
-       iounmap((__force void __iomem *)addr);
-}
-
-/*D:100
- * Each lguest device is just a virtio device plus a pointer to its entry
- * in the lguest_devices page.
- */
-struct lguest_device {
-       struct virtio_device vdev;
-
-       /* The entry in the lguest_devices page for this device. */
-       struct lguest_device_desc *desc;
-};
-
-/*
- * Since the virtio infrastructure hands us a pointer to the virtio_device all
- * the time, it helps to have a curt macro to get a pointer to the struct
- * lguest_device it's enclosed in.
- */
-#define to_lgdev(vd) container_of(vd, struct lguest_device, vdev)
-
-/*D:130
- * Device configurations
- *
- * The configuration information for a device consists of one or more
- * virtqueues, a feature bitmap, and some configuration bytes.  The
- * configuration bytes don't really matter to us: the Launcher sets them up, and
- * the driver will look at them during setup.
- *
- * A convenient routine to return the device's virtqueue config array:
- * immediately after the descriptor.
- */
-static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc)
-{
-       return (void *)(desc + 1);
-}
-
-/* The features come immediately after the virtqueues. */
-static u8 *lg_features(const struct lguest_device_desc *desc)
-{
-       return (void *)(lg_vq(desc) + desc->num_vq);
-}
-
-/* The config space comes after the two feature bitmasks. */
-static u8 *lg_config(const struct lguest_device_desc *desc)
-{
-       return lg_features(desc) + desc->feature_len * 2;
-}
-
-/* The total size of the config page used by this device (incl. desc) */
-static unsigned desc_size(const struct lguest_device_desc *desc)
-{
-       return sizeof(*desc)
-               + desc->num_vq * sizeof(struct lguest_vqconfig)
-               + desc->feature_len * 2
-               + desc->config_len;
-}
-
-/* This gets the device's feature bits. */
-static u64 lg_get_features(struct virtio_device *vdev)
-{
-       unsigned int i;
-       u32 features = 0;
-       struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
-       u8 *in_features = lg_features(desc);
-
-       /* We do this the slow but generic way. */
-       for (i = 0; i < min(desc->feature_len * 8, 32); i++)
-               if (in_features[i / 8] & (1 << (i % 8)))
-                       features |= (1 << i);
-
-       return features;
-}
-
-/*
- * To notify on reset or feature finalization, we (ab)use the NOTIFY
- * hypercall, with the descriptor address of the device.
- */
-static void status_notify(struct virtio_device *vdev)
-{
-       unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
-
-       hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
-}
-
-/*
- * The virtio core takes the features the Host offers, and copies the ones
- * supported by the driver into the vdev->features array.  Once that's all
- * sorted out, this routine is called so we can tell the Host which features we
- * understand and accept.
- */
-static int lg_finalize_features(struct virtio_device *vdev)
-{
-       unsigned int i, bits;
-       struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
-       /* Second half of bitmap is features we accept. */
-       u8 *out_features = lg_features(desc) + desc->feature_len;
-
-       /* Give virtio_ring a chance to accept features. */
-       vring_transport_features(vdev);
-
-       /* Make sure we don't have any features > 32 bits! */
-       BUG_ON((u32)vdev->features != vdev->features);
-
-       /*
-        * Since lguest is currently x86-only, we're little-endian.  That
-        * means we could just memcpy.  But it's not time critical, and in
-        * case someone copies this code, we do it the slow, obvious way.
-        */
-       memset(out_features, 0, desc->feature_len);
-       bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
-       for (i = 0; i < bits; i++) {
-               if (__virtio_test_bit(vdev, i))
-                       out_features[i / 8] |= (1 << (i % 8));
-       }
-
-       /* Tell Host we've finished with this device's feature negotiation */
-       status_notify(vdev);
-
-       return 0;
-}
-
-/* Once they've found a field, getting a copy of it is easy. */
-static void lg_get(struct virtio_device *vdev, unsigned int offset,
-                  void *buf, unsigned len)
-{
-       struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
-
-       /* Check they didn't ask for more than the length of the config! */
-       BUG_ON(offset + len > desc->config_len);
-       memcpy(buf, lg_config(desc) + offset, len);
-}
-
-/* Setting the contents is also trivial. */
-static void lg_set(struct virtio_device *vdev, unsigned int offset,
-                  const void *buf, unsigned len)
-{
-       struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
-
-       /* Check they didn't ask for more than the length of the config! */
-       BUG_ON(offset + len > desc->config_len);
-       memcpy(lg_config(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access the status field
- * of the device descriptor.
- */
-static u8 lg_get_status(struct virtio_device *vdev)
-{
-       return to_lgdev(vdev)->desc->status;
-}
-
-static void lg_set_status(struct virtio_device *vdev, u8 status)
-{
-       BUG_ON(!status);
-       to_lgdev(vdev)->desc->status = status;
-
-       /* Tell Host immediately if we failed. */
-       if (status & VIRTIO_CONFIG_S_FAILED)
-               status_notify(vdev);
-}
-
-static void lg_reset(struct virtio_device *vdev)
-{
-       /* 0 status means "reset" */
-       to_lgdev(vdev)->desc->status = 0;
-       status_notify(vdev);
-}
-
-/*
- * Virtqueues
- *
- * The other piece of infrastructure virtio needs is a "virtqueue": a way of
- * the Guest device registering buffers for the other side to read from or
- * write into (ie. send and receive buffers).  Each device can have multiple
- * virtqueues: for example the console driver uses one queue for sending and
- * another for receiving.
- *
- * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue
- * already exists in virtio_ring.c.  We just need to connect it up.
- *
- * We start with the information we need to keep about each virtqueue.
- */
-
-/*D:140 This is the information we remember about each virtqueue. */
-struct lguest_vq_info {
-       /* A copy of the information contained in the device config. */
-       struct lguest_vqconfig config;
-
-       /* The address where we mapped the virtio ring, so we can unmap it. */
-       void *pages;
-};
-
-/*
- * When the virtio_ring code wants to prod the Host, it calls us here and we
- * make a hypercall.  We hand the physical address of the virtqueue so the Host
- * knows which virtqueue we're talking about.
- */
-static bool lg_notify(struct virtqueue *vq)
-{
-       /*
-        * We store our virtqueue information in the "priv" pointer of the
-        * virtqueue structure.
-        */
-       struct lguest_vq_info *lvq = vq->priv;
-
-       hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
-       return true;
-}
-
-/* An extern declaration inside a C file is bad form.  Don't do it. */
-extern int lguest_setup_irq(unsigned int irq);
-
-/*
- * This routine finds the Nth virtqueue described in the configuration of
- * this device and sets it up.
- *
- * This is kind of an ugly duckling.  It'd be nicer to have a standard
- * representation of a virtqueue in the configuration space, but it seems that
- * everyone wants to do it differently.  The KVM coders want the Guest to
- * allocate its own pages and tell the Host where they are, but for lguest it's
- * simpler for the Host to simply tell us where the pages are.
- */
-static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
-                                   unsigned index,
-                                   void (*callback)(struct virtqueue *vq),
-                                   const char *name)
-{
-       struct lguest_device *ldev = to_lgdev(vdev);
-       struct lguest_vq_info *lvq;
-       struct virtqueue *vq;
-       int err;
-
-       if (!name)
-               return NULL;
-
-       /* We must have this many virtqueues. */
-       if (index >= ldev->desc->num_vq)
-               return ERR_PTR(-ENOENT);
-
-       lvq = kmalloc(sizeof(*lvq), GFP_KERNEL);
-       if (!lvq)
-               return ERR_PTR(-ENOMEM);
-
-       /*
-        * Make a copy of the "struct lguest_vqconfig" entry, which sits after
-        * the descriptor.  We need a copy because the config space might not
-        * be aligned correctly.
-        */
-       memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config));
-
-       printk("Mapping virtqueue %i addr %lx\n", index,
-              (unsigned long)lvq->config.pfn << PAGE_SHIFT);
-       /* Figure out how many pages the ring will take, and map that memory */
-       lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
-                               DIV_ROUND_UP(vring_size(lvq->config.num,
-                                                       LGUEST_VRING_ALIGN),
-                                            PAGE_SIZE));
-       if (!lvq->pages) {
-               err = -ENOMEM;
-               goto free_lvq;
-       }
-
-       /*
-        * OK, tell virtio_ring.c to set up a virtqueue now we know its size
-        * and we've got a pointer to its pages.  Note that we set weak_barriers
-        * to 'true': the host just a(nother) SMP CPU, so we only need inter-cpu
-        * barriers.
-        */
-       vq = vring_new_virtqueue(index, lvq->config.num, LGUEST_VRING_ALIGN, vdev,
-                                true, lvq->pages, lg_notify, callback, name);
-       if (!vq) {
-               err = -ENOMEM;
-               goto unmap;
-       }
-
-       /* Make sure the interrupt is allocated. */
-       err = lguest_setup_irq(lvq->config.irq);
-       if (err)
-               goto destroy_vring;
-
-       /*
-        * Tell the interrupt for this virtqueue to go to the virtio_ring
-        * interrupt handler.
-        *
-        * FIXME: We used to have a flag for the Host to tell us we could use
-        * the interrupt as a source of randomness: it'd be nice to have that
-        * back.
-        */
-       err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
-                         dev_name(&vdev->dev), vq);
-       if (err)
-               goto free_desc;
-
-       /*
-        * Last of all we hook up our 'struct lguest_vq_info" to the
-        * virtqueue's priv pointer.
-        */
-       vq->priv = lvq;
-       return vq;
-
-free_desc:
-       irq_free_desc(lvq->config.irq);
-destroy_vring:
-       vring_del_virtqueue(vq);
-unmap:
-       lguest_unmap(lvq->pages);
-free_lvq:
-       kfree(lvq);
-       return ERR_PTR(err);
-}
-/*:*/
-
-/* Cleaning up a virtqueue is easy */
-static void lg_del_vq(struct virtqueue *vq)
-{
-       struct lguest_vq_info *lvq = vq->priv;
-
-       /* Release the interrupt */
-       free_irq(lvq->config.irq, vq);
-       /* Tell virtio_ring.c to free the virtqueue. */
-       vring_del_virtqueue(vq);
-       /* Unmap the pages containing the ring. */
-       lguest_unmap(lvq->pages);
-       /* Free our own queue information. */
-       kfree(lvq);
-}
-
-static void lg_del_vqs(struct virtio_device *vdev)
-{
-       struct virtqueue *vq, *n;
-
-       list_for_each_entry_safe(vq, n, &vdev->vqs, list)
-               lg_del_vq(vq);
-}
-
-static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
-                      struct virtqueue *vqs[],
-                      vq_callback_t *callbacks[],
-                      const char *names[])
-{
-       struct lguest_device *ldev = to_lgdev(vdev);
-       int i;
-
-       /* We must have this many virtqueues. */
-       if (nvqs > ldev->desc->num_vq)
-               return -ENOENT;
-
-       for (i = 0; i < nvqs; ++i) {
-               vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]);
-               if (IS_ERR(vqs[i]))
-                       goto error;
-       }
-       return 0;
-
-error:
-       lg_del_vqs(vdev);
-       return PTR_ERR(vqs[i]);
-}
-
-static const char *lg_bus_name(struct virtio_device *vdev)
-{
-       return "";
-}
-
-/* The ops structure which hooks everything together. */
-static const struct virtio_config_ops lguest_config_ops = {
-       .get_features = lg_get_features,
-       .finalize_features = lg_finalize_features,
-       .get = lg_get,
-       .set = lg_set,
-       .get_status = lg_get_status,
-       .set_status = lg_set_status,
-       .reset = lg_reset,
-       .find_vqs = lg_find_vqs,
-       .del_vqs = lg_del_vqs,
-       .bus_name = lg_bus_name,
-};
-
-/*
- * The root device for the lguest virtio devices.  This makes them appear as
- * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2.
- */
-static struct device *lguest_root;
-
-/*D:120
- * This is the core of the lguest bus: actually adding a new device.
- * It's a separate function because it's neater that way, and because an
- * earlier version of the code supported hotplug and unplug.  They were removed
- * early on because they were never used.
- *
- * As Andrew Tridgell says, "Untested code is buggy code".
- *
- * It's worth reading this carefully: we start with a pointer to the new device
- * descriptor in the "lguest_devices" page, and the offset into the device
- * descriptor page so we can uniquely identify it if things go badly wrong.
- */
-static void add_lguest_device(struct lguest_device_desc *d,
-                             unsigned int offset)
-{
-       struct lguest_device *ldev;
-
-       /* Start with zeroed memory; Linux's device layer counts on it. */
-       ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
-       if (!ldev) {
-               printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n",
-                      offset, d->type);
-               return;
-       }
-
-       /* This devices' parent is the lguest/ dir. */
-       ldev->vdev.dev.parent = lguest_root;
-       /*
-        * The device type comes straight from the descriptor.  There's also a
-        * device vendor field in the virtio_device struct, which we leave as
-        * 0.
-        */
-       ldev->vdev.id.device = d->type;
-       /*
-        * We have a simple set of routines for querying the device's
-        * configuration information and setting its status.
-        */
-       ldev->vdev.config = &lguest_config_ops;
-       /* And we remember the device's descriptor for lguest_config_ops. */
-       ldev->desc = d;
-
-       /*
-        * register_virtio_device() sets up the generic fields for the struct
-        * virtio_device and calls device_register().  This makes the bus
-        * infrastructure look for a matching driver.
-        */
-       if (register_virtio_device(&ldev->vdev) != 0) {
-               printk(KERN_ERR "Failed to register lguest dev %u type %u\n",
-                      offset, d->type);
-               kfree(ldev);
-       }
-}
-
-/*D:110
- * scan_devices() simply iterates through the device page.  The type 0 is
- * reserved to mean "end of devices".
- */
-static void scan_devices(void)
-{
-       unsigned int i;
-       struct lguest_device_desc *d;
-
-       /* We start at the page beginning, and skip over each entry. */
-       for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
-               d = lguest_devices + i;
-
-               /* Once we hit a zero, stop. */
-               if (d->type == 0)
-                       break;
-
-               printk("Device at %i has size %u\n", i, desc_size(d));
-               add_lguest_device(d, i);
-       }
-}
-
-/*D:105
- * Fairly early in boot, lguest_devices_init() is called to set up the
- * lguest device infrastructure.  We check that we are a Guest by checking
- * pv_info.name: there are other ways of checking, but this seems most
- * obvious to me.
- *
- * So we can access the "struct lguest_device_desc"s easily, we map that memory
- * and store the pointer in the global "lguest_devices".  Then we register a
- * root device from which all our devices will hang (this seems to be the
- * correct sysfs incantation).
- *
- * Finally we call scan_devices() which adds all the devices found in the
- * lguest_devices page.
- */
-static int __init lguest_devices_init(void)
-{
-       if (strcmp(pv_info.name, "lguest") != 0)
-               return 0;
-
-       lguest_root = root_device_register("lguest");
-       if (IS_ERR(lguest_root))
-               panic("Could not register lguest root");
-
-       /* Devices are in a single page above top of "normal" mem */
-       lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1);
-
-       scan_devices();
-       return 0;
-}
-/* We do this after core stuff, but before the drivers. */
-postcore_initcall(lguest_devices_init);
-
-/*D:150
- * At this point in the journey we used to now wade through the lguest
- * devices themselves: net, block and console.  Since they're all now virtio
- * devices rather than lguest-specific, I've decided to ignore them.  Mostly,
- * they're kind of boring.  But this does mean you'll never experience the
- * thrill of reading the forbidden love scene buried deep in the block driver.
- *
- * "make Launcher" beckons, where we answer questions like "Where do Guests
- * come from?", and "What do you do when someone asks for optimization?".
- */
index 4263f4cc8c55c0668cfdd2e577a7d72a01dd472b..c4c6113eb9a617a95f684ffa45ac87386b289095 100644 (file)
  * launcher controls and communicates with the Guest.  For example,
  * the first write will tell us the Guest's memory layout and entry
  * point.  A read will run the Guest until something happens, such as
- * a signal or the Guest doing a NOTIFY out to the Launcher.  There is
- * also a way for the Launcher to attach eventfds to particular NOTIFY
- * values instead of returning from the read() call.
+ * a signal or the Guest accessing a device.
 :*/
 #include <linux/uaccess.h>
 #include <linux/miscdevice.h>
 #include <linux/fs.h>
 #include <linux/sched.h>
-#include <linux/eventfd.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/export.h>
 #include "lg.h"
 
-/*L:056
- * Before we move on, let's jump ahead and look at what the kernel does when
- * it needs to look up the eventfds.  That will complete our picture of how we
- * use RCU.
- *
- * The notification value is in cpu->pending_notify: we return true if it went
- * to an eventfd.
- */
-bool send_notify_to_eventfd(struct lg_cpu *cpu)
-{
-       unsigned int i;
-       struct lg_eventfd_map *map;
-
-       /*
-        * This "rcu_read_lock()" helps track when someone is still looking at
-        * the (RCU-using) eventfds array.  It's not actually a lock at all;
-        * indeed it's a noop in many configurations.  (You didn't expect me to
-        * explain all the RCU secrets here, did you?)
-        */
-       rcu_read_lock();
-       /*
-        * rcu_dereference is the counter-side of rcu_assign_pointer(); it
-        * makes sure we don't access the memory pointed to by
-        * cpu->lg->eventfds before cpu->lg->eventfds is set.  Sounds crazy,
-        * but Alpha allows this!  Paul McKenney points out that a really
-        * aggressive compiler could have the same effect:
-        *   http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
-        *
-        * So play safe, use rcu_dereference to get the rcu-protected pointer:
-        */
-       map = rcu_dereference(cpu->lg->eventfds);
-       /*
-        * Simple array search: even if they add an eventfd while we do this,
-        * we'll continue to use the old array and just won't see the new one.
-        */
-       for (i = 0; i < map->num; i++) {
-               if (map->map[i].addr == cpu->pending_notify) {
-                       eventfd_signal(map->map[i].event, 1);
-                       cpu->pending_notify = 0;
-                       break;
-               }
-       }
-       /* We're done with the rcu-protected variable cpu->lg->eventfds. */
-       rcu_read_unlock();
-
-       /* If we cleared the notification, it's because we found a match. */
-       return cpu->pending_notify == 0;
-}
-
-/*L:055
- * One of the more tricksy tricks in the Linux Kernel is a technique called
- * Read Copy Update.  Since one point of lguest is to teach lguest journeyers
- * about kernel coding, I use it here.  (In case you're curious, other purposes
- * include learning about virtualization and instilling a deep appreciation for
- * simplicity and puppies).
- *
- * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
- * add new eventfds without ever blocking readers from accessing the array.
- * The current Launcher only does this during boot, so that never happens.  But
- * Read Copy Update is cool, and adding a lock risks damaging even more puppies
- * than this code does.
- *
- * We allocate a brand new one-larger array, copy the old one and add our new
- * element.  Then we make the lg eventfd pointer point to the new array.
- * That's the easy part: now we need to free the old one, but we need to make
- * sure no slow CPU somewhere is still looking at it.  That's what
- * synchronize_rcu does for us: waits until every CPU has indicated that it has
- * moved on to know it's no longer using the old one.
- *
- * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
- */
-static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
+/*L:052
+  The Launcher can get the registers, and also set some of them.
+*/
+static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input)
 {
-       struct lg_eventfd_map *new, *old = lg->eventfds;
-
-       /*
-        * We don't allow notifications on value 0 anyway (pending_notify of
-        * 0 means "nothing pending").
-        */
-       if (!addr)
-               return -EINVAL;
-
-       /*
-        * Replace the old array with the new one, carefully: others can
-        * be accessing it at the same time.
-        */
-       new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
-                     GFP_KERNEL);
-       if (!new)
-               return -ENOMEM;
+       unsigned long which;
 
-       /* First make identical copy. */
-       memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
-       new->num = old->num;
-
-       /* Now append new entry. */
-       new->map[new->num].addr = addr;
-       new->map[new->num].event = eventfd_ctx_fdget(fd);
-       if (IS_ERR(new->map[new->num].event)) {
-               int err =  PTR_ERR(new->map[new->num].event);
-               kfree(new);
-               return err;
-       }
-       new->num++;
+       /* We re-use the ptrace structure to specify which register to read. */
+       if (get_user(which, input) != 0)
+               return -EFAULT;
 
        /*
-        * Now put new one in place: rcu_assign_pointer() is a fancy way of
-        * doing "lg->eventfds = new", but it uses memory barriers to make
-        * absolutely sure that the contents of "new" written above is nailed
-        * down before we actually do the assignment.
+        * We set up the cpu register pointer, and their next read will
+        * actually get the value (instead of running the guest).
         *
-        * We have to think about these kinds of things when we're operating on
-        * live data without locks.
+        * The last argument 'true' says we can access any register.
         */
-       rcu_assign_pointer(lg->eventfds, new);
+       cpu->reg_read = lguest_arch_regptr(cpu, which, true);
+       if (!cpu->reg_read)
+               return -ENOENT;
 
-       /*
-        * We're not in a big hurry.  Wait until no one's looking at old
-        * version, then free it.
-        */
-       synchronize_rcu();
-       kfree(old);
-
-       return 0;
+       /* And because this is a write() call, we return the length used. */
+       return sizeof(unsigned long) * 2;
 }
 
-/*L:052
- * Receiving notifications from the Guest is usually done by attaching a
- * particular LHCALL_NOTIFY value to an event filedescriptor.  The eventfd will
- * become readable when the Guest does an LHCALL_NOTIFY with that value.
- *
- * This is really convenient for processing each virtqueue in a separate
- * thread.
- */
-static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
+static int setreg(struct lg_cpu *cpu, const unsigned long __user *input)
 {
-       unsigned long addr, fd;
-       int err;
+       unsigned long which, value, *reg;
 
-       if (get_user(addr, input) != 0)
+       /* We re-use the ptrace structure to specify which register to set. */
+       if (get_user(which, input) != 0)
                return -EFAULT;
        input++;
-       if (get_user(fd, input) != 0)
+       if (get_user(value, input) != 0)
                return -EFAULT;
 
-       /*
-        * Just make sure two callers don't add eventfds at once.  We really
-        * only need to lock against callers adding to the same Guest, so using
-        * the Big Lguest Lock is overkill.  But this is setup, not a fast path.
-        */
-       mutex_lock(&lguest_lock);
-       err = add_eventfd(lg, addr, fd);
-       mutex_unlock(&lguest_lock);
+       /* The last argument 'false' means we can't access all registers. */
+       reg = lguest_arch_regptr(cpu, which, false);
+       if (!reg)
+               return -ENOENT;
 
-       return err;
+       *reg = value;
+
+       /* And because this is a write() call, we return the length used. */
+       return sizeof(unsigned long) * 3;
 }
 
 /*L:050
@@ -194,6 +81,23 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
        return 0;
 }
 
+/*L:053
+ * Deliver a trap: this is used by the Launcher if it can't emulate
+ * an instruction.
+ */
+static int trap(struct lg_cpu *cpu, const unsigned long __user *input)
+{
+       unsigned long trapnum;
+
+       if (get_user(trapnum, input) != 0)
+               return -EFAULT;
+
+       if (!deliver_trap(cpu, trapnum))
+               return -EINVAL;
+
+       return 0;
+}
+
 /*L:040
  * Once our Guest is initialized, the Launcher makes it run by reading
  * from /dev/lguest.
@@ -237,8 +141,8 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
         * If we returned from read() last time because the Guest sent I/O,
         * clear the flag.
         */
-       if (cpu->pending_notify)
-               cpu->pending_notify = 0;
+       if (cpu->pending.trap)
+               cpu->pending.trap = 0;
 
        /* Run the Guest until something interesting happens. */
        return run_guest(cpu, (unsigned long __user *)user);
@@ -319,7 +223,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
        /* "struct lguest" contains all we (the Host) know about a Guest. */
        struct lguest *lg;
        int err;
-       unsigned long args[3];
+       unsigned long args[4];
 
        /*
         * We grab the Big Lguest lock, which protects against multiple
@@ -343,21 +247,15 @@ static int initialize(struct file *file, const unsigned long __user *input)
                goto unlock;
        }
 
-       lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
-       if (!lg->eventfds) {
-               err = -ENOMEM;
-               goto free_lg;
-       }
-       lg->eventfds->num = 0;
-
        /* Populate the easy fields of our "struct lguest" */
        lg->mem_base = (void __user *)args[0];
        lg->pfn_limit = args[1];
+       lg->device_limit = args[3];
 
        /* This is the first cpu (cpu 0) and it will start booting at args[2] */
        err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
        if (err)
-               goto free_eventfds;
+               goto free_lg;
 
        /*
         * Initialize the Guest's shadow page tables.  This allocates
@@ -378,8 +276,6 @@ static int initialize(struct file *file, const unsigned long __user *input)
 free_regs:
        /* FIXME: This should be in free_vcpu */
        free_page(lg->cpus[0].regs_page);
-free_eventfds:
-       kfree(lg->eventfds);
 free_lg:
        kfree(lg);
 unlock:
@@ -432,8 +328,12 @@ static ssize_t write(struct file *file, const char __user *in,
                return initialize(file, input);
        case LHREQ_IRQ:
                return user_send_irq(cpu, input);
-       case LHREQ_EVENTFD:
-               return attach_eventfd(lg, input);
+       case LHREQ_GETREG:
+               return getreg_setup(cpu, input);
+       case LHREQ_SETREG:
+               return setreg(cpu, input);
+       case LHREQ_TRAP:
+               return trap(cpu, input);
        default:
                return -EINVAL;
        }
@@ -478,11 +378,6 @@ static int close(struct inode *inode, struct file *file)
                mmput(lg->cpus[i].mm);
        }
 
-       /* Release any eventfds they registered. */
-       for (i = 0; i < lg->eventfds->num; i++)
-               eventfd_ctx_put(lg->eventfds->map[i].event);
-       kfree(lg->eventfds);
-
        /*
         * If lg->dead doesn't contain an error code it will be NULL or a
         * kmalloc()ed string, either of which is ok to hand to kfree().
index e8b55c3a617042e91c2936c5576b45b6387f9fc4..e3abebc912c00a0ffaaa4ba34b09279b394d002b 100644 (file)
@@ -250,6 +250,16 @@ static void release_pte(pte_t pte)
 }
 /*:*/
 
+static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
+{
+       /* We don't handle large pages. */
+       if (pte_flags(gpte) & _PAGE_PSE)
+               return false;
+
+       return (pte_pfn(gpte) >= cpu->lg->pfn_limit
+               && pte_pfn(gpte) < cpu->lg->device_limit);
+}
+
 static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
 {
        if ((pte_flags(gpte) & _PAGE_PSE) ||
@@ -374,8 +384,14 @@ static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
  *
  * If we fixed up the fault (ie. we mapped the address), this routine returns
  * true.  Otherwise, it was a real fault and we need to tell the Guest.
+ *
+ * There's a corner case: they're trying to access memory between
+ * pfn_limit and device_limit, which is I/O memory.  In this case, we
+ * return false and set @iomem to the physical address, so the
+ * Launcher can handle the instruction manually.
  */
-bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
+                unsigned long *iomem)
 {
        unsigned long gpte_ptr;
        pte_t gpte;
@@ -383,6 +399,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
        pmd_t gpmd;
        pgd_t gpgd;
 
+       *iomem = 0;
+
        /* We never demand page the Switcher, so trying is a mistake. */
        if (vaddr >= switcher_addr)
                return false;
@@ -459,6 +477,12 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
        if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
                return false;
 
+       /* If they're accessing io memory, we expect a fault. */
+       if (gpte_in_iomem(cpu, gpte)) {
+               *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
+               return false;
+       }
+
        /*
         * Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary).
@@ -553,7 +577,9 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
  */
 void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 {
-       if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
+       unsigned long iomem;
+
+       if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
                kill_guest(cpu, "bad stack page %#lx", vaddr);
 }
 /*:*/
@@ -647,7 +673,7 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu)
 /*:*/
 
 /* We walk down the guest page tables to get a guest-physical address */
-unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
+bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
 {
        pgd_t gpgd;
        pte_t gpte;
@@ -656,31 +682,47 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 #endif
 
        /* Still not set up?  Just map 1:1. */
-       if (unlikely(cpu->linear_pages))
-               return vaddr;
+       if (unlikely(cpu->linear_pages)) {
+               *paddr = vaddr;
+               return true;
+       }
 
        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present?  We can't map it in. */
-       if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
-               kill_guest(cpu, "Bad address %#lx", vaddr);
-               return -1UL;
-       }
+       if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+               goto fail;
 
 #ifdef CONFIG_X86_PAE
        gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
-       if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) {
-               kill_guest(cpu, "Bad address %#lx", vaddr);
-               return -1UL;
-       }
+       if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+               goto fail;
        gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
 #else
        gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
 #endif
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
-               kill_guest(cpu, "Bad address %#lx", vaddr);
+               goto fail;
+
+       *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+       return true;
+
+fail:
+       *paddr = -1UL;
+       return false;
+}
 
-       return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+/*
+ * This is the version we normally use: kills the Guest if it uses a
+ * bad address
+ */
+unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
+{
+       unsigned long paddr;
+
+       if (!__guest_pa(cpu, vaddr, &paddr))
+               kill_guest(cpu, "Bad address %#lx", vaddr);
+       return paddr;
 }
 
 /*
@@ -912,7 +954,8 @@ static void __guest_set_pte(struct lg_cpu *cpu, int idx,
                         * now.  This shaves 10% off a copy-on-write
                         * micro-benchmark.
                         */
-                       if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+                       if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
+                           && !gpte_in_iomem(cpu, gpte)) {
                                if (!check_gpte(cpu, gpte))
                                        return;
                                set_pte(spte,
index 6adfd7ba4c977434221d859185772f26530c8bc7..30f2aef69d787d7245b3e91c53b98a0a0216cdb9 100644 (file)
@@ -182,6 +182,52 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
 }
 /*:*/
 
+unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any)
+{
+       switch (reg_off) {
+       case offsetof(struct pt_regs, bx):
+               return &cpu->regs->ebx;
+       case offsetof(struct pt_regs, cx):
+               return &cpu->regs->ecx;
+       case offsetof(struct pt_regs, dx):
+               return &cpu->regs->edx;
+       case offsetof(struct pt_regs, si):
+               return &cpu->regs->esi;
+       case offsetof(struct pt_regs, di):
+               return &cpu->regs->edi;
+       case offsetof(struct pt_regs, bp):
+               return &cpu->regs->ebp;
+       case offsetof(struct pt_regs, ax):
+               return &cpu->regs->eax;
+       case offsetof(struct pt_regs, ip):
+               return &cpu->regs->eip;
+       case offsetof(struct pt_regs, sp):
+               return &cpu->regs->esp;
+       }
+
+       /* Launcher can read these, but we don't allow any setting. */
+       if (any) {
+               switch (reg_off) {
+               case offsetof(struct pt_regs, ds):
+                       return &cpu->regs->ds;
+               case offsetof(struct pt_regs, es):
+                       return &cpu->regs->es;
+               case offsetof(struct pt_regs, fs):
+                       return &cpu->regs->fs;
+               case offsetof(struct pt_regs, gs):
+                       return &cpu->regs->gs;
+               case offsetof(struct pt_regs, cs):
+                       return &cpu->regs->cs;
+               case offsetof(struct pt_regs, flags):
+                       return &cpu->regs->eflags;
+               case offsetof(struct pt_regs, ss):
+                       return &cpu->regs->ss;
+               }
+       }
+
+       return NULL;
+}
+
 /*M:002
  * There are hooks in the scheduler which we can register to tell when we
  * get kicked off the CPU (preempt_notifier_register()).  This would allow us
@@ -269,110 +315,73 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
  * usually attached to a PC.
  *
  * When the Guest uses one of these instructions, we get a trap (General
- * Protection Fault) and come here.  We see if it's one of those troublesome
- * instructions and skip over it.  We return true if we did.
+ * Protection Fault) and come here.  We queue this to be sent out to the
+ * Launcher to handle.
  */
-static int emulate_insn(struct lg_cpu *cpu)
-{
-       u8 insn;
-       unsigned int insnlen = 0, in = 0, small_operand = 0;
-       /*
-        * The eip contains the *virtual* address of the Guest's instruction:
-        * walk the Guest's page tables to find the "physical" address.
-        */
-       unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
-
-       /*
-        * This must be the Guest kernel trying to do something, not userspace!
-        * The bottom two bits of the CS segment register are the privilege
-        * level.
-        */
-       if ((cpu->regs->cs & 3) != GUEST_PL)
-               return 0;
-
-       /* Decoding x86 instructions is icky. */
-       insn = lgread(cpu, physaddr, u8);
 
-       /*
-        * Around 2.6.33, the kernel started using an emulation for the
-        * cmpxchg8b instruction in early boot on many configurations.  This
-        * code isn't paravirtualized, and it tries to disable interrupts.
-        * Ignore it, which will Mostly Work.
-        */
-       if (insn == 0xfa) {
-               /* "cli", or Clear Interrupt Enable instruction.  Skip it. */
-               cpu->regs->eip++;
-               return 1;
+/*
+ * The eip contains the *virtual* address of the Guest's instruction:
+ * we copy the instruction here so the Launcher doesn't have to walk
+ * the page tables to decode it.  We handle the case (eg. in a kernel
+ * module) where the instruction is over two pages, and the pages are
+ * virtually but not physically contiguous.
+ *
+ * The longest possible x86 instruction is 15 bytes, but we don't handle
+ * anything that strange.
+ */
+static void copy_from_guest(struct lg_cpu *cpu,
+                           void *dst, unsigned long vaddr, size_t len)
+{
+       size_t to_page_end = PAGE_SIZE - (vaddr % PAGE_SIZE);
+       unsigned long paddr;
+
+       BUG_ON(len > PAGE_SIZE);
+
+       /* If it goes over a page, copy in two parts. */
+       if (len > to_page_end) {
+               /* But make sure the next page is mapped! */
+               if (__guest_pa(cpu, vaddr + to_page_end, &paddr))
+                       copy_from_guest(cpu, dst + to_page_end,
+                                       vaddr + to_page_end,
+                                       len - to_page_end);
+               else
+                       /* Otherwise fill with zeroes. */
+                       memset(dst + to_page_end, 0, len - to_page_end);
+               len = to_page_end;
        }
 
-       /*
-        * 0x66 is an "operand prefix".  It means a 16, not 32 bit in/out.
-        */
-       if (insn == 0x66) {
-               small_operand = 1;
-               /* The instruction is 1 byte so far, read the next byte. */
-               insnlen = 1;
-               insn = lgread(cpu, physaddr + insnlen, u8);
-       }
+       /* This will kill the guest if it isn't mapped, but that
+        * shouldn't happen. */
+       __lgread(cpu, dst, guest_pa(cpu, vaddr), len);
+}
 
-       /*
-        * We can ignore the lower bit for the moment and decode the 4 opcodes
-        * we need to emulate.
-        */
-       switch (insn & 0xFE) {
-       case 0xE4: /* in     <next byte>,%al */
-               insnlen += 2;
-               in = 1;
-               break;
-       case 0xEC: /* in     (%dx),%al */
-               insnlen += 1;
-               in = 1;
-               break;
-       case 0xE6: /* out    %al,<next byte> */
-               insnlen += 2;
-               break;
-       case 0xEE: /* out    %al,(%dx) */
-               insnlen += 1;
-               break;
-       default:
-               /* OK, we don't know what this is, can't emulate. */
-               return 0;
-       }
 
-       /*
-        * If it was an "IN" instruction, they expect the result to be read
-        * into %eax, so we change %eax.  We always return all-ones, which
-        * traditionally means "there's nothing there".
-        */
-       if (in) {
-               /* Lower bit tells means it's a 32/16 bit access */
-               if (insn & 0x1) {
-                       if (small_operand)
-                               cpu->regs->eax |= 0xFFFF;
-                       else
-                               cpu->regs->eax = 0xFFFFFFFF;
-               } else
-                       cpu->regs->eax |= 0xFF;
-       }
-       /* Finally, we've "done" the instruction, so move past it. */
-       cpu->regs->eip += insnlen;
-       /* Success! */
-       return 1;
+static void setup_emulate_insn(struct lg_cpu *cpu)
+{
+       cpu->pending.trap = 13;
+       copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
+                       sizeof(cpu->pending.insn));
+}
+
+static void setup_iomem_insn(struct lg_cpu *cpu, unsigned long iomem_addr)
+{
+       cpu->pending.trap = 14;
+       cpu->pending.addr = iomem_addr;
+       copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
+                       sizeof(cpu->pending.insn));
 }
 
 /*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
 void lguest_arch_handle_trap(struct lg_cpu *cpu)
 {
+       unsigned long iomem_addr;
+
        switch (cpu->regs->trapnum) {
        case 13: /* We've intercepted a General Protection Fault. */
-               /*
-                * Check if this was one of those annoying IN or OUT
-                * instructions which we need to emulate.  If so, we just go
-                * back into the Guest after we've done it.
-                */
+               /* Hand to Launcher to emulate those pesky IN and OUT insns */
                if (cpu->regs->errcode == 0) {
-                       if (emulate_insn(cpu))
-                               return;
+                       setup_emulate_insn(cpu);
+                       return;
                }
                break;
        case 14: /* We've intercepted a Page Fault. */
@@ -387,9 +396,16 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
                 * whether kernel or userspace code.
                 */
                if (demand_page(cpu, cpu->arch.last_pagefault,
-                               cpu->regs->errcode))
+                               cpu->regs->errcode, &iomem_addr))
                        return;
 
+               /* Was this an access to memory mapped IO? */
+               if (iomem_addr) {
+                       /* Tell Launcher, let it handle it. */
+                       setup_iomem_insn(cpu, iomem_addr);
+                       return;
+               }
+
                /*
                 * OK, it's really not there (or not OK): the Guest needs to
                 * know.  We write out the cr2 value so it knows where the
index c39644478aa4e660f0ec2ddefedea4efbfd776b2..63e05e32b46269e29f8e75e03073d5587fd6d916 100644 (file)
@@ -178,7 +178,7 @@ config MD_FAULTY
 source "drivers/md/bcache/Kconfig"
 
 config BLK_DEV_DM_BUILTIN
-       boolean
+       bool
 
 config BLK_DEV_DM
        tristate "Device mapper support"
@@ -197,7 +197,7 @@ config BLK_DEV_DM
          If unsure, say N.
 
 config DM_DEBUG
-       boolean "Device mapper debugging support"
+       bool "Device mapper debugging support"
        depends on BLK_DEV_DM
        ---help---
          Enable this for messages that may help debug device-mapper problems.
index 08981be7baa183dbe963b6e38cd4866f34e278a7..713a96237a80c34951302dfa4a5ea6db9f44c39b 100644 (file)
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <linux/backing-dev.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
+#include <linux/rbtree.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
 #include <crypto/hash.h>
@@ -58,7 +60,8 @@ struct dm_crypt_io {
        atomic_t io_pending;
        int error;
        sector_t sector;
-       struct dm_crypt_io *base_io;
+
+       struct rb_node rb_node;
 } CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
@@ -108,7 +111,8 @@ struct iv_tcw_private {
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
-enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
+            DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
 
 /*
  * The fields in here must be read only after initialization.
@@ -121,14 +125,18 @@ struct crypt_config {
         * pool for per bio private data, crypto requests and
         * encryption requeusts/buffer pages
         */
-       mempool_t *io_pool;
        mempool_t *req_pool;
        mempool_t *page_pool;
        struct bio_set *bs;
+       struct mutex bio_alloc_lock;
 
        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;
 
+       struct task_struct *write_thread;
+       wait_queue_head_t write_thread_wait;
+       struct rb_root write_tree;
+
        char *cipher;
        char *cipher_string;
 
@@ -172,9 +180,6 @@ struct crypt_config {
 };
 
 #define MIN_IOS        16
-#define MIN_POOL_PAGES 32
-
-static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -946,57 +951,70 @@ static int crypt_convert(struct crypt_config *cc,
        return 0;
 }
 
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
+ *
+ * This function may be called concurrently. If we allocate from the mempool
+ * concurrently, there is a possibility of deadlock. For example, if we have
+ * mempool of 256 pages, two processes, each wanting 256 pages, allocate from
+ * the mempool concurrently, it may deadlock in a situation where both processes
+ * have allocated 128 pages and the mempool is exhausted.
+ *
+ * In order to avoid this scenario we allocate the pages under a mutex.
+ *
+ * In order to not degrade performance with excessive locking, we try
+ * non-blocking allocations without a mutex first but on failure we fallback
+ * to blocking allocations with a mutex.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-                                     unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-       unsigned i, len;
+       gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
+       unsigned i, len, remaining_size;
        struct page *page;
+       struct bio_vec *bvec;
+
+retry:
+       if (unlikely(gfp_mask & __GFP_WAIT))
+               mutex_lock(&cc->bio_alloc_lock);
 
        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
-               return NULL;
+               goto return_clone;
 
        clone_init(io, clone);
-       *out_of_pages = 0;
+
+       remaining_size = size;
 
        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!page) {
-                       *out_of_pages = 1;
-                       break;
+                       crypt_free_buffer_pages(cc, clone);
+                       bio_put(clone);
+                       gfp_mask |= __GFP_WAIT;
+                       goto retry;
                }
 
-               /*
-                * If additional pages cannot be allocated without waiting,
-                * return a partially-allocated bio.  The caller will then try
-                * to allocate more bios while submitting this partial bio.
-                */
-               gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+               len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
 
-               len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+               bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+               bvec->bv_page = page;
+               bvec->bv_len = len;
+               bvec->bv_offset = 0;
 
-               if (!bio_add_page(clone, page, len, 0)) {
-                       mempool_free(page, cc->page_pool);
-                       break;
-               }
+               clone->bi_iter.bi_size += len;
 
-               size -= len;
+               remaining_size -= len;
        }
 
-       if (!clone->bi_iter.bi_size) {
-               bio_put(clone);
-               return NULL;
-       }
+return_clone:
+       if (unlikely(gfp_mask & __GFP_WAIT))
+               mutex_unlock(&cc->bio_alloc_lock);
 
        return clone;
 }
@@ -1020,7 +1038,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
        io->base_bio = bio;
        io->sector = sector;
        io->error = 0;
-       io->base_io = NULL;
        io->ctx.req = NULL;
        atomic_set(&io->io_pending, 0);
 }
@@ -1033,13 +1050,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
-       struct dm_crypt_io *base_io = io->base_io;
        int error = io->error;
 
        if (!atomic_dec_and_test(&io->io_pending))
@@ -1047,16 +1062,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 
        if (io->ctx.req)
                crypt_free_req(cc, io->ctx.req, base_bio);
-       if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
-               mempool_free(io, cc->io_pool);
-
-       if (likely(!base_io))
-               bio_endio(base_bio, error);
-       else {
-               if (error && !base_io->error)
-                       base_io->error = error;
-               crypt_dec_pending(base_io);
-       }
+
+       bio_endio(base_bio, error);
 }
 
 /*
@@ -1138,37 +1145,97 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
        return 0;
 }
 
+static void kcryptd_io_read_work(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+       crypt_inc_pending(io);
+       if (kcryptd_io_read(io, GFP_NOIO))
+               io->error = -ENOMEM;
+       crypt_dec_pending(io);
+}
+
+static void kcryptd_queue_read(struct dm_crypt_io *io)
+{
+       struct crypt_config *cc = io->cc;
+
+       INIT_WORK(&io->work, kcryptd_io_read_work);
+       queue_work(cc->io_queue, &io->work);
+}
+
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
        struct bio *clone = io->ctx.bio_out;
+
        generic_make_request(clone);
 }
 
-static void kcryptd_io(struct work_struct *work)
+#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
+
+static int dmcrypt_write(void *data)
 {
-       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = data;
+       struct dm_crypt_io *io;
 
-       if (bio_data_dir(io->base_bio) == READ) {
-               crypt_inc_pending(io);
-               if (kcryptd_io_read(io, GFP_NOIO))
-                       io->error = -ENOMEM;
-               crypt_dec_pending(io);
-       } else
-               kcryptd_io_write(io);
-}
+       while (1) {
+               struct rb_root write_tree;
+               struct blk_plug plug;
 
-static void kcryptd_queue_io(struct dm_crypt_io *io)
-{
-       struct crypt_config *cc = io->cc;
+               DECLARE_WAITQUEUE(wait, current);
 
-       INIT_WORK(&io->work, kcryptd_io);
-       queue_work(cc->io_queue, &io->work);
+               spin_lock_irq(&cc->write_thread_wait.lock);
+continue_locked:
+
+               if (!RB_EMPTY_ROOT(&cc->write_tree))
+                       goto pop_from_list;
+
+               __set_current_state(TASK_INTERRUPTIBLE);
+               __add_wait_queue(&cc->write_thread_wait, &wait);
+
+               spin_unlock_irq(&cc->write_thread_wait.lock);
+
+               if (unlikely(kthread_should_stop())) {
+                       set_task_state(current, TASK_RUNNING);
+                       remove_wait_queue(&cc->write_thread_wait, &wait);
+                       break;
+               }
+
+               schedule();
+
+               set_task_state(current, TASK_RUNNING);
+               spin_lock_irq(&cc->write_thread_wait.lock);
+               __remove_wait_queue(&cc->write_thread_wait, &wait);
+               goto continue_locked;
+
+pop_from_list:
+               write_tree = cc->write_tree;
+               cc->write_tree = RB_ROOT;
+               spin_unlock_irq(&cc->write_thread_wait.lock);
+
+               BUG_ON(rb_parent(write_tree.rb_node));
+
+               /*
+                * Note: we cannot walk the tree here with rb_next because
+                * the structures may be freed when kcryptd_io_write is called.
+                */
+               blk_start_plug(&plug);
+               do {
+                       io = crypt_io_from_node(rb_first(&write_tree));
+                       rb_erase(&io->rb_node, &write_tree);
+                       kcryptd_io_write(io);
+               } while (!RB_EMPTY_ROOT(&write_tree));
+               blk_finish_plug(&plug);
+       }
+       return 0;
 }
 
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
        struct bio *clone = io->ctx.bio_out;
        struct crypt_config *cc = io->cc;
+       unsigned long flags;
+       sector_t sector;
+       struct rb_node **rbp, *parent;
 
        if (unlikely(io->error < 0)) {
                crypt_free_buffer_pages(cc, clone);
@@ -1182,20 +1249,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 
        clone->bi_iter.bi_sector = cc->start + io->sector;
 
-       if (async)
-               kcryptd_queue_io(io);
-       else
+       if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
                generic_make_request(clone);
+               return;
+       }
+
+       spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
+       rbp = &cc->write_tree.rb_node;
+       parent = NULL;
+       sector = io->sector;
+       while (*rbp) {
+               parent = *rbp;
+               if (sector < crypt_io_from_node(parent)->sector)
+                       rbp = &(*rbp)->rb_left;
+               else
+                       rbp = &(*rbp)->rb_right;
+       }
+       rb_link_node(&io->rb_node, parent, rbp);
+       rb_insert_color(&io->rb_node, &cc->write_tree);
+
+       wake_up_locked(&cc->write_thread_wait);
+       spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
 }
 
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
        struct bio *clone;
-       struct dm_crypt_io *new_io;
        int crypt_finished;
-       unsigned out_of_pages = 0;
-       unsigned remaining = io->base_bio->bi_iter.bi_size;
        sector_t sector = io->sector;
        int r;
 
@@ -1205,80 +1286,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        crypt_inc_pending(io);
        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
-       /*
-        * The allocated buffers can be smaller than the whole bio,
-        * so repeat the whole process until all the data can be handled.
-        */
-       while (remaining) {
-               clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-               if (unlikely(!clone)) {
-                       io->error = -ENOMEM;
-                       break;
-               }
-
-               io->ctx.bio_out = clone;
-               io->ctx.iter_out = clone->bi_iter;
-
-               remaining -= clone->bi_iter.bi_size;
-               sector += bio_sectors(clone);
-
-               crypt_inc_pending(io);
-
-               r = crypt_convert(cc, &io->ctx);
-               if (r < 0)
-                       io->error = -EIO;
-
-               crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-               /* Encryption was already finished, submit io now */
-               if (crypt_finished) {
-                       kcryptd_crypt_write_io_submit(io, 0);
-
-                       /*
-                        * If there was an error, do not try next fragments.
-                        * For async, error is processed in async handler.
-                        */
-                       if (unlikely(r < 0))
-                               break;
+       clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+       if (unlikely(!clone)) {
+               io->error = -EIO;
+               goto dec;
+       }
 
-                       io->sector = sector;
-               }
+       io->ctx.bio_out = clone;
+       io->ctx.iter_out = clone->bi_iter;
 
-               /*
-                * Out of memory -> run queues
-                * But don't wait if split was due to the io size restriction
-                */
-               if (unlikely(out_of_pages))
-                       congestion_wait(BLK_RW_ASYNC, HZ/100);
+       sector += bio_sectors(clone);
 
-               /*
-                * With async crypto it is unsafe to share the crypto context
-                * between fragments, so switch to a new dm_crypt_io structure.
-                */
-               if (unlikely(!crypt_finished && remaining)) {
-                       new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-                       crypt_io_init(new_io, io->cc, io->base_bio, sector);
-                       crypt_inc_pending(new_io);
-                       crypt_convert_init(cc, &new_io->ctx, NULL,
-                                          io->base_bio, sector);
-                       new_io->ctx.iter_in = io->ctx.iter_in;
-
-                       /*
-                        * Fragments after the first use the base_io
-                        * pending count.
-                        */
-                       if (!io->base_io)
-                               new_io->base_io = io;
-                       else {
-                               new_io->base_io = io->base_io;
-                               crypt_inc_pending(io->base_io);
-                               crypt_dec_pending(io);
-                       }
+       crypt_inc_pending(io);
+       r = crypt_convert(cc, &io->ctx);
+       if (r)
+               io->error = -EIO;
+       crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
-                       io = new_io;
-               }
+       /* Encryption was already finished, submit io now */
+       if (crypt_finished) {
+               kcryptd_crypt_write_io_submit(io, 0);
+               io->sector = sector;
        }
 
+dec:
        crypt_dec_pending(io);
 }
 
@@ -1481,6 +1512,9 @@ static void crypt_dtr(struct dm_target *ti)
        if (!cc)
                return;
 
+       if (cc->write_thread)
+               kthread_stop(cc->write_thread);
+
        if (cc->io_queue)
                destroy_workqueue(cc->io_queue);
        if (cc->crypt_queue)
@@ -1495,8 +1529,6 @@ static void crypt_dtr(struct dm_target *ti)
                mempool_destroy(cc->page_pool);
        if (cc->req_pool)
                mempool_destroy(cc->req_pool);
-       if (cc->io_pool)
-               mempool_destroy(cc->io_pool);
 
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
@@ -1688,7 +1720,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        char dummy;
 
        static struct dm_arg _args[] = {
-               {0, 1, "Invalid number of feature args"},
+               {0, 3, "Invalid number of feature args"},
        };
 
        if (argc < 5) {
@@ -1710,13 +1742,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        if (ret < 0)
                goto bad;
 
-       ret = -ENOMEM;
-       cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
-       if (!cc->io_pool) {
-               ti->error = "Cannot allocate crypt io mempool";
-               goto bad;
-       }
-
        cc->dmreq_start = sizeof(struct ablkcipher_request);
        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
        cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
@@ -1734,6 +1759,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
        }
 
+       ret = -ENOMEM;
        cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
                        sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
        if (!cc->req_pool) {
@@ -1746,7 +1772,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
                      ARCH_KMALLOC_MINALIGN);
 
-       cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+       cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad;
@@ -1758,6 +1784,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
+       mutex_init(&cc->bio_alloc_lock);
+
        ret = -EINVAL;
        if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
                ti->error = "Invalid iv_offset sector";
@@ -1788,15 +1816,26 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                if (ret)
                        goto bad;
 
-               opt_string = dm_shift_arg(&as);
+               while (opt_params--) {
+                       opt_string = dm_shift_arg(&as);
+                       if (!opt_string) {
+                               ti->error = "Not enough feature arguments";
+                               goto bad;
+                       }
 
-               if (opt_params == 1 && opt_string &&
-                   !strcasecmp(opt_string, "allow_discards"))
-                       ti->num_discard_bios = 1;
-               else if (opt_params) {
-                       ret = -EINVAL;
-                       ti->error = "Invalid feature arguments";
-                       goto bad;
+                       if (!strcasecmp(opt_string, "allow_discards"))
+                               ti->num_discard_bios = 1;
+
+                       else if (!strcasecmp(opt_string, "same_cpu_crypt"))
+                               set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+
+                       else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
+                               set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+
+                       else {
+                               ti->error = "Invalid feature arguments";
+                               goto bad;
+                       }
                }
        }
 
@@ -1807,13 +1846,28 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       cc->crypt_queue = alloc_workqueue("kcryptd",
-                                         WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+       if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+               cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+       else
+               cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+                                                 num_online_cpus());
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad;
        }
 
+       init_waitqueue_head(&cc->write_thread_wait);
+       cc->write_tree = RB_ROOT;
+
+       cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
+       if (IS_ERR(cc->write_thread)) {
+               ret = PTR_ERR(cc->write_thread);
+               cc->write_thread = NULL;
+               ti->error = "Couldn't spawn write thread";
+               goto bad;
+       }
+       wake_up_process(cc->write_thread);
+
        ti->num_flush_bios = 1;
        ti->discard_zeroes_data_unsupported = true;
 
@@ -1848,7 +1902,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 
        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
-                       kcryptd_queue_io(io);
+                       kcryptd_queue_read(io);
        } else
                kcryptd_queue_crypt(io);
 
@@ -1860,6 +1914,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 {
        struct crypt_config *cc = ti->private;
        unsigned i, sz = 0;
+       int num_feature_args = 0;
 
        switch (type) {
        case STATUSTYPE_INFO:
@@ -1878,8 +1933,18 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                                cc->dev->name, (unsigned long long)cc->start);
 
-               if (ti->num_discard_bios)
-                       DMEMIT(" 1 allow_discards");
+               num_feature_args += !!ti->num_discard_bios;
+               num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+               num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+               if (num_feature_args) {
+                       DMEMIT(" %d", num_feature_args);
+                       if (ti->num_discard_bios)
+                               DMEMIT(" allow_discards");
+                       if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+                               DMEMIT(" same_cpu_crypt");
+                       if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
+                               DMEMIT(" submit_from_crypt_cpus");
+               }
 
                break;
        }
@@ -1976,7 +2041,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version = {1, 13, 0},
+       .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
@@ -1994,15 +2059,9 @@ static int __init dm_crypt_init(void)
 {
        int r;
 
-       _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
-       if (!_crypt_io_pool)
-               return -ENOMEM;
-
        r = dm_register_target(&crypt_target);
-       if (r < 0) {
+       if (r < 0)
                DMERR("register failed %d", r);
-               kmem_cache_destroy(_crypt_io_pool);
-       }
 
        return r;
 }
@@ -2010,7 +2069,6 @@ static int __init dm_crypt_init(void)
 static void __exit dm_crypt_exit(void)
 {
        dm_unregister_target(&crypt_target);
-       kmem_cache_destroy(_crypt_io_pool);
 }
 
 module_init(dm_crypt_init);
index c09359db3a90730dbd32b3bd733709f3c6444192..37de0173b6d2324ed15de95442df37bc5a990e16 100644 (file)
@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
        unsigned short logical_block_size = queue_logical_block_size(q);
        sector_t num_sectors;
 
+       /* Reject unsupported discard requests */
+       if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+               dec_count(io, region, -EOPNOTSUPP);
+               return;
+       }
+
        /*
         * where->count may be zero if rw holds a flush and we need to
         * send a zero-sized flush.
index 7dfdb5c746d6f31960902350c33b7457485caadb..089d62751f7ff2a3aedf7e441cb88bec0d06b8a7 100644 (file)
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
                return;
        }
 
+       /*
+        * If the bio is discard, return an error, but do not
+        * degrade the array.
+        */
+       if (bio->bi_rw & REQ_DISCARD) {
+               bio_endio(bio, -EOPNOTSUPP);
+               return;
+       }
+
        for (i = 0; i < ms->nr_mirrors; i++)
                if (test_bit(i, &error))
                        fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
index 864b03f477276f9c01bee8fd34a25e2303af1116..8b204ae216ab62d354c814277dc413c34f0bf9a4 100644 (file)
@@ -1432,8 +1432,6 @@ out:
                full_bio->bi_private = pe->full_bio_private;
                atomic_inc(&full_bio->bi_remaining);
        }
-       free_pending_exception(pe);
-
        increment_pending_exceptions_done_count();
 
        up_write(&s->lock);
@@ -1450,6 +1448,8 @@ out:
        }
 
        retry_origin_bios(s, origin_bios);
+
+       free_pending_exception(pe);
 }
 
 static void commit_callback(void *context, int success)
index ec1444f49de14ac185ae39cfb214deee3ba66998..73f28802dc7abc3cb46dc38c8ef6fb5bb521e66b 100644 (file)
@@ -2571,7 +2571,7 @@ int dm_setup_md_queue(struct mapped_device *md)
        return 0;
 }
 
-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
 {
        struct mapped_device *md;
        unsigned minor = MINOR(dev);
@@ -2582,12 +2582,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
        spin_lock(&_minor_lock);
 
        md = idr_find(&_minor_idr, minor);
-       if (md && (md == MINOR_ALLOCED ||
-                  (MINOR(disk_devt(dm_disk(md))) != minor) ||
-                  dm_deleting_md(md) ||
-                  test_bit(DMF_FREEING, &md->flags))) {
-               md = NULL;
-               goto out;
+       if (md) {
+               if ((md == MINOR_ALLOCED ||
+                    (MINOR(disk_devt(dm_disk(md))) != minor) ||
+                    dm_deleting_md(md) ||
+                    test_bit(DMF_FREEING, &md->flags))) {
+                       md = NULL;
+                       goto out;
+               }
+               dm_get(md);
        }
 
 out:
@@ -2595,16 +2598,6 @@ out:
 
        return md;
 }
-
-struct mapped_device *dm_get_md(dev_t dev)
-{
-       struct mapped_device *md = dm_find_md(dev);
-
-       if (md)
-               dm_get(md);
-
-       return md;
-}
 EXPORT_SYMBOL_GPL(dm_get_md);
 
 void *dm_get_mdptr(struct mapped_device *md)
index c8d2bac4e28be4a65edb63d613b0336b7ff35783..cadf9cc02b2561ade72800e9a0986d7e23b09163 100644 (file)
@@ -2555,7 +2555,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
        return err ? err : len;
 }
 static struct rdev_sysfs_entry rdev_state =
-__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
+__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
 
 static ssize_t
 errors_show(struct md_rdev *rdev, char *page)
@@ -3638,7 +3638,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
        return err ?: len;
 }
 static struct md_sysfs_entry md_resync_start =
-__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
+__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
+               resync_start_show, resync_start_store);
 
 /*
  * The array state can be:
@@ -3851,7 +3852,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        return err ?: len;
 }
 static struct md_sysfs_entry md_array_state =
-__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
+__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
 
 static ssize_t
 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
@@ -4101,7 +4102,7 @@ out_unlock:
 }
 
 static struct md_sysfs_entry md_metadata =
-__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
 
 static ssize_t
 action_show(struct mddev *mddev, char *page)
@@ -4189,7 +4190,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
 }
 
 static struct md_sysfs_entry md_scan_mode =
-__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
+__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
 
 static ssize_t
 last_sync_action_show(struct mddev *mddev, char *page)
@@ -4335,7 +4336,8 @@ sync_completed_show(struct mddev *mddev, char *page)
        return sprintf(page, "%llu / %llu\n", resync, max_sectors);
 }
 
-static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
+static struct md_sysfs_entry md_sync_completed =
+       __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
 
 static ssize_t
 min_sync_show(struct mddev *mddev, char *page)
index 0c2dec7aec20fd798d45b24d92156b3f85f4aae0..78c74bb71ba42f11ff5035a5a593ed5732df9791 100644 (file)
@@ -8,7 +8,7 @@ config DM_PERSISTENT_DATA
         device-mapper targets such as the thin provisioning target.
 
 config DM_DEBUG_BLOCK_STACK_TRACING
-       boolean "Keep stack trace of persistent data block lock holders"
+       bool "Keep stack trace of persistent data block lock holders"
        depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
        select STACKTRACE
        ---help---
index cfbf9617e4658bd6aa84f45c840bda6f56a8402a..ebb280a14325e1d937986926b40592c3b1847168 100644 (file)
@@ -78,7 +78,9 @@ static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
        if (r)
                return r;
 
-       return count > 1;
+       *result = count > 1;
+
+       return 0;
 }
 
 static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
index 4153da5d40111844616e8247a78e16c561602395..d34e238afa54c24ccaefbc7c6d58974dc2104be6 100644 (file)
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                if (test_bit(WriteMostly, &rdev->flags)) {
                        /* Don't balance among write-mostly, just
                         * use the first as a last resort */
-                       if (best_disk < 0) {
+                       if (best_dist_disk < 0) {
                                if (is_badblock(rdev, this_sector, sectors,
                                                &first_bad, &bad_sectors)) {
                                        if (first_bad < this_sector)
@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                                        best_good_sectors = first_bad - this_sector;
                                } else
                                        best_good_sectors = sectors;
-                               best_disk = disk;
+                               best_dist_disk = disk;
+                               best_pending_disk = disk;
                        }
                        continue;
                }
index e75d48c0421a41788c9159ef7e74d22ad93d9695..cd2f96b2c57263628ef0816af3b114ad9437b740 100644 (file)
@@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
                schedule_timeout_uninterruptible(1);
        }
        /* Need to check if array will still be degraded after recovery/resync
-        * We don't need to check the 'failed' flag as when that gets set,
-        * recovery aborts.
+        * Note in case of > 1 drive failures it's possible we're rebuilding
+        * one drive while leaving another faulty drive in array.
         */
-       for (i = 0; i < conf->raid_disks; i++)
-               if (conf->disks[i].rdev == NULL)
+       rcu_read_lock();
+       for (i = 0; i < conf->raid_disks; i++) {
+               struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+
+               if (rdev == NULL || test_bit(Faulty, &rdev->flags))
                        still_degraded = 1;
+       }
+       rcu_read_unlock();
 
        bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
 
index 3a26045801645783ea6680f14b076b4a929e74e3..d2a85cde68da50c2efe49a52d87cdc739b5aeef0 100644 (file)
@@ -1111,7 +1111,7 @@ static int verify_addr(struct i2c_client *i2c)
        return 0;
 }
 
-static struct regmap_config pm860x_regmap_config = {
+static const struct regmap_config pm860x_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
index 2e6b7311fabc896f082aae33779025fb7fa93b69..38356e39adba0b5c4515ef76eed35b3a55de9be3 100644 (file)
@@ -195,6 +195,18 @@ config MFD_DA9063
          Additional drivers must be enabled in order to use the functionality
          of the device.
 
+config MFD_DA9150
+       tristate "Dialog Semiconductor DA9150 Charger Fuel-Gauge chip"
+       depends on I2C=y
+       select MFD_CORE
+       select REGMAP_I2C
+       select REGMAP_IRQ
+       help
+         This adds support for the DA9150 integrated charger and fuel-gauge
+         chip. This driver provides common support for accessing the device.
+         Additional drivers must be enabled in order to use the specific
+         features of the device.
+
 config MFD_DLN2
        tristate "Diolan DLN2 support"
        select MFD_CORE
@@ -417,6 +429,7 @@ config MFD_MAX14577
 config MFD_MAX77686
        bool "Maxim Semiconductor MAX77686/802 PMIC Support"
        depends on I2C=y
+       depends on OF
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
@@ -589,6 +602,20 @@ config MFD_PM8921_CORE
          Say M here if you want to include support for PM8921 chip as a module.
          This will build a module called "pm8921-core".
 
+config MFD_QCOM_RPM
+       tristate "Qualcomm Resource Power Manager (RPM)"
+       depends on ARCH_QCOM && OF
+       help
+         If you say yes to this option, support will be included for the
+         Resource Power Manager system found in the Qualcomm 8660, 8960 and
+         8064 based devices.
+
+         This is required to access many regulators, clocks and bus
+         frequencies controlled by the RPM on these devices.
+
+         Say M here if you want to include support for the Qualcomm RPM as a
+         module. This will build a module called "qcom_rpm".
+
 config MFD_SPMI_PMIC
        tristate "Qualcomm SPMI PMICs"
        depends on ARCH_QCOM || COMPILE_TEST
@@ -623,6 +650,18 @@ config MFD_RTSX_PCI
          types of memory cards, such as Memory Stick, Memory Stick Pro,
          Secure Digital and MultiMediaCard.
 
+config MFD_RT5033
+       tristate "Richtek RT5033 Power Management IC"
+       depends on I2C=y
+       select MFD_CORE
+       select REGMAP_I2C
+       help
+         This driver provides for the Richtek RT5033 Power Management IC,
+         which includes the I2C driver and the Core APIs. This driver provides
+         common support for accessing the device. The device supports multiple
+         sub-devices like charger, fuel gauge, flash LED, current source,
+         LDO and Buck.
+
 config MFD_RTSX_USB
        tristate "Realtek USB card reader"
        depends on USB
index 53467e21138118e881928944d5ae5651d3ca10cc..19f3d744e3bdad95f69337faf7c7c1ffc3058d67 100644 (file)
@@ -113,7 +113,7 @@ obj-$(CONFIG_MFD_DA9055)    += da9055.o
 
 da9063-objs                    := da9063-core.o da9063-irq.o da9063-i2c.o
 obj-$(CONFIG_MFD_DA9063)       += da9063.o
-
+obj-$(CONFIG_MFD_DA9150)       += da9150-core.o
 obj-$(CONFIG_MFD_MAX14577)     += max14577.o
 obj-$(CONFIG_MFD_MAX77686)     += max77686.o
 obj-$(CONFIG_MFD_MAX77693)     += max77693.o
@@ -153,6 +153,7 @@ obj-$(CONFIG_MFD_SI476X_CORE)       += si476x-core.o
 obj-$(CONFIG_MFD_CS5535)       += cs5535-mfd.o
 obj-$(CONFIG_MFD_OMAP_USB_HOST)        += omap-usb-host.o omap-usb-tll.o
 obj-$(CONFIG_MFD_PM8921_CORE)  += pm8921-core.o ssbi.o
+obj-$(CONFIG_MFD_QCOM_RPM)     += qcom_rpm.o
 obj-$(CONFIG_MFD_SPMI_PMIC)    += qcom-spmi-pmic.o
 obj-$(CONFIG_TPS65911_COMPARATOR)      += tps65911-comparator.o
 obj-$(CONFIG_MFD_TPS65090)     += tps65090.o
@@ -176,6 +177,7 @@ obj-$(CONFIG_MFD_IPAQ_MICRO)        += ipaq-micro.o
 obj-$(CONFIG_MFD_MENF21BMC)    += menf21bmc.o
 obj-$(CONFIG_MFD_HI6421_PMIC)  += hi6421-pmic-core.o
 obj-$(CONFIG_MFD_DLN2)         += dln2.o
+obj-$(CONFIG_MFD_RT5033)       += rt5033.o
 
 intel-soc-pmic-objs            := intel_soc_pmic_core.o intel_soc_pmic_crc.o
 obj-$(CONFIG_INTEL_SOC_PMIC)   += intel-soc-pmic.o
index f38bc98a3c57a60633b268bdee67136b58962918..facd3610ac77f3b3fe19470181e26c9f4335a340 100644 (file)
@@ -86,6 +86,7 @@ static const struct mfd_cell da9063_devs[] = {
        },
        {
                .name           = DA9063_DRVNAME_WATCHDOG,
+               .of_compatible  = "dlg,da9063-watchdog",
        },
        {
                .name           = DA9063_DRVNAME_HWMON,
@@ -101,6 +102,7 @@ static const struct mfd_cell da9063_devs[] = {
                .name           = DA9063_DRVNAME_RTC,
                .num_resources  = ARRAY_SIZE(da9063_rtc_resources),
                .resources      = da9063_rtc_resources,
+               .of_compatible  = "dlg,da9063-rtc",
        },
        {
                .name           = DA9063_DRVNAME_VIBRATION,
index 21fd8d9a217b01d2992e1225e6dae1e3a043c376..6f3a7c0001f9f2721dd43db7048f09ea8c72d6d1 100644 (file)
@@ -25,6 +25,9 @@
 #include <linux/mfd/da9063/pdata.h>
 #include <linux/mfd/da9063/registers.h>
 
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
+
 static const struct regmap_range da9063_ad_readable_ranges[] = {
        {
                .range_min = DA9063_REG_PAGE_CON,
@@ -203,6 +206,11 @@ static struct regmap_config da9063_regmap_config = {
        .cache_type = REGCACHE_RBTREE,
 };
 
+static const struct of_device_id da9063_dt_ids[] = {
+       { .compatible = "dlg,da9063", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, da9063_dt_ids);
 static int da9063_i2c_probe(struct i2c_client *i2c,
        const struct i2c_device_id *id)
 {
@@ -257,6 +265,7 @@ static struct i2c_driver da9063_i2c_driver = {
        .driver = {
                .name = "da9063",
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(da9063_dt_ids),
        },
        .probe    = da9063_i2c_probe,
        .remove   = da9063_i2c_remove,
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
new file mode 100644 (file)
index 0000000..4d757b9
--- /dev/null
@@ -0,0 +1,413 @@
+/*
+ * DA9150 Core MFD Driver
+ *
+ * Copyright (c) 2014 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/da9150/core.h>
+#include <linux/mfd/da9150/registers.h>
+
+static bool da9150_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case DA9150_PAGE_CON:
+       case DA9150_STATUS_A:
+       case DA9150_STATUS_B:
+       case DA9150_STATUS_C:
+       case DA9150_STATUS_D:
+       case DA9150_STATUS_E:
+       case DA9150_STATUS_F:
+       case DA9150_STATUS_G:
+       case DA9150_STATUS_H:
+       case DA9150_STATUS_I:
+       case DA9150_STATUS_J:
+       case DA9150_STATUS_K:
+       case DA9150_STATUS_L:
+       case DA9150_STATUS_N:
+       case DA9150_FAULT_LOG_A:
+       case DA9150_FAULT_LOG_B:
+       case DA9150_EVENT_E:
+       case DA9150_EVENT_F:
+       case DA9150_EVENT_G:
+       case DA9150_EVENT_H:
+       case DA9150_CONTROL_B:
+       case DA9150_CONTROL_C:
+       case DA9150_GPADC_MAN:
+       case DA9150_GPADC_RES_A:
+       case DA9150_GPADC_RES_B:
+       case DA9150_ADETVB_CFG_C:
+       case DA9150_ADETD_STAT:
+       case DA9150_ADET_CMPSTAT:
+       case DA9150_ADET_CTRL_A:
+       case DA9150_PPR_TCTR_B:
+       case DA9150_COREBTLD_STAT_A:
+       case DA9150_CORE_DATA_A:
+       case DA9150_CORE_DATA_B:
+       case DA9150_CORE_DATA_C:
+       case DA9150_CORE_DATA_D:
+       case DA9150_CORE2WIRE_STAT_A:
+       case DA9150_FW_CTRL_C:
+       case DA9150_FG_CTRL_B:
+       case DA9150_FW_CTRL_B:
+       case DA9150_GPADC_CMAN:
+       case DA9150_GPADC_CRES_A:
+       case DA9150_GPADC_CRES_B:
+       case DA9150_CC_ICHG_RES_A:
+       case DA9150_CC_ICHG_RES_B:
+       case DA9150_CC_IAVG_RES_A:
+       case DA9150_CC_IAVG_RES_B:
+       case DA9150_TAUX_CTRL_A:
+       case DA9150_TAUX_VALUE_H:
+       case DA9150_TAUX_VALUE_L:
+       case DA9150_TBAT_RES_A:
+       case DA9150_TBAT_RES_B:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static const struct regmap_range_cfg da9150_range_cfg[] = {
+       {
+               .range_min = DA9150_PAGE_CON,
+               .range_max = DA9150_TBAT_RES_B,
+               .selector_reg = DA9150_PAGE_CON,
+               .selector_mask = DA9150_I2C_PAGE_MASK,
+               .selector_shift = DA9150_I2C_PAGE_SHIFT,
+               .window_start = 0,
+               .window_len = 256,
+       },
+};
+
+static struct regmap_config da9150_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .ranges = da9150_range_cfg,
+       .num_ranges = ARRAY_SIZE(da9150_range_cfg),
+       .max_register = DA9150_TBAT_RES_B,
+
+       .cache_type = REGCACHE_RBTREE,
+
+       .volatile_reg = da9150_volatile_reg,
+};
+
+u8 da9150_reg_read(struct da9150 *da9150, u16 reg)
+{
+       int val, ret;
+
+       ret = regmap_read(da9150->regmap, reg, &val);
+       if (ret)
+               dev_err(da9150->dev, "Failed to read from reg 0x%x: %d\n",
+                       reg, ret);
+
+       return (u8) val;
+}
+EXPORT_SYMBOL_GPL(da9150_reg_read);
+
+void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val)
+{
+       int ret;
+
+       ret = regmap_write(da9150->regmap, reg, val);
+       if (ret)
+               dev_err(da9150->dev, "Failed to write to reg 0x%x: %d\n",
+                       reg, ret);
+}
+EXPORT_SYMBOL_GPL(da9150_reg_write);
+
+void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val)
+{
+       int ret;
+
+       ret = regmap_update_bits(da9150->regmap, reg, mask, val);
+       if (ret)
+               dev_err(da9150->dev, "Failed to set bits in reg 0x%x: %d\n",
+                       reg, ret);
+}
+EXPORT_SYMBOL_GPL(da9150_set_bits);
+
+void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf)
+{
+       int ret;
+
+       ret = regmap_bulk_read(da9150->regmap, reg, buf, count);
+       if (ret)
+               dev_err(da9150->dev, "Failed to bulk read from reg 0x%x: %d\n",
+                       reg, ret);
+}
+EXPORT_SYMBOL_GPL(da9150_bulk_read);
+
+void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf)
+{
+       int ret;
+
+       ret = regmap_raw_write(da9150->regmap, reg, buf, count);
+       if (ret)
+               dev_err(da9150->dev, "Failed to bulk write to reg 0x%x %d\n",
+                       reg, ret);
+}
+EXPORT_SYMBOL_GPL(da9150_bulk_write);
+
+static struct regmap_irq da9150_irqs[] = {
+       [DA9150_IRQ_VBUS] = {
+               .reg_offset = 0,
+               .mask = DA9150_E_VBUS_MASK,
+       },
+       [DA9150_IRQ_CHG] = {
+               .reg_offset = 0,
+               .mask = DA9150_E_CHG_MASK,
+       },
+       [DA9150_IRQ_TCLASS] = {
+               .reg_offset = 0,
+               .mask = DA9150_E_TCLASS_MASK,
+       },
+       [DA9150_IRQ_TJUNC] = {
+               .reg_offset = 0,
+               .mask = DA9150_E_TJUNC_MASK,
+       },
+       [DA9150_IRQ_VFAULT] = {
+               .reg_offset = 0,
+               .mask = DA9150_E_VFAULT_MASK,
+       },
+       [DA9150_IRQ_CONF] = {
+               .reg_offset = 1,
+               .mask = DA9150_E_CONF_MASK,
+       },
+       [DA9150_IRQ_DAT] = {
+               .reg_offset = 1,
+               .mask = DA9150_E_DAT_MASK,
+       },
+       [DA9150_IRQ_DTYPE] = {
+               .reg_offset = 1,
+               .mask = DA9150_E_DTYPE_MASK,
+       },
+       [DA9150_IRQ_ID] = {
+               .reg_offset = 1,
+               .mask = DA9150_E_ID_MASK,
+       },
+       [DA9150_IRQ_ADP] = {
+               .reg_offset = 1,
+               .mask = DA9150_E_ADP_MASK,
+       },
+       [DA9150_IRQ_SESS_END] = {
+               .reg_offset = 1,
+               .mask = DA9150_E_SESS_END_MASK,
+       },
+       [DA9150_IRQ_SESS_VLD] = {
+               .reg_offset = 1,
+               .mask = DA9150_E_SESS_VLD_MASK,
+       },
+       [DA9150_IRQ_FG] = {
+               .reg_offset = 2,
+               .mask = DA9150_E_FG_MASK,
+       },
+       [DA9150_IRQ_GP] = {
+               .reg_offset = 2,
+               .mask = DA9150_E_GP_MASK,
+       },
+       [DA9150_IRQ_TBAT] = {
+               .reg_offset = 2,
+               .mask = DA9150_E_TBAT_MASK,
+       },
+       [DA9150_IRQ_GPIOA] = {
+               .reg_offset = 2,
+               .mask = DA9150_E_GPIOA_MASK,
+       },
+       [DA9150_IRQ_GPIOB] = {
+               .reg_offset = 2,
+               .mask = DA9150_E_GPIOB_MASK,
+       },
+       [DA9150_IRQ_GPIOC] = {
+               .reg_offset = 2,
+               .mask = DA9150_E_GPIOC_MASK,
+       },
+       [DA9150_IRQ_GPIOD] = {
+               .reg_offset = 2,
+               .mask = DA9150_E_GPIOD_MASK,
+       },
+       [DA9150_IRQ_GPADC] = {
+               .reg_offset = 2,
+               .mask = DA9150_E_GPADC_MASK,
+       },
+       [DA9150_IRQ_WKUP] = {
+               .reg_offset = 3,
+               .mask = DA9150_E_WKUP_MASK,
+       },
+};
+
+static struct regmap_irq_chip da9150_regmap_irq_chip = {
+       .name = "da9150_irq",
+       .status_base = DA9150_EVENT_E,
+       .mask_base = DA9150_IRQ_MASK_E,
+       .ack_base = DA9150_EVENT_E,
+       .num_regs = DA9150_NUM_IRQ_REGS,
+       .irqs = da9150_irqs,
+       .num_irqs = ARRAY_SIZE(da9150_irqs),
+};
+
+static struct resource da9150_gpadc_resources[] = {
+       {
+               .name = "GPADC",
+               .start = DA9150_IRQ_GPADC,
+               .end = DA9150_IRQ_GPADC,
+               .flags = IORESOURCE_IRQ,
+       },
+};
+
+static struct resource da9150_charger_resources[] = {
+       {
+               .name = "CHG_STATUS",
+               .start = DA9150_IRQ_CHG,
+               .end = DA9150_IRQ_CHG,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .name = "CHG_TJUNC",
+               .start = DA9150_IRQ_TJUNC,
+               .end = DA9150_IRQ_TJUNC,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .name = "CHG_VFAULT",
+               .start = DA9150_IRQ_VFAULT,
+               .end = DA9150_IRQ_VFAULT,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .name = "CHG_VBUS",
+               .start = DA9150_IRQ_VBUS,
+               .end = DA9150_IRQ_VBUS,
+               .flags = IORESOURCE_IRQ,
+       },
+};
+
+static struct mfd_cell da9150_devs[] = {
+       {
+               .name = "da9150-gpadc",
+               .of_compatible = "dlg,da9150-gpadc",
+               .resources = da9150_gpadc_resources,
+               .num_resources = ARRAY_SIZE(da9150_gpadc_resources),
+       },
+       {
+               .name = "da9150-charger",
+               .of_compatible = "dlg,da9150-charger",
+               .resources = da9150_charger_resources,
+               .num_resources = ARRAY_SIZE(da9150_charger_resources),
+       },
+};
+
+static int da9150_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       struct da9150 *da9150;
+       struct da9150_pdata *pdata = dev_get_platdata(&client->dev);
+       int ret;
+
+       da9150 = devm_kzalloc(&client->dev, sizeof(*da9150), GFP_KERNEL);
+       if (!da9150)
+               return -ENOMEM;
+
+       da9150->dev = &client->dev;
+       da9150->irq = client->irq;
+       i2c_set_clientdata(client, da9150);
+
+       da9150->regmap = devm_regmap_init_i2c(client, &da9150_regmap_config);
+       if (IS_ERR(da9150->regmap)) {
+               ret = PTR_ERR(da9150->regmap);
+               dev_err(da9150->dev, "Failed to allocate register map: %d\n",
+                       ret);
+               return ret;
+       }
+
+       da9150->irq_base = pdata ? pdata->irq_base : -1;
+
+       ret = regmap_add_irq_chip(da9150->regmap, da9150->irq,
+                                 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                 da9150->irq_base, &da9150_regmap_irq_chip,
+                                 &da9150->regmap_irq_data);
+       if (ret)
+               return ret;
+
+       da9150->irq_base = regmap_irq_chip_get_base(da9150->regmap_irq_data);
+       enable_irq_wake(da9150->irq);
+
+       ret = mfd_add_devices(da9150->dev, -1, da9150_devs,
+                             ARRAY_SIZE(da9150_devs), NULL,
+                             da9150->irq_base, NULL);
+       if (ret) {
+               dev_err(da9150->dev, "Failed to add child devices: %d\n", ret);
+               regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int da9150_remove(struct i2c_client *client)
+{
+       struct da9150 *da9150 = i2c_get_clientdata(client);
+
+       regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data);
+       mfd_remove_devices(da9150->dev);
+
+       return 0;
+}
+
+static void da9150_shutdown(struct i2c_client *client)
+{
+       struct da9150 *da9150 = i2c_get_clientdata(client);
+
+       /* Make sure we have a wakup source for the device */
+       da9150_set_bits(da9150, DA9150_CONFIG_D,
+                       DA9150_WKUP_PM_EN_MASK,
+                       DA9150_WKUP_PM_EN_MASK);
+
+       /* Set device to DISABLED mode */
+       da9150_set_bits(da9150, DA9150_CONTROL_C,
+                       DA9150_DISABLE_MASK, DA9150_DISABLE_MASK);
+}
+
+static const struct i2c_device_id da9150_i2c_id[] = {
+       { "da9150", },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, da9150_i2c_id);
+
+static const struct of_device_id da9150_of_match[] = {
+       { .compatible = "dlg,da9150", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, da9150_of_match);
+
+static struct i2c_driver da9150_driver = {
+       .driver = {
+               .name   = "da9150",
+               .of_match_table = of_match_ptr(da9150_of_match),
+       },
+       .probe          = da9150_probe,
+       .remove         = da9150_remove,
+       .shutdown       = da9150_shutdown,
+       .id_table       = da9150_i2c_id,
+};
+
+module_i2c_driver(da9150_driver);
+
+MODULE_DESCRIPTION("MFD Core Driver for DA9150");
+MODULE_AUTHOR("Adam Thomson <Adam.Thomson.Opensource@diasemi.com>");
+MODULE_LICENSE("GPL");
index c835e85539b2ae568cf92549126086f864f29efd..9bbc642a7b9db5bc53eb383db667abe399b2959f 100644 (file)
@@ -33,7 +33,7 @@
 
 #include <linux/mfd/davinci_voicecodec.h>
 
-static struct regmap_config davinci_vc_regmap = {
+static const struct regmap_config davinci_vc_regmap = {
        .reg_bits = 32,
        .val_bits = 32,
 };
index 16162bf43656a093320c0dac7c49a8b2dc199ec2..cc1a404328c294d6ed119801e3dbf613c832c0ba 100644 (file)
@@ -674,15 +674,6 @@ bool prcmu_has_arm_maxopp(void)
                PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
 }
 
-/**
- * prcmu_get_boot_status - PRCMU boot status checking
- * Returns: the current PRCMU boot status
- */
-int prcmu_get_boot_status(void)
-{
-       return readb(tcdm_base + PRCM_BOOT_STATUS);
-}
-
 /**
  * prcmu_set_rc_a2p - This function is used to run few power state sequences
  * @val: Value to be set, i.e. transition requested
index 6d49685d4ee4f48f1459258fabaad6d47a3b097b..1be9bd1c046d1e5279c9968e6babc893ad350f64 100644 (file)
@@ -587,12 +587,19 @@ static void dln2_free_rx_urbs(struct dln2_dev *dln2)
        int i;
 
        for (i = 0; i < DLN2_MAX_URBS; i++) {
-               usb_kill_urb(dln2->rx_urb[i]);
                usb_free_urb(dln2->rx_urb[i]);
                kfree(dln2->rx_buf[i]);
        }
 }
 
+static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
+{
+       int i;
+
+       for (i = 0; i < DLN2_MAX_URBS; i++)
+               usb_kill_urb(dln2->rx_urb[i]);
+}
+
 static void dln2_free(struct dln2_dev *dln2)
 {
        dln2_free_rx_urbs(dln2);
@@ -604,9 +611,7 @@ static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
                              struct usb_host_interface *hostif)
 {
        int i;
-       int ret;
        const int rx_max_size = DLN2_RX_BUF_SIZE;
-       struct device *dev = &dln2->interface->dev;
 
        for (i = 0; i < DLN2_MAX_URBS; i++) {
                dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
@@ -620,8 +625,19 @@ static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
                usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
                                  usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
                                  dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
+       }
 
-               ret = usb_submit_urb(dln2->rx_urb[i], GFP_KERNEL);
+       return 0;
+}
+
+static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
+{
+       struct device *dev = &dln2->interface->dev;
+       int ret;
+       int i;
+
+       for (i = 0; i < DLN2_MAX_URBS; i++) {
+               ret = usb_submit_urb(dln2->rx_urb[i], gfp);
                if (ret < 0) {
                        dev_err(dev, "failed to submit RX URB: %d\n", ret);
                        return ret;
@@ -665,9 +681,8 @@ static const struct mfd_cell dln2_devs[] = {
        },
 };
 
-static void dln2_disconnect(struct usb_interface *interface)
+static void dln2_stop(struct dln2_dev *dln2)
 {
-       struct dln2_dev *dln2 = usb_get_intfdata(interface);
        int i, j;
 
        /* don't allow starting new transfers */
@@ -696,6 +711,15 @@ static void dln2_disconnect(struct usb_interface *interface)
        /* wait for transfers to end */
        wait_event(dln2->disconnect_wq, !dln2->active_transfers);
 
+       dln2_stop_rx_urbs(dln2);
+}
+
+static void dln2_disconnect(struct usb_interface *interface)
+{
+       struct dln2_dev *dln2 = usb_get_intfdata(interface);
+
+       dln2_stop(dln2);
+
        mfd_remove_devices(&interface->dev);
 
        dln2_free(dln2);
@@ -738,28 +762,53 @@ static int dln2_probe(struct usb_interface *interface,
 
        ret = dln2_setup_rx_urbs(dln2, hostif);
        if (ret)
-               goto out_cleanup;
+               goto out_free;
+
+       ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
+       if (ret)
+               goto out_stop_rx;
 
        ret = dln2_hw_init(dln2);
        if (ret < 0) {
                dev_err(dev, "failed to initialize hardware\n");
-               goto out_cleanup;
+               goto out_stop_rx;
        }
 
        ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
        if (ret != 0) {
                dev_err(dev, "failed to add mfd devices to core\n");
-               goto out_cleanup;
+               goto out_stop_rx;
        }
 
        return 0;
 
-out_cleanup:
+out_stop_rx:
+       dln2_stop_rx_urbs(dln2);
+
+out_free:
        dln2_free(dln2);
 
        return ret;
 }
 
+static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
+{
+       struct dln2_dev *dln2 = usb_get_intfdata(iface);
+
+       dln2_stop(dln2);
+
+       return 0;
+}
+
+static int dln2_resume(struct usb_interface *iface)
+{
+       struct dln2_dev *dln2 = usb_get_intfdata(iface);
+
+       dln2->disconnect = false;
+
+       return dln2_start_rx_urbs(dln2, GFP_NOIO);
+}
+
 static const struct usb_device_id dln2_table[] = {
        { USB_DEVICE(0xa257, 0x2013) },
        { }
@@ -772,6 +821,8 @@ static struct usb_driver dln2_driver = {
        .probe = dln2_probe,
        .disconnect = dln2_disconnect,
        .id_table = dln2_table,
+       .suspend = dln2_suspend,
+       .resume = dln2_resume,
 };
 
 module_usb_driver(dln2_driver);
index 321a2656fd004493e4536701a5d9932e71ace71b..7210ae28bf816cbbb612a219b5f57fd4c94a149c 100644 (file)
@@ -35,7 +35,7 @@ static const struct mfd_cell hi6421_devs[] = {
        { .name = "hi6421-regulator", },
 };
 
-static struct regmap_config hi6421_regmap_config = {
+static const struct regmap_config hi6421_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 8,
index df7b0642a5b4d28e6fa02fbc11ccbb1b2716ef4c..80cef048b9040234593eded531c810f7152cd332 100644 (file)
@@ -64,6 +64,9 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
        config = (struct intel_soc_pmic_config *)id->driver_data;
 
        pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+       if (!pmic)
+               return -ENOMEM;
+
        dev_set_drvdata(dev, pmic);
 
        pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
index 33aacd9baddc87155cf7d70716b67c8f433cd451..9498d6719847761f7f7a2b61d259ef8722a12739 100644 (file)
@@ -23,7 +23,7 @@ struct intel_soc_pmic_config {
        unsigned long irq_flags;
        struct mfd_cell *cell_dev;
        int n_cell_devs;
-       struct regmap_config *regmap_config;
+       const struct regmap_config *regmap_config;
        struct regmap_irq_chip *irq_chip;
 };
 
index c85e2ecb868ab71c22ab7a2a4fc1eae2d923b7f8..4cc1b324e971735615156cc76a6985d4efd5da08 100644 (file)
@@ -111,7 +111,7 @@ static struct mfd_cell crystal_cove_dev[] = {
        },
 };
 
-static struct regmap_config crystal_cove_regmap_config = {
+static const struct regmap_config crystal_cove_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 
index 8c29f7b27324f4980e1b73f98b7186c10107120a..d42fbb667d8cf41db556ee3eb0d6f00e13ecc6bf 100644 (file)
@@ -583,7 +583,7 @@ static bool lm3533_precious_register(struct device *dev, unsigned int reg)
        }
 }
 
-static struct regmap_config regmap_config = {
+static const struct regmap_config regmap_config = {
        .reg_bits       = 8,
        .val_bits       = 8,
        .max_register   = LM3533_REG_MAX,
index 5c38df35a84d0e649882304811874e26f045ddda..a56e4ba5227b0a63dbe3adc94609d714d4a98153 100644 (file)
@@ -75,6 +75,7 @@ static struct lpc_sch_info sch_chipset_info[] = {
        [LPC_QUARK_X1000] = {
                .io_size_gpio = GPIO_IO_SIZE,
                .irq_gpio = GPIO_IRQ_QUARK_X1000,
+               .io_size_wdt = WDT_IO_SIZE,
        },
 };
 
index 929795eae9fcbf2df33f18b81606980b56f8d7ff..760d08d7923d42304df008dcd1e40aa472521657 100644 (file)
@@ -111,17 +111,17 @@ static bool max77802_is_volatile_reg(struct device *dev, unsigned int reg)
                max77802_rtc_is_volatile_reg(dev, reg));
 }
 
-static struct regmap_config max77686_regmap_config = {
+static const struct regmap_config max77686_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
 
-static struct regmap_config max77686_rtc_regmap_config = {
+static const struct regmap_config max77686_rtc_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
 
-static struct regmap_config max77802_regmap_config = {
+static const struct regmap_config max77802_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .writeable_reg = max77802_is_accessible_reg,
@@ -205,24 +205,10 @@ static const struct of_device_id max77686_pmic_dt_match[] = {
        { },
 };
 
-static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device
-                                                                 *dev)
-{
-       struct max77686_platform_data *pd;
-
-       pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
-       if (!pd)
-               return NULL;
-
-       dev->platform_data = pd;
-       return pd;
-}
-
 static int max77686_i2c_probe(struct i2c_client *i2c,
                              const struct i2c_device_id *id)
 {
        struct max77686_dev *max77686 = NULL;
-       struct max77686_platform_data *pdata = dev_get_platdata(&i2c->dev);
        const struct of_device_id *match;
        unsigned int data;
        int ret = 0;
@@ -233,14 +219,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
        const struct mfd_cell *cells;
        int n_devs;
 
-       if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node && !pdata)
-               pdata = max77686_i2c_parse_dt_pdata(&i2c->dev);
-
-       if (!pdata) {
-               dev_err(&i2c->dev, "No platform data found.\n");
-               return -EINVAL;
-       }
-
        max77686 = devm_kzalloc(&i2c->dev,
                                sizeof(struct max77686_dev), GFP_KERNEL);
        if (!max77686)
@@ -259,7 +237,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
        max77686->dev = &i2c->dev;
        max77686->i2c = i2c;
 
-       max77686->wakeup = pdata->wakeup;
        max77686->irq = i2c->irq;
 
        if (max77686->type == TYPE_MAX77686) {
index ae3addb153a2e825f260959e1a52c93cc42051ef..68b84481156674891f1a5a1bedd7e3338b81a5fb 100644 (file)
@@ -46,7 +46,7 @@ static const struct of_device_id mc13xxx_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
 
-static struct regmap_config mc13xxx_regmap_i2c_config = {
+static const struct regmap_config mc13xxx_regmap_i2c_config = {
        .reg_bits = 8,
        .val_bits = 24,
 
index 702925e242c90597618bb1158faffa653516eae6..58a170e45d88ddc61bce928610703ba48cb20844 100644 (file)
@@ -48,7 +48,7 @@ static const struct of_device_id mc13xxx_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
 
-static struct regmap_config mc13xxx_regmap_spi_config = {
+static const struct regmap_config mc13xxx_regmap_spi_config = {
        .reg_bits = 7,
        .pad_bits = 1,
        .val_bits = 24,
index 04cd54dd507c2c722d088bb1bc35dfa4b79fe38c..1d924d1533c02756124dffe1262e4333ed9b6b5f 100644 (file)
@@ -129,16 +129,6 @@ static inline u32 usbhs_read(void __iomem *base, u32 reg)
        return readl_relaxed(base + reg);
 }
 
-static inline void usbhs_writeb(void __iomem *base, u8 reg, u8 val)
-{
-       writeb_relaxed(val, base + reg);
-}
-
-static inline u8 usbhs_readb(void __iomem *base, u8 reg)
-{
-       return readb_relaxed(base + reg);
-}
-
 /*-------------------------------------------------------------------------*/
 
 /**
index 43664eb69c93f519dd72763799d7bee2dd6b4126..6155d123a84e762c3491674c12200e165c21ec94 100644 (file)
@@ -183,7 +183,7 @@ static int pcf50633_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume);
 
-static struct regmap_config pcf50633_regmap_config = {
+static const struct regmap_config pcf50633_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
new file mode 100644 (file)
index 0000000..f696328
--- /dev/null
@@ -0,0 +1,581 @@
+/*
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Author: Bjorn Andersson <bjorn.andersson@sonymobile.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/qcom_rpm.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/mfd/qcom-rpm.h>
+
+struct qcom_rpm_resource {
+       unsigned target_id;
+       unsigned status_id;
+       unsigned select_id;
+       unsigned size;
+};
+
+struct qcom_rpm_data {
+       u32 version;
+       const struct qcom_rpm_resource *resource_table;
+       unsigned n_resources;
+};
+
+struct qcom_rpm {
+       struct device *dev;
+       struct regmap *ipc_regmap;
+       unsigned ipc_offset;
+       unsigned ipc_bit;
+
+       struct completion ack;
+       struct mutex lock;
+
+       void __iomem *status_regs;
+       void __iomem *ctrl_regs;
+       void __iomem *req_regs;
+
+       u32 ack_status;
+
+       const struct qcom_rpm_data *data;
+};
+
+#define RPM_STATUS_REG(rpm, i) ((rpm)->status_regs + (i) * 4)
+#define RPM_CTRL_REG(rpm, i)   ((rpm)->ctrl_regs + (i) * 4)
+#define RPM_REQ_REG(rpm, i)    ((rpm)->req_regs + (i) * 4)
+
+#define RPM_REQUEST_TIMEOUT    (5 * HZ)
+
+#define RPM_REQUEST_CONTEXT    3
+#define RPM_REQ_SELECT         11
+#define RPM_ACK_CONTEXT                15
+#define RPM_ACK_SELECTOR       23
+#define RPM_SELECT_SIZE                7
+
+#define RPM_NOTIFICATION       BIT(30)
+#define RPM_REJECTED           BIT(31)
+
+#define RPM_SIGNAL             BIT(2)
+
+static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = {
+       [QCOM_RPM_CXO_CLK] =                    { 25, 9, 5, 1 },
+       [QCOM_RPM_PXO_CLK] =                    { 26, 10, 6, 1 },
+       [QCOM_RPM_APPS_FABRIC_CLK] =            { 27, 11, 8, 1 },
+       [QCOM_RPM_SYS_FABRIC_CLK] =             { 28, 12, 9, 1 },
+       [QCOM_RPM_MM_FABRIC_CLK] =              { 29, 13, 10, 1 },
+       [QCOM_RPM_DAYTONA_FABRIC_CLK] =         { 30, 14, 11, 1 },
+       [QCOM_RPM_SFPB_CLK] =                   { 31, 15, 12, 1 },
+       [QCOM_RPM_CFPB_CLK] =                   { 32, 16, 13, 1 },
+       [QCOM_RPM_MMFPB_CLK] =                  { 33, 17, 14, 1 },
+       [QCOM_RPM_EBI1_CLK] =                   { 34, 18, 16, 1 },
+       [QCOM_RPM_APPS_FABRIC_HALT] =           { 35, 19, 18, 1 },
+       [QCOM_RPM_APPS_FABRIC_MODE] =           { 37, 20, 19, 1 },
+       [QCOM_RPM_APPS_FABRIC_IOCTL] =          { 40, 21, 20, 1 },
+       [QCOM_RPM_APPS_FABRIC_ARB] =            { 41, 22, 21, 12 },
+       [QCOM_RPM_SYS_FABRIC_HALT] =            { 53, 23, 22, 1 },
+       [QCOM_RPM_SYS_FABRIC_MODE] =            { 55, 24, 23, 1 },
+       [QCOM_RPM_SYS_FABRIC_IOCTL] =           { 58, 25, 24, 1 },
+       [QCOM_RPM_SYS_FABRIC_ARB] =             { 59, 26, 25, 30 },
+       [QCOM_RPM_MM_FABRIC_HALT] =             { 89, 27, 26, 1 },
+       [QCOM_RPM_MM_FABRIC_MODE] =             { 91, 28, 27, 1 },
+       [QCOM_RPM_MM_FABRIC_IOCTL] =            { 94, 29, 28, 1 },
+       [QCOM_RPM_MM_FABRIC_ARB] =              { 95, 30, 29, 21 },
+       [QCOM_RPM_PM8921_SMPS1] =               { 116, 31, 30, 2 },
+       [QCOM_RPM_PM8921_SMPS2] =               { 118, 33, 31, 2 },
+       [QCOM_RPM_PM8921_SMPS3] =               { 120, 35, 32, 2 },
+       [QCOM_RPM_PM8921_SMPS4] =               { 122, 37, 33, 2 },
+       [QCOM_RPM_PM8921_SMPS5] =               { 124, 39, 34, 2 },
+       [QCOM_RPM_PM8921_SMPS6] =               { 126, 41, 35, 2 },
+       [QCOM_RPM_PM8921_SMPS7] =               { 128, 43, 36, 2 },
+       [QCOM_RPM_PM8921_SMPS8] =               { 130, 45, 37, 2 },
+       [QCOM_RPM_PM8921_LDO1] =                { 132, 47, 38, 2 },
+       [QCOM_RPM_PM8921_LDO2] =                { 134, 49, 39, 2 },
+       [QCOM_RPM_PM8921_LDO3] =                { 136, 51, 40, 2 },
+       [QCOM_RPM_PM8921_LDO4] =                { 138, 53, 41, 2 },
+       [QCOM_RPM_PM8921_LDO5] =                { 140, 55, 42, 2 },
+       [QCOM_RPM_PM8921_LDO6] =                { 142, 57, 43, 2 },
+       [QCOM_RPM_PM8921_LDO7] =                { 144, 59, 44, 2 },
+       [QCOM_RPM_PM8921_LDO8] =                { 146, 61, 45, 2 },
+       [QCOM_RPM_PM8921_LDO9] =                { 148, 63, 46, 2 },
+       [QCOM_RPM_PM8921_LDO10] =               { 150, 65, 47, 2 },
+       [QCOM_RPM_PM8921_LDO11] =               { 152, 67, 48, 2 },
+       [QCOM_RPM_PM8921_LDO12] =               { 154, 69, 49, 2 },
+       [QCOM_RPM_PM8921_LDO13] =               { 156, 71, 50, 2 },
+       [QCOM_RPM_PM8921_LDO14] =               { 158, 73, 51, 2 },
+       [QCOM_RPM_PM8921_LDO15] =               { 160, 75, 52, 2 },
+       [QCOM_RPM_PM8921_LDO16] =               { 162, 77, 53, 2 },
+       [QCOM_RPM_PM8921_LDO17] =               { 164, 79, 54, 2 },
+       [QCOM_RPM_PM8921_LDO18] =               { 166, 81, 55, 2 },
+       [QCOM_RPM_PM8921_LDO19] =               { 168, 83, 56, 2 },
+       [QCOM_RPM_PM8921_LDO20] =               { 170, 85, 57, 2 },
+       [QCOM_RPM_PM8921_LDO21] =               { 172, 87, 58, 2 },
+       [QCOM_RPM_PM8921_LDO22] =               { 174, 89, 59, 2 },
+       [QCOM_RPM_PM8921_LDO23] =               { 176, 91, 60, 2 },
+       [QCOM_RPM_PM8921_LDO24] =               { 178, 93, 61, 2 },
+       [QCOM_RPM_PM8921_LDO25] =               { 180, 95, 62, 2 },
+       [QCOM_RPM_PM8921_LDO26] =               { 182, 97, 63, 2 },
+       [QCOM_RPM_PM8921_LDO27] =               { 184, 99, 64, 2 },
+       [QCOM_RPM_PM8921_LDO28] =               { 186, 101, 65, 2 },
+       [QCOM_RPM_PM8921_LDO29] =               { 188, 103, 66, 2 },
+       [QCOM_RPM_PM8921_CLK1] =                { 190, 105, 67, 2 },
+       [QCOM_RPM_PM8921_CLK2] =                { 192, 107, 68, 2 },
+       [QCOM_RPM_PM8921_LVS1] =                { 194, 109, 69, 1 },
+       [QCOM_RPM_PM8921_LVS2] =                { 195, 110, 70, 1 },
+       [QCOM_RPM_PM8921_LVS3] =                { 196, 111, 71, 1 },
+       [QCOM_RPM_PM8921_LVS4] =                { 197, 112, 72, 1 },
+       [QCOM_RPM_PM8921_LVS5] =                { 198, 113, 73, 1 },
+       [QCOM_RPM_PM8921_LVS6] =                { 199, 114, 74, 1 },
+       [QCOM_RPM_PM8921_LVS7] =                { 200, 115, 75, 1 },
+       [QCOM_RPM_PM8821_SMPS1] =               { 201, 116, 76, 2 },
+       [QCOM_RPM_PM8821_SMPS2] =               { 203, 118, 77, 2 },
+       [QCOM_RPM_PM8821_LDO1] =                { 205, 120, 78, 2 },
+       [QCOM_RPM_PM8921_NCP] =                 { 207, 122, 80, 2 },
+       [QCOM_RPM_CXO_BUFFERS] =                { 209, 124, 81, 1 },
+       [QCOM_RPM_USB_OTG_SWITCH] =             { 210, 125, 82, 1 },
+       [QCOM_RPM_HDMI_SWITCH] =                { 211, 126, 83, 1 },
+       [QCOM_RPM_DDR_DMM] =                    { 212, 127, 84, 2 },
+       [QCOM_RPM_VDDMIN_GPIO] =                { 215, 131, 89, 1 },
+};
+
+static const struct qcom_rpm_data apq8064_template = {
+       .version = 3,
+       .resource_table = apq8064_rpm_resource_table,
+       .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
+};
+
+static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
+       [QCOM_RPM_CXO_CLK] =                    { 32, 12, 5, 1 },
+       [QCOM_RPM_PXO_CLK] =                    { 33, 13, 6, 1 },
+       [QCOM_RPM_PLL_4] =                      { 34, 14, 7, 1 },
+       [QCOM_RPM_APPS_FABRIC_CLK] =            { 35, 15, 8, 1 },
+       [QCOM_RPM_SYS_FABRIC_CLK] =             { 36, 16, 9, 1 },
+       [QCOM_RPM_MM_FABRIC_CLK] =              { 37, 17, 10, 1 },
+       [QCOM_RPM_DAYTONA_FABRIC_CLK] =         { 38, 18, 11, 1 },
+       [QCOM_RPM_SFPB_CLK] =                   { 39, 19, 12, 1 },
+       [QCOM_RPM_CFPB_CLK] =                   { 40, 20, 13, 1 },
+       [QCOM_RPM_MMFPB_CLK] =                  { 41, 21, 14, 1 },
+       [QCOM_RPM_SMI_CLK] =                    { 42, 22, 15, 1 },
+       [QCOM_RPM_EBI1_CLK] =                   { 43, 23, 16, 1 },
+       [QCOM_RPM_APPS_L2_CACHE_CTL] =          { 44, 24, 17, 1 },
+       [QCOM_RPM_APPS_FABRIC_HALT] =           { 45, 25, 18, 2 },
+       [QCOM_RPM_APPS_FABRIC_MODE] =           { 47, 26, 19, 3 },
+       [QCOM_RPM_APPS_FABRIC_ARB] =            { 51, 28, 21, 6 },
+       [QCOM_RPM_SYS_FABRIC_HALT] =            { 63, 29, 22, 2 },
+       [QCOM_RPM_SYS_FABRIC_MODE] =            { 65, 30, 23, 3 },
+       [QCOM_RPM_SYS_FABRIC_ARB] =             { 69, 32, 25, 22 },
+       [QCOM_RPM_MM_FABRIC_HALT] =             { 105, 33, 26, 2 },
+       [QCOM_RPM_MM_FABRIC_MODE] =             { 107, 34, 27, 3 },
+       [QCOM_RPM_MM_FABRIC_ARB] =              { 111, 36, 29, 23 },
+       [QCOM_RPM_PM8901_SMPS0] =               { 134, 37, 30, 2 },
+       [QCOM_RPM_PM8901_SMPS1] =               { 136, 39, 31, 2 },
+       [QCOM_RPM_PM8901_SMPS2] =               { 138, 41, 32, 2 },
+       [QCOM_RPM_PM8901_SMPS3] =               { 140, 43, 33, 2 },
+       [QCOM_RPM_PM8901_SMPS4] =               { 142, 45, 34, 2 },
+       [QCOM_RPM_PM8901_LDO0] =                { 144, 47, 35, 2 },
+       [QCOM_RPM_PM8901_LDO1] =                { 146, 49, 36, 2 },
+       [QCOM_RPM_PM8901_LDO2] =                { 148, 51, 37, 2 },
+       [QCOM_RPM_PM8901_LDO3] =                { 150, 53, 38, 2 },
+       [QCOM_RPM_PM8901_LDO4] =                { 152, 55, 39, 2 },
+       [QCOM_RPM_PM8901_LDO5] =                { 154, 57, 40, 2 },
+       [QCOM_RPM_PM8901_LDO6] =                { 156, 59, 41, 2 },
+       [QCOM_RPM_PM8901_LVS0] =                { 158, 61, 42, 1 },
+       [QCOM_RPM_PM8901_LVS1] =                { 159, 62, 43, 1 },
+       [QCOM_RPM_PM8901_LVS2] =                { 160, 63, 44, 1 },
+       [QCOM_RPM_PM8901_LVS3] =                { 161, 64, 45, 1 },
+       [QCOM_RPM_PM8901_MVS] =                 { 162, 65, 46, 1 },
+       [QCOM_RPM_PM8058_SMPS0] =               { 163, 66, 47, 2 },
+       [QCOM_RPM_PM8058_SMPS1] =               { 165, 68, 48, 2 },
+       [QCOM_RPM_PM8058_SMPS2] =               { 167, 70, 49, 2 },
+       [QCOM_RPM_PM8058_SMPS3] =               { 169, 72, 50, 2 },
+       [QCOM_RPM_PM8058_SMPS4] =               { 171, 74, 51, 2 },
+       [QCOM_RPM_PM8058_LDO0] =                { 173, 76, 52, 2 },
+       [QCOM_RPM_PM8058_LDO1] =                { 175, 78, 53, 2 },
+       [QCOM_RPM_PM8058_LDO2] =                { 177, 80, 54, 2 },
+       [QCOM_RPM_PM8058_LDO3] =                { 179, 82, 55, 2 },
+       [QCOM_RPM_PM8058_LDO4] =                { 181, 84, 56, 2 },
+       [QCOM_RPM_PM8058_LDO5] =                { 183, 86, 57, 2 },
+       [QCOM_RPM_PM8058_LDO6] =                { 185, 88, 58, 2 },
+       [QCOM_RPM_PM8058_LDO7] =                { 187, 90, 59, 2 },
+       [QCOM_RPM_PM8058_LDO8] =                { 189, 92, 60, 2 },
+       [QCOM_RPM_PM8058_LDO9] =                { 191, 94, 61, 2 },
+       [QCOM_RPM_PM8058_LDO10] =               { 193, 96, 62, 2 },
+       [QCOM_RPM_PM8058_LDO11] =               { 195, 98, 63, 2 },
+       [QCOM_RPM_PM8058_LDO12] =               { 197, 100, 64, 2 },
+       [QCOM_RPM_PM8058_LDO13] =               { 199, 102, 65, 2 },
+       [QCOM_RPM_PM8058_LDO14] =               { 201, 104, 66, 2 },
+       [QCOM_RPM_PM8058_LDO15] =               { 203, 106, 67, 2 },
+       [QCOM_RPM_PM8058_LDO16] =               { 205, 108, 68, 2 },
+       [QCOM_RPM_PM8058_LDO17] =               { 207, 110, 69, 2 },
+       [QCOM_RPM_PM8058_LDO18] =               { 209, 112, 70, 2 },
+       [QCOM_RPM_PM8058_LDO19] =               { 211, 114, 71, 2 },
+       [QCOM_RPM_PM8058_LDO20] =               { 213, 116, 72, 2 },
+       [QCOM_RPM_PM8058_LDO21] =               { 215, 118, 73, 2 },
+       [QCOM_RPM_PM8058_LDO22] =               { 217, 120, 74, 2 },
+       [QCOM_RPM_PM8058_LDO23] =               { 219, 122, 75, 2 },
+       [QCOM_RPM_PM8058_LDO24] =               { 221, 124, 76, 2 },
+       [QCOM_RPM_PM8058_LDO25] =               { 223, 126, 77, 2 },
+       [QCOM_RPM_PM8058_LVS0] =                { 225, 128, 78, 1 },
+       [QCOM_RPM_PM8058_LVS1] =                { 226, 129, 79, 1 },
+       [QCOM_RPM_PM8058_NCP] =                 { 227, 130, 80, 2 },
+       [QCOM_RPM_CXO_BUFFERS] =                { 229, 132, 81, 1 },
+};
+
+static const struct qcom_rpm_data msm8660_template = {
+       .version = 2,
+       .resource_table = msm8660_rpm_resource_table,
+       .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
+};
+
+static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
+       [QCOM_RPM_CXO_CLK] =                    { 25, 9, 5, 1 },
+       [QCOM_RPM_PXO_CLK] =                    { 26, 10, 6, 1 },
+       [QCOM_RPM_APPS_FABRIC_CLK] =            { 27, 11, 8, 1 },
+       [QCOM_RPM_SYS_FABRIC_CLK] =             { 28, 12, 9, 1 },
+       [QCOM_RPM_MM_FABRIC_CLK] =              { 29, 13, 10, 1 },
+       [QCOM_RPM_DAYTONA_FABRIC_CLK] =         { 30, 14, 11, 1 },
+       [QCOM_RPM_SFPB_CLK] =                   { 31, 15, 12, 1 },
+       [QCOM_RPM_CFPB_CLK] =                   { 32, 16, 13, 1 },
+       [QCOM_RPM_MMFPB_CLK] =                  { 33, 17, 14, 1 },
+       [QCOM_RPM_EBI1_CLK] =                   { 34, 18, 16, 1 },
+       [QCOM_RPM_APPS_FABRIC_HALT] =           { 35, 19, 18, 1 },
+       [QCOM_RPM_APPS_FABRIC_MODE] =           { 37, 20, 19, 1 },
+       [QCOM_RPM_APPS_FABRIC_IOCTL] =          { 40, 21, 20, 1 },
+       [QCOM_RPM_APPS_FABRIC_ARB] =            { 41, 22, 21, 12 },
+       [QCOM_RPM_SYS_FABRIC_HALT] =            { 53, 23, 22, 1 },
+       [QCOM_RPM_SYS_FABRIC_MODE] =            { 55, 24, 23, 1 },
+       [QCOM_RPM_SYS_FABRIC_IOCTL] =           { 58, 25, 24, 1 },
+       [QCOM_RPM_SYS_FABRIC_ARB] =             { 59, 26, 25, 29 },
+       [QCOM_RPM_MM_FABRIC_HALT] =             { 88, 27, 26, 1 },
+       [QCOM_RPM_MM_FABRIC_MODE] =             { 90, 28, 27, 1 },
+       [QCOM_RPM_MM_FABRIC_IOCTL] =            { 93, 29, 28, 1 },
+       [QCOM_RPM_MM_FABRIC_ARB] =              { 94, 30, 29, 23 },
+       [QCOM_RPM_PM8921_SMPS1] =               { 117, 31, 30, 2 },
+       [QCOM_RPM_PM8921_SMPS2] =               { 119, 33, 31, 2 },
+       [QCOM_RPM_PM8921_SMPS3] =               { 121, 35, 32, 2 },
+       [QCOM_RPM_PM8921_SMPS4] =               { 123, 37, 33, 2 },
+       [QCOM_RPM_PM8921_SMPS5] =               { 125, 39, 34, 2 },
+       [QCOM_RPM_PM8921_SMPS6] =               { 127, 41, 35, 2 },
+       [QCOM_RPM_PM8921_SMPS7] =               { 129, 43, 36, 2 },
+       [QCOM_RPM_PM8921_SMPS8] =               { 131, 45, 37, 2 },
+       [QCOM_RPM_PM8921_LDO1] =                { 133, 47, 38, 2 },
+       [QCOM_RPM_PM8921_LDO2] =                { 135, 49, 39, 2 },
+       [QCOM_RPM_PM8921_LDO3] =                { 137, 51, 40, 2 },
+       [QCOM_RPM_PM8921_LDO4] =                { 139, 53, 41, 2 },
+       [QCOM_RPM_PM8921_LDO5] =                { 141, 55, 42, 2 },
+       [QCOM_RPM_PM8921_LDO6] =                { 143, 57, 43, 2 },
+       [QCOM_RPM_PM8921_LDO7] =                { 145, 59, 44, 2 },
+       [QCOM_RPM_PM8921_LDO8] =                { 147, 61, 45, 2 },
+       [QCOM_RPM_PM8921_LDO9] =                { 149, 63, 46, 2 },
+       [QCOM_RPM_PM8921_LDO10] =               { 151, 65, 47, 2 },
+       [QCOM_RPM_PM8921_LDO11] =               { 153, 67, 48, 2 },
+       [QCOM_RPM_PM8921_LDO12] =               { 155, 69, 49, 2 },
+       [QCOM_RPM_PM8921_LDO13] =               { 157, 71, 50, 2 },
+       [QCOM_RPM_PM8921_LDO14] =               { 159, 73, 51, 2 },
+       [QCOM_RPM_PM8921_LDO15] =               { 161, 75, 52, 2 },
+       [QCOM_RPM_PM8921_LDO16] =               { 163, 77, 53, 2 },
+       [QCOM_RPM_PM8921_LDO17] =               { 165, 79, 54, 2 },
+       [QCOM_RPM_PM8921_LDO18] =               { 167, 81, 55, 2 },
+       [QCOM_RPM_PM8921_LDO19] =               { 169, 83, 56, 2 },
+       [QCOM_RPM_PM8921_LDO20] =               { 171, 85, 57, 2 },
+       [QCOM_RPM_PM8921_LDO21] =               { 173, 87, 58, 2 },
+       [QCOM_RPM_PM8921_LDO22] =               { 175, 89, 59, 2 },
+       [QCOM_RPM_PM8921_LDO23] =               { 177, 91, 60, 2 },
+       [QCOM_RPM_PM8921_LDO24] =               { 179, 93, 61, 2 },
+       [QCOM_RPM_PM8921_LDO25] =               { 181, 95, 62, 2 },
+       [QCOM_RPM_PM8921_LDO26] =               { 183, 97, 63, 2 },
+       [QCOM_RPM_PM8921_LDO27] =               { 185, 99, 64, 2 },
+       [QCOM_RPM_PM8921_LDO28] =               { 187, 101, 65, 2 },
+       [QCOM_RPM_PM8921_LDO29] =               { 189, 103, 66, 2 },
+       [QCOM_RPM_PM8921_CLK1] =                { 191, 105, 67, 2 },
+       [QCOM_RPM_PM8921_CLK2] =                { 193, 107, 68, 2 },
+       [QCOM_RPM_PM8921_LVS1] =                { 195, 109, 69, 1 },
+       [QCOM_RPM_PM8921_LVS2] =                { 196, 110, 70, 1 },
+       [QCOM_RPM_PM8921_LVS3] =                { 197, 111, 71, 1 },
+       [QCOM_RPM_PM8921_LVS4] =                { 198, 112, 72, 1 },
+       [QCOM_RPM_PM8921_LVS5] =                { 199, 113, 73, 1 },
+       [QCOM_RPM_PM8921_LVS6] =                { 200, 114, 74, 1 },
+       [QCOM_RPM_PM8921_LVS7] =                { 201, 115, 75, 1 },
+       [QCOM_RPM_PM8921_NCP] =                 { 202, 116, 80, 2 },
+       [QCOM_RPM_CXO_BUFFERS] =                { 204, 118, 81, 1 },
+       [QCOM_RPM_USB_OTG_SWITCH] =             { 205, 119, 82, 1 },
+       [QCOM_RPM_HDMI_SWITCH] =                { 206, 120, 83, 1 },
+       [QCOM_RPM_DDR_DMM] =                    { 207, 121, 84, 2 },
+};
+
+static const struct qcom_rpm_data msm8960_template = {
+       .version = 3,
+       .resource_table = msm8960_rpm_resource_table,
+       .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
+};
+
+static const struct of_device_id qcom_rpm_of_match[] = {
+       { .compatible = "qcom,rpm-apq8064", .data = &apq8064_template },
+       { .compatible = "qcom,rpm-msm8660", .data = &msm8660_template },
+       { .compatible = "qcom,rpm-msm8960", .data = &msm8960_template },
+       { }
+};
+MODULE_DEVICE_TABLE(of, qcom_rpm_of_match);
+
+int qcom_rpm_write(struct qcom_rpm *rpm,
+                  int state,
+                  int resource,
+                  u32 *buf, size_t count)
+{
+       const struct qcom_rpm_resource *res;
+       const struct qcom_rpm_data *data = rpm->data;
+       u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
+       int left;
+       int ret = 0;
+       int i;
+
+       if (WARN_ON(resource < 0 || resource >= data->n_resources))
+               return -EINVAL;
+
+       res = &data->resource_table[resource];
+       if (WARN_ON(res->size != count))
+               return -EINVAL;
+
+       mutex_lock(&rpm->lock);
+
+       for (i = 0; i < res->size; i++)
+               writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
+
+       bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
+       for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
+               writel_relaxed(sel_mask[i],
+                              RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
+       }
+
+       writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
+
+       reinit_completion(&rpm->ack);
+       regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
+
+       left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+       if (!left)
+               ret = -ETIMEDOUT;
+       else if (rpm->ack_status & RPM_REJECTED)
+               ret = -EIO;
+
+       mutex_unlock(&rpm->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_write);
+
+static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
+{
+       struct qcom_rpm *rpm = dev;
+       u32 ack;
+       int i;
+
+       ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+       for (i = 0; i < RPM_SELECT_SIZE; i++)
+               writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
+       writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+
+       if (ack & RPM_NOTIFICATION) {
+               dev_warn(rpm->dev, "ignoring notification!\n");
+       } else {
+               rpm->ack_status = ack;
+               complete(&rpm->ack);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_rpm_err_interrupt(int irq, void *dev)
+{
+       struct qcom_rpm *rpm = dev;
+
+       regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
+       dev_err(rpm->dev, "RPM triggered fatal error\n");
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_rpm_wakeup_interrupt(int irq, void *dev)
+{
+       return IRQ_HANDLED;
+}
+
+static int qcom_rpm_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       struct device_node *syscon_np;
+       struct resource *res;
+       struct qcom_rpm *rpm;
+       u32 fw_version[3];
+       int irq_wakeup;
+       int irq_ack;
+       int irq_err;
+       int ret;
+
+       rpm = devm_kzalloc(&pdev->dev, sizeof(*rpm), GFP_KERNEL);
+       if (!rpm)
+               return -ENOMEM;
+
+       rpm->dev = &pdev->dev;
+       mutex_init(&rpm->lock);
+       init_completion(&rpm->ack);
+
+       irq_ack = platform_get_irq_byname(pdev, "ack");
+       if (irq_ack < 0) {
+               dev_err(&pdev->dev, "required ack interrupt missing\n");
+               return irq_ack;
+       }
+
+       irq_err = platform_get_irq_byname(pdev, "err");
+       if (irq_err < 0) {
+               dev_err(&pdev->dev, "required err interrupt missing\n");
+               return irq_err;
+       }
+
+       irq_wakeup = platform_get_irq_byname(pdev, "wakeup");
+       if (irq_wakeup < 0) {
+               dev_err(&pdev->dev, "required wakeup interrupt missing\n");
+               return irq_wakeup;
+       }
+
+       match = of_match_device(qcom_rpm_of_match, &pdev->dev);
+       rpm->data = match->data;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       rpm->status_regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(rpm->status_regs))
+               return PTR_ERR(rpm->status_regs);
+       rpm->ctrl_regs = rpm->status_regs + 0x400;
+       rpm->req_regs = rpm->status_regs + 0x600;
+
+       syscon_np = of_parse_phandle(pdev->dev.of_node, "qcom,ipc", 0);
+       if (!syscon_np) {
+               dev_err(&pdev->dev, "no qcom,ipc node\n");
+               return -ENODEV;
+       }
+
+       rpm->ipc_regmap = syscon_node_to_regmap(syscon_np);
+       if (IS_ERR(rpm->ipc_regmap))
+               return PTR_ERR(rpm->ipc_regmap);
+
+       ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,ipc", 1,
+                                        &rpm->ipc_offset);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "no offset in qcom,ipc\n");
+               return -EINVAL;
+       }
+
+       ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,ipc", 2,
+                                        &rpm->ipc_bit);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "no bit in qcom,ipc\n");
+               return -EINVAL;
+       }
+
+       dev_set_drvdata(&pdev->dev, rpm);
+
+       fw_version[0] = readl(RPM_STATUS_REG(rpm, 0));
+       fw_version[1] = readl(RPM_STATUS_REG(rpm, 1));
+       fw_version[2] = readl(RPM_STATUS_REG(rpm, 2));
+       if (fw_version[0] != rpm->data->version) {
+               dev_err(&pdev->dev,
+                       "RPM version %u.%u.%u incompatible with driver version %u",
+                       fw_version[0],
+                       fw_version[1],
+                       fw_version[2],
+                       rpm->data->version);
+               return -EFAULT;
+       }
+
+       dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
+                                                       fw_version[1],
+                                                       fw_version[2]);
+
+       ret = devm_request_irq(&pdev->dev,
+                              irq_ack,
+                              qcom_rpm_ack_interrupt,
+                              IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
+                              "qcom_rpm_ack",
+                              rpm);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to request ack interrupt\n");
+               return ret;
+       }
+
+       ret = irq_set_irq_wake(irq_ack, 1);
+       if (ret)
+               dev_warn(&pdev->dev, "failed to mark ack irq as wakeup\n");
+
+       ret = devm_request_irq(&pdev->dev,
+                              irq_err,
+                              qcom_rpm_err_interrupt,
+                              IRQF_TRIGGER_RISING,
+                              "qcom_rpm_err",
+                              rpm);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to request err interrupt\n");
+               return ret;
+       }
+
+       ret = devm_request_irq(&pdev->dev,
+                              irq_wakeup,
+                              qcom_rpm_wakeup_interrupt,
+                              IRQF_TRIGGER_RISING,
+                              "qcom_rpm_wakeup",
+                              rpm);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to request wakeup interrupt\n");
+               return ret;
+       }
+
+       ret = irq_set_irq_wake(irq_wakeup, 1);
+       if (ret)
+               dev_warn(&pdev->dev, "failed to mark wakeup irq as wakeup\n");
+
+       return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+static int qcom_rpm_remove(struct platform_device *pdev)
+{
+       of_platform_depopulate(&pdev->dev);
+       return 0;
+}
+
+static struct platform_driver qcom_rpm_driver = {
+       .probe = qcom_rpm_probe,
+       .remove = qcom_rpm_remove,
+       .driver  = {
+               .name  = "qcom_rpm",
+               .of_match_table = qcom_rpm_of_match,
+       },
+};
+
+static int __init qcom_rpm_init(void)
+{
+       return platform_driver_register(&qcom_rpm_driver);
+}
+arch_initcall(qcom_rpm_init);
+
+static void __exit qcom_rpm_exit(void)
+{
+       platform_driver_unregister(&qcom_rpm_driver);
+}
+module_exit(qcom_rpm_exit)
+
+MODULE_DESCRIPTION("Qualcomm Resource Power Manager driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
index 663f8a37aa6b27dd263e1f4d1c1345cff141b782..2d64430c719bca36615d0a88ca58a02413c5090e 100644 (file)
@@ -222,7 +222,7 @@ static struct regmap_bus retu_bus = {
        .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
 };
 
-static struct regmap_config retu_config = {
+static const struct regmap_config retu_config = {
        .reg_bits = 8,
        .val_bits = 16,
 };
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
new file mode 100644 (file)
index 0000000..db395a6
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * MFD core driver for the Richtek RT5033.
+ *
+ * RT5033 comprises multiple sub-devices switcing charger, fuel gauge,
+ * flash LED, current source, LDO and BUCK regulators.
+ *
+ * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published bythe Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rt5033.h>
+#include <linux/mfd/rt5033-private.h>
+
+static const struct regmap_irq rt5033_irqs[] = {
+       { .mask = RT5033_PMIC_IRQ_BUCKOCP, },
+       { .mask = RT5033_PMIC_IRQ_BUCKLV, },
+       { .mask = RT5033_PMIC_IRQ_SAFELDOLV, },
+       { .mask = RT5033_PMIC_IRQ_LDOLV, },
+       { .mask = RT5033_PMIC_IRQ_OT, },
+       { .mask = RT5033_PMIC_IRQ_VDDA_UV, },
+};
+
+static const struct regmap_irq_chip rt5033_irq_chip = {
+       .name           = "rt5033",
+       .status_base    = RT5033_REG_PMIC_IRQ_STAT,
+       .mask_base      = RT5033_REG_PMIC_IRQ_CTRL,
+       .mask_invert    = true,
+       .num_regs       = 1,
+       .irqs           = rt5033_irqs,
+       .num_irqs       = ARRAY_SIZE(rt5033_irqs),
+};
+
+static const struct mfd_cell rt5033_devs[] = {
+       { .name = "rt5033-regulator", },
+       {
+               .name = "rt5033-charger",
+               .of_compatible = "richtek,rt5033-charger",
+       }, {
+               .name = "rt5033-battery",
+               .of_compatible = "richtek,rt5033-battery",
+       },
+};
+
+static const struct regmap_config rt5033_regmap_config = {
+       .reg_bits       = 8,
+       .val_bits       = 8,
+       .max_register   = RT5033_REG_END,
+};
+
+static int rt5033_i2c_probe(struct i2c_client *i2c,
+                               const struct i2c_device_id *id)
+{
+       struct rt5033_dev *rt5033;
+       unsigned int dev_id;
+       int ret;
+
+       rt5033 = devm_kzalloc(&i2c->dev, sizeof(*rt5033), GFP_KERNEL);
+       if (!rt5033)
+               return -ENOMEM;
+
+       i2c_set_clientdata(i2c, rt5033);
+       rt5033->dev = &i2c->dev;
+       rt5033->irq = i2c->irq;
+       rt5033->wakeup = true;
+
+       rt5033->regmap = devm_regmap_init_i2c(i2c, &rt5033_regmap_config);
+       if (IS_ERR(rt5033->regmap)) {
+               dev_err(&i2c->dev, "Failed to allocate register map.\n");
+               return PTR_ERR(rt5033->regmap);
+       }
+
+       ret = regmap_read(rt5033->regmap, RT5033_REG_DEVICE_ID, &dev_id);
+       if (ret) {
+               dev_err(&i2c->dev, "Device not found\n");
+               return -ENODEV;
+       }
+       dev_info(&i2c->dev, "Device found Device ID: %04x\n", dev_id);
+
+       ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq,
+                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                       0, &rt5033_irq_chip, &rt5033->irq_data);
+       if (ret) {
+               dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
+                                                       rt5033->irq, ret);
+               return ret;
+       }
+
+       ret = mfd_add_devices(rt5033->dev, -1, rt5033_devs,
+                       ARRAY_SIZE(rt5033_devs), NULL, 0,
+                       regmap_irq_get_domain(rt5033->irq_data));
+       if (ret < 0) {
+               dev_err(&i2c->dev, "Failed to add RT5033 child devices.\n");
+               return ret;
+       }
+
+       device_init_wakeup(rt5033->dev, rt5033->wakeup);
+
+       return 0;
+}
+
+static int rt5033_i2c_remove(struct i2c_client *i2c)
+{
+       mfd_remove_devices(&i2c->dev);
+
+       return 0;
+}
+
+static const struct i2c_device_id rt5033_i2c_id[] = {
+       { "rt5033", },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, rt5033_i2c_id);
+
+static const struct of_device_id rt5033_dt_match[] = {
+       { .compatible = "richtek,rt5033", },
+       { }
+};
+
+static struct i2c_driver rt5033_driver = {
+       .driver = {
+               .name = "rt5033",
+               .of_match_table = of_match_ptr(rt5033_dt_match),
+       },
+       .probe = rt5033_i2c_probe,
+       .remove = rt5033_i2c_remove,
+       .id_table = rt5033_i2c_id,
+};
+module_i2c_driver(rt5033_driver);
+
+MODULE_ALIAS("i2c:rt5033");
+MODULE_DESCRIPTION("Richtek RT5033 multi-function core driver");
+MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
+MODULE_LICENSE("GPL");
index 210d1f85679e50dca4cbb034d4bd4ce91c1bb23f..ede50244f265b14d950fba0448652960304fa971 100644 (file)
@@ -681,9 +681,27 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
 #ifdef CONFIG_PM
 static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
 {
+       struct rtsx_ucr *ucr =
+               (struct rtsx_ucr *)usb_get_intfdata(intf);
+       u16 val = 0;
+
        dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
                        __func__, message.event);
 
+       if (PMSG_IS_AUTO(message)) {
+               if (mutex_trylock(&ucr->dev_mutex)) {
+                       rtsx_usb_get_card_status(ucr, &val);
+                       mutex_unlock(&ucr->dev_mutex);
+
+                       /* Defer the autosuspend if card exists */
+                       if (val & (SD_CD | MS_CD))
+                               return -EAGAIN;
+               } else {
+                       /* There is an ongoing operation*/
+                       return -EAGAIN;
+               }
+       }
+
        return 0;
 }
 
index 90112d4cc9059199ff1d4855bdadb42eb867a8f3..03246880d4840643c2570d6649f2ffbbccf88cc7 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/mfd/smsc.h>
 #include <linux/of_platform.h>
 
-static struct regmap_config smsc_regmap_config = {
+static const struct regmap_config smsc_regmap_config = {
                .reg_bits = 8,
                .val_bits = 8,
                .max_register = SMSC_VEN_ID_H,
index 2f2e9f06257116ebe2040cf85712caa2ec0ed7f8..191173166d6532420c78b37735158204b2bd39a8 100644 (file)
@@ -41,6 +41,14 @@ static const struct resource sun6i_a31_apb0_gates_clk_res[] = {
        },
 };
 
+static const struct resource sun6i_a31_ir_clk_res[] = {
+       {
+               .start = 0x54,
+               .end = 0x57,
+               .flags = IORESOURCE_MEM,
+       },
+};
+
 static const struct resource sun6i_a31_apb0_rstc_res[] = {
        {
                .start = 0xb0,
@@ -68,6 +76,12 @@ static const struct mfd_cell sun6i_a31_prcm_subdevs[] = {
                .num_resources = ARRAY_SIZE(sun6i_a31_apb0_gates_clk_res),
                .resources = sun6i_a31_apb0_gates_clk_res,
        },
+       {
+               .name = "sun6i-a31-ir-clk",
+               .of_compatible = "allwinner,sun4i-a10-mod0-clk",
+               .num_resources = ARRAY_SIZE(sun6i_a31_ir_clk_res),
+               .resources = sun6i_a31_ir_clk_res,
+       },
        {
                .name = "sun6i-a31-apb0-clock-reset",
                .of_compatible = "allwinner,sun6i-a31-clock-reset",
index 80a919a8ca975a2783650fe10e3ec18160818559..7d1cfc1d3ce00314c3fdbab602bf3a1d8d1fa9be 100644 (file)
@@ -145,7 +145,7 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(tps65217_clear_bits);
 
-static struct regmap_config tps65217_regmap_config = {
+static const struct regmap_config tps65217_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 
index d6b764349f9d309956b36fd270ae1b6940e8167c..7af11a8b975327e32ea838c68f381ab5e96b7eb7 100644 (file)
@@ -135,7 +135,7 @@ static const struct regmap_access_table tps65218_volatile_table = {
        .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
 };
 
-static struct regmap_config tps65218_regmap_config = {
+static const struct regmap_config tps65218_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .cache_type = REGCACHE_RBTREE,
index db11b4f406116124ca86d82b1cec6d2e358c99c2..489674a2497e042b4431545b687a52fe0bd59d71 100644 (file)
@@ -207,7 +207,7 @@ static struct twl_mapping twl4030_map[] = {
        { 2, TWL5031_BASEADD_INTERRUPTS },
 };
 
-static struct reg_default twl4030_49_defaults[] = {
+static const struct reg_default twl4030_49_defaults[] = {
        /* Audio Registers */
        { 0x01, 0x00}, /* CODEC_MODE    */
        { 0x02, 0x00}, /* OPTION        */
@@ -306,7 +306,7 @@ static const struct regmap_access_table twl4030_49_volatile_table = {
        .n_yes_ranges = ARRAY_SIZE(twl4030_49_volatile_ranges),
 };
 
-static struct regmap_config twl4030_regmap_config[4] = {
+static const struct regmap_config twl4030_regmap_config[4] = {
        {
                /* Address 0x48 */
                .reg_bits = 8,
@@ -369,7 +369,7 @@ static struct twl_mapping twl6030_map[] = {
        { 1, TWL6030_BASEADD_GASGAUGE },
 };
 
-static struct regmap_config twl6030_regmap_config[3] = {
+static const struct regmap_config twl6030_regmap_config[3] = {
        {
                /* Address 0x48 */
                .reg_bits = 8,
@@ -1087,7 +1087,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
        struct twl4030_platform_data    *pdata = dev_get_platdata(&client->dev);
        struct device_node              *node = client->dev.of_node;
        struct platform_device          *pdev;
-       struct regmap_config            *twl_regmap_config;
+       const struct regmap_config      *twl_regmap_config;
        int                             irq_base = 0;
        int                             status;
        unsigned                        i, num_slaves;
index 9687645162aef9b16b53ede89b2af858a31936fa..f71ee3dbc2a24a8f416d2063cff1d04422a55000 100644 (file)
@@ -44,7 +44,7 @@
 #define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
 #define TWL6040_NUM_SUPPLIES   (2)
 
-static struct reg_default twl6040_defaults[] = {
+static const struct reg_default twl6040_defaults[] = {
        { 0x01, 0x4B }, /* REG_ASICID   (ro) */
        { 0x02, 0x00 }, /* REG_ASICREV  (ro) */
        { 0x03, 0x00 }, /* REG_INTID    */
@@ -580,7 +580,7 @@ static bool twl6040_writeable_reg(struct device *dev, unsigned int reg)
        }
 }
 
-static struct regmap_config twl6040_regmap_config = {
+static const struct regmap_config twl6040_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 
index 6ca9d25cc3f0f65018290e164e878ad0e2014e24..53ae5af5d6e4f97b88c19543a3ec81df09de64da 100644 (file)
 static const struct mfd_cell wm8994_regulator_devs[] = {
        {
                .name = "wm8994-ldo",
-               .id = 1,
+               .id = 0,
                .pm_runtime_no_callbacks = true,
        },
        {
                .name = "wm8994-ldo",
-               .id = 2,
+               .id = 1,
                .pm_runtime_no_callbacks = true,
        },
 };
@@ -344,7 +344,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
        dev_set_drvdata(wm8994->dev, wm8994);
 
        /* Add the on-chip regulators first for bootstrapping */
-       ret = mfd_add_devices(wm8994->dev, -1,
+       ret = mfd_add_devices(wm8994->dev, 0,
                              wm8994_regulator_devs,
                              ARRAY_SIZE(wm8994_regulator_devs),
                              NULL, 0, NULL);
index 6af0a28ba37dd6fe54cc44a4164734597b53886d..e8a4218b57267f508eb871f216de112e93707d31 100644 (file)
@@ -21,8 +21,6 @@
 #include <linux/err.h>
 
 #include <linux/clk.h>
-#include <linux/clk/sunxi.h>
-
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
@@ -229,6 +227,8 @@ struct sunxi_mmc_host {
        /* clock management */
        struct clk      *clk_ahb;
        struct clk      *clk_mmc;
+       struct clk      *clk_sample;
+       struct clk      *clk_output;
 
        /* irq */
        spinlock_t      lock;
@@ -653,26 +653,31 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
 
        /* determine delays */
        if (rate <= 400000) {
-               oclk_dly = 0;
-               sclk_dly = 7;
+               oclk_dly = 180;
+               sclk_dly = 42;
        } else if (rate <= 25000000) {
-               oclk_dly = 0;
-               sclk_dly = 5;
+               oclk_dly = 180;
+               sclk_dly = 75;
        } else if (rate <= 50000000) {
                if (ios->timing == MMC_TIMING_UHS_DDR50) {
-                       oclk_dly = 2;
-                       sclk_dly = 4;
+                       oclk_dly = 60;
+                       sclk_dly = 120;
                } else {
-                       oclk_dly = 3;
-                       sclk_dly = 5;
+                       oclk_dly = 90;
+                       sclk_dly = 150;
                }
+       } else if (rate <= 100000000) {
+               oclk_dly = 6;
+               sclk_dly = 24;
+       } else if (rate <= 200000000) {
+               oclk_dly = 3;
+               sclk_dly = 12;
        } else {
-               /* rate > 50000000 */
-               oclk_dly = 2;
-               sclk_dly = 4;
+               return -EINVAL;
        }
 
-       clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly);
+       clk_set_phase(host->clk_sample, sclk_dly);
+       clk_set_phase(host->clk_output, oclk_dly);
 
        return sunxi_mmc_oclk_onoff(host, 1);
 }
@@ -913,6 +918,18 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
                return PTR_ERR(host->clk_mmc);
        }
 
+       host->clk_output = devm_clk_get(&pdev->dev, "output");
+       if (IS_ERR(host->clk_output)) {
+               dev_err(&pdev->dev, "Could not get output clock\n");
+               return PTR_ERR(host->clk_output);
+       }
+
+       host->clk_sample = devm_clk_get(&pdev->dev, "sample");
+       if (IS_ERR(host->clk_sample)) {
+               dev_err(&pdev->dev, "Could not get sample clock\n");
+               return PTR_ERR(host->clk_sample);
+       }
+
        host->reset = devm_reset_control_get(&pdev->dev, "ahb");
 
        ret = clk_prepare_enable(host->clk_ahb);
@@ -927,11 +944,23 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
                goto error_disable_clk_ahb;
        }
 
+       ret = clk_prepare_enable(host->clk_output);
+       if (ret) {
+               dev_err(&pdev->dev, "Enable output clk err %d\n", ret);
+               goto error_disable_clk_mmc;
+       }
+
+       ret = clk_prepare_enable(host->clk_sample);
+       if (ret) {
+               dev_err(&pdev->dev, "Enable sample clk err %d\n", ret);
+               goto error_disable_clk_output;
+       }
+
        if (!IS_ERR(host->reset)) {
                ret = reset_control_deassert(host->reset);
                if (ret) {
                        dev_err(&pdev->dev, "reset err %d\n", ret);
-                       goto error_disable_clk_mmc;
+                       goto error_disable_clk_sample;
                }
        }
 
@@ -950,6 +979,10 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
 error_assert_reset:
        if (!IS_ERR(host->reset))
                reset_control_assert(host->reset);
+error_disable_clk_sample:
+       clk_disable_unprepare(host->clk_sample);
+error_disable_clk_output:
+       clk_disable_unprepare(host->clk_output);
 error_disable_clk_mmc:
        clk_disable_unprepare(host->clk_mmc);
 error_disable_clk_ahb:
index cc13ea5ce4d58f6cb532d151d5f22d9aa46e8c2c..c0720c1ee4c9607d0887e7b4caacecdac96088af 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 
+#include <uapi/linux/magic.h>
+
 /*
  * NAND flash on Netgear R6250 was verified to contain 15 partitions.
  * This will result in allocating too big array for some old devices, but the
@@ -39,7 +41,8 @@
 #define ML_MAGIC1                      0x39685a42
 #define ML_MAGIC2                      0x26594131
 #define TRX_MAGIC                      0x30524448
-#define SQSH_MAGIC                     0x71736873      /* shsq */
+#define SHSQ_MAGIC                     0x71736873      /* shsq (weird ZTE H218N endianness) */
+#define UBI_EC_MAGIC                   0x23494255      /* UBI# */
 
 struct trx_header {
        uint32_t magic;
@@ -50,7 +53,7 @@ struct trx_header {
        uint32_t offset[3];
 } __packed;
 
-static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
+static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name,
                                 u64 offset, uint32_t mask_flags)
 {
        part->name = name;
@@ -58,6 +61,26 @@ static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
        part->mask_flags = mask_flags;
 }
 
+static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
+                                                 size_t offset)
+{
+       uint32_t buf;
+       size_t bytes_read;
+
+       if (mtd_read(master, offset, sizeof(buf), &bytes_read,
+                    (uint8_t *)&buf) < 0) {
+               pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+                       offset);
+               goto out_default;
+       }
+
+       if (buf == UBI_EC_MAGIC)
+               return "ubi";
+
+out_default:
+       return "rootfs";
+}
+
 static int bcm47xxpart_parse(struct mtd_info *master,
                             struct mtd_partition **pparts,
                             struct mtd_part_parser_data *data)
@@ -73,8 +96,12 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        int last_trx_part = -1;
        int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
 
-       if (blocksize <= 0x10000)
-               blocksize = 0x10000;
+       /*
+        * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
+        * partitions were aligned to at least 0x1000 anyway.
+        */
+       if (blocksize < 0x1000)
+               blocksize = 0x1000;
 
        /* Alloc */
        parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
@@ -186,8 +213,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                         * we want to have jffs2 (overlay) in the same mtd.
                         */
                        if (trx->offset[i]) {
+                               const char *name;
+
+                               name = bcm47xxpart_trx_data_part_name(master, offset + trx->offset[i]);
                                bcm47xxpart_add_part(&parts[curr_part++],
-                                                    "rootfs",
+                                                    name,
                                                     offset + trx->offset[i],
                                                     0);
                                i++;
@@ -205,7 +235,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Squashfs on devices not using TRX */
-               if (buf[0x000 / 4] == SQSH_MAGIC) {
+               if (le32_to_cpu(buf[0x000 / 4]) == SQUASHFS_MAGIC ||
+                   buf[0x000 / 4] == SHSQ_MAGIC) {
                        bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
                                             offset, 0);
                        continue;
index 991c2a1c05d364f33ac72cee2164f0ba6567e554..afb43d5e178269d33443a59a439245263cb263c0 100644 (file)
@@ -68,6 +68,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
        mtd->_get_unmapped_area = mapram_unmapped_area;
        mtd->_read = mapram_read;
        mtd->_write = mapram_write;
+       mtd->_panic_write = mapram_write;
        mtd->_sync = mapram_nop;
        mtd->flags = MTD_CAP_RAM;
        mtd->writesize = 1;
index 47a43cf7e5c60fc162934b6da893b32dbd0cb717..e67f73ab44c9db23eae052c206139a03aafe15b9 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/of.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/map.h>
 
@@ -28,6 +29,15 @@ static struct mtd_chip_driver maprom_chipdrv = {
        .module = THIS_MODULE
 };
 
+static unsigned int default_erasesize(struct map_info *map)
+{
+       const __be32 *erase_size = NULL;
+
+       erase_size = of_get_property(map->device_node, "erase-size", NULL);
+
+       return !erase_size ? map->size : be32_to_cpu(*erase_size);
+}
+
 static struct mtd_info *map_rom_probe(struct map_info *map)
 {
        struct mtd_info *mtd;
@@ -47,8 +57,9 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
        mtd->_sync = maprom_nop;
        mtd->_erase = maprom_erase;
        mtd->flags = MTD_CAP_ROM;
-       mtd->erasesize = map->size;
+       mtd->erasesize = default_erasesize(map);
        mtd->writesize = 1;
+       mtd->writebufsize = 1;
 
        __module_get(THIS_MODULE);
        return mtd;
index 54ffe5223e64294558c1189316d61d3e23b50aed..3060025c8af47772cd1d9cc50fdcc277de1521ae 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/clk.h>
 
 #include "serial_flash_cmds.h"
 
@@ -262,6 +263,7 @@ struct stfsm {
        struct mtd_info         mtd;
        struct mutex            lock;
        struct flash_info       *info;
+       struct clk              *clk;
 
        uint32_t                configuration;
        uint32_t                fifo_dir_delay;
@@ -663,6 +665,23 @@ static struct stfsm_seq stfsm_seq_write_status = {
                    SEQ_CFG_STARTSEQ),
 };
 
+/* Dummy sequence to read one byte of data from flash into the FIFO */
+static const struct stfsm_seq stfsm_seq_load_fifo_byte = {
+       .data_size = TRANSFER_SIZE(1),
+       .seq_opc[0] = (SEQ_OPC_PADS_1 |
+                      SEQ_OPC_CYCLES(8) |
+                      SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
+       .seq = {
+               STFSM_INST_CMD1,
+               STFSM_INST_DATA_READ,
+               STFSM_INST_STOP,
+       },
+       .seq_cfg = (SEQ_CFG_PADS_1 |
+                   SEQ_CFG_READNOTWRITE |
+                   SEQ_CFG_CSDEASSERT |
+                   SEQ_CFG_STARTSEQ),
+};
+
 static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
 {
        seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
@@ -695,22 +714,6 @@ static inline uint32_t stfsm_fifo_available(struct stfsm *fsm)
        return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f;
 }
 
-static void stfsm_clear_fifo(struct stfsm *fsm)
-{
-       uint32_t avail;
-
-       for (;;) {
-               avail = stfsm_fifo_available(fsm);
-               if (!avail)
-                       break;
-
-               while (avail) {
-                       readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
-                       avail--;
-               }
-       }
-}
-
 static inline void stfsm_load_seq(struct stfsm *fsm,
                                  const struct stfsm_seq *seq)
 {
@@ -772,6 +775,68 @@ static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
        }
 }
 
+/*
+ * Clear the data FIFO
+ *
+ * Typically, this is only required during driver initialisation, where no
+ * assumptions can be made regarding the state of the FIFO.
+ *
+ * The process of clearing the FIFO is complicated by fact that while it is
+ * possible for the FIFO to contain an arbitrary number of bytes [1], the
+ * SPI_FAST_SEQ_STA register only reports the number of complete 32-bit words
+ * present.  Furthermore, data can only be drained from the FIFO by reading
+ * complete 32-bit words.
+ *
+ * With this in mind, a two stage process is used to the clear the FIFO:
+ *
+ *     1. Read any complete 32-bit words from the FIFO, as reported by the
+ *        SPI_FAST_SEQ_STA register.
+ *
+ *     2. Mop up any remaining bytes.  At this point, it is not known if there
+ *        are 0, 1, 2, or 3 bytes in the FIFO.  To handle all cases, a dummy FSM
+ *        sequence is used to load one byte at a time, until a complete 32-bit
+ *        word is formed; at most, 4 bytes will need to be loaded.
+ *
+ * [1] It is theoretically possible for the FIFO to contain an arbitrary number
+ *     of bits.  However, since there are no known use-cases that leave
+ *     incomplete bytes in the FIFO, only words and bytes are considered here.
+ */
+static void stfsm_clear_fifo(struct stfsm *fsm)
+{
+       const struct stfsm_seq *seq = &stfsm_seq_load_fifo_byte;
+       uint32_t words, i;
+
+       /* 1. Clear any 32-bit words */
+       words = stfsm_fifo_available(fsm);
+       if (words) {
+               for (i = 0; i < words; i++)
+                       readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
+               dev_dbg(fsm->dev, "cleared %d words from FIFO\n", words);
+       }
+
+       /*
+        * 2. Clear any remaining bytes
+        *    - Load the FIFO, one byte at a time, until a complete 32-bit word
+        *      is available.
+        */
+       for (i = 0, words = 0; i < 4 && !words; i++) {
+               stfsm_load_seq(fsm, seq);
+               stfsm_wait_seq(fsm);
+               words = stfsm_fifo_available(fsm);
+       }
+
+       /*    - A single word must be available now */
+       if (words != 1) {
+               dev_err(fsm->dev, "failed to clear bytes from the data FIFO\n");
+               return;
+       }
+
+       /*    - Read the 32-bit word */
+       readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
+
+       dev_dbg(fsm->dev, "cleared %d byte(s) from the data FIFO\n", 4 - i);
+}
+
 static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
                            uint32_t size)
 {
@@ -1521,11 +1586,11 @@ static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
        uint32_t size_lb;
        uint32_t size_mop;
        uint32_t tmp[4];
+       uint32_t i;
        uint32_t page_buf[FLASH_PAGESIZE_32];
        uint8_t *t = (uint8_t *)&tmp;
        const uint8_t *p;
        int ret;
-       int i;
 
        dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset);
 
@@ -1843,8 +1908,7 @@ static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq)
        uint32_t emi_freq;
        uint32_t clk_div;
 
-       /* TODO: Make this dynamic */
-       emi_freq = STFSM_DEFAULT_EMI_FREQ;
+       emi_freq = clk_get_rate(fsm->clk);
 
        /*
         * Calculate clk_div - values between 2 and 128
@@ -1994,6 +2058,18 @@ static int stfsm_probe(struct platform_device *pdev)
                return PTR_ERR(fsm->base);
        }
 
+       fsm->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(fsm->clk)) {
+               dev_err(fsm->dev, "Couldn't find EMI clock.\n");
+               return PTR_ERR(fsm->clk);
+       }
+
+       ret = clk_prepare_enable(fsm->clk);
+       if (ret) {
+               dev_err(fsm->dev, "Failed to enable EMI clock.\n");
+               return ret;
+       }
+
        mutex_init(&fsm->lock);
 
        ret = stfsm_init(fsm);
@@ -2058,6 +2134,28 @@ static int stfsm_remove(struct platform_device *pdev)
        return mtd_device_unregister(&fsm->mtd);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int stfsmfsm_suspend(struct device *dev)
+{
+       struct stfsm *fsm = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(fsm->clk);
+
+       return 0;
+}
+
+static int stfsmfsm_resume(struct device *dev)
+{
+       struct stfsm *fsm = dev_get_drvdata(dev);
+
+       clk_prepare_enable(fsm->clk);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
+
 static const struct of_device_id stfsm_match[] = {
        { .compatible = "st,spi-fsm", },
        {},
@@ -2070,6 +2168,7 @@ static struct platform_driver stfsm_driver = {
        .driver         = {
                .name   = "st-spi-fsm",
                .of_match_table = stfsm_match,
+               .pm     = &stfsm_pm_ops,
        },
 };
 module_platform_driver(stfsm_driver);
index f35cd2081314d79f0c8742a7965f8868d624c764..ff26e979b1a17c243e1525adfceed3886d479ba1 100644 (file)
@@ -269,6 +269,16 @@ static int of_flash_probe(struct platform_device *dev)
                        info->list[i].mtd = obsolete_probe(dev,
                                                           &info->list[i].map);
                }
+
+               /* Fall back to mapping region as ROM */
+               if (!info->list[i].mtd) {
+                       dev_warn(&dev->dev,
+                               "do_map_probe() failed for type %s\n",
+                                probe_type);
+
+                       info->list[i].mtd = do_map_probe("map_rom",
+                                                        &info->list[i].map);
+               }
                mtd_list[i] = info->list[i].mtd;
 
                err = -ENXIO;
index 485ea751c7f9b6ab18530e2d7ffcec3f328146c0..bb4c14f83c75acd0c070fca721e2516447fe633c 100644 (file)
@@ -45,8 +45,6 @@ struct mtdblk_dev {
        enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
 };
 
-static DEFINE_MUTEX(mtdblks_lock);
-
 /*
  * Cache stuff...
  *
@@ -286,10 +284,8 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
 
        pr_debug("mtdblock_open\n");
 
-       mutex_lock(&mtdblks_lock);
        if (mtdblk->count) {
                mtdblk->count++;
-               mutex_unlock(&mtdblks_lock);
                return 0;
        }
 
@@ -302,8 +298,6 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
                mtdblk->cache_data = NULL;
        }
 
-       mutex_unlock(&mtdblks_lock);
-
        pr_debug("ok\n");
 
        return 0;
@@ -315,8 +309,6 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
 
        pr_debug("mtdblock_release\n");
 
-       mutex_lock(&mtdblks_lock);
-
        mutex_lock(&mtdblk->cache_mutex);
        write_cached_data(mtdblk);
        mutex_unlock(&mtdblk->cache_mutex);
@@ -331,8 +323,6 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
                vfree(mtdblk->cache_data);
        }
 
-       mutex_unlock(&mtdblks_lock);
-
        pr_debug("ok\n");
 }
 
index eacc3aac732789085b1bbfc86531c6fa5b0dbe17..239a8c806b6772df642bf6cb1385616147e975c8 100644 (file)
@@ -311,7 +311,8 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
                        devops.len = subdev->size - to;
 
                err = mtd_write_oob(subdev, to, &devops);
-               ops->retlen += devops.oobretlen;
+               ops->retlen += devops.retlen;
+               ops->oobretlen += devops.oobretlen;
                if (err)
                        return err;
 
index 0ec4d6ea1e4b8d7729a2299702391ab3f504e914..11883bd26d9d35e8234cc7dc74f51b7ed57d7566 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/backing-dev.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/reboot.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -356,6 +357,17 @@ unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
 #endif
 
+static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
+                              void *cmd)
+{
+       struct mtd_info *mtd;
+
+       mtd = container_of(n, struct mtd_info, reboot_notifier);
+       mtd->_reboot(mtd);
+
+       return NOTIFY_DONE;
+}
+
 /**
  *     add_mtd_device - register an MTD device
  *     @mtd: pointer to new MTD device info structure
@@ -544,6 +556,19 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
                        err = -ENODEV;
        }
 
+       /*
+        * FIXME: some drivers unfortunately call this function more than once.
+        * So we have to check if we've already assigned the reboot notifier.
+        *
+        * Generally, we can make multiple calls work for most cases, but it
+        * does cause problems with parse_mtd_partitions() above (e.g.,
+        * cmdlineparts will register partitions more than once).
+        */
+       if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
+               mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
+               register_reboot_notifier(&mtd->reboot_notifier);
+       }
+
        return err;
 }
 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
@@ -558,6 +583,9 @@ int mtd_device_unregister(struct mtd_info *master)
 {
        int err;
 
+       if (master->_reboot)
+               unregister_reboot_notifier(&master->reboot_notifier);
+
        err = del_mtd_partitions(master);
        if (err)
                return err;
index 7d0150d2043201523bcd5882237fc28157dd13b9..5b76a173cd95d6d59c41c47d15c081ff39473c53 100644 (file)
@@ -421,7 +421,7 @@ config MTD_NAND_ORION
 
 config MTD_NAND_FSL_ELBC
        tristate "NAND support for Freescale eLBC controllers"
-       depends on PPC_OF
+       depends on PPC
        select FSL_LBC
        help
          Various Freescale chips, including the 8313, include a NAND Flash
@@ -524,4 +524,9 @@ config MTD_NAND_SUNXI
        help
          Enables support for NAND Flash chips on Allwinner SoCs.
 
+config MTD_NAND_HISI504
+       tristate "Support for NAND controller on Hisilicon SoC Hip04"
+       help
+         Enables support for NAND controller on Hisilicon SoC Hip04.
+
 endif # MTD_NAND
index bd38f21d2e28e8b622ba42a1f0c0c2c6562e4bfb..582bbd05aff7ab7df0a20c282bc25faa1532437a 100644 (file)
@@ -51,5 +51,6 @@ obj-$(CONFIG_MTD_NAND_GPMI_NAND)      += gpmi-nand/
 obj-$(CONFIG_MTD_NAND_XWAY)            += xway_nand.o
 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH)   += bcm47xxnflash/
 obj-$(CONFIG_MTD_NAND_SUNXI)           += sunxi_nand.o
+obj-$(CONFIG_MTD_NAND_HISI504)         += hisi504_nand.o
 
 nand-objs := nand_base.o nand_bbt.o nand_timings.o
index f1d555cfb332f17a91ef4c53eb3ca5c617477b70..842f8fe91b56cb2ef248c1c6e868cedaf2113766 100644 (file)
@@ -183,7 +183,7 @@ static int ams_delta_init(struct platform_device *pdev)
                return -ENXIO;
 
        /* Allocate memory for MTD device structure and private data */
-       ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
+       ams_delta_mtd = kzalloc(sizeof(struct mtd_info) +
                                sizeof(struct nand_chip), GFP_KERNEL);
        if (!ams_delta_mtd) {
                printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n");
@@ -196,10 +196,6 @@ static int ams_delta_init(struct platform_device *pdev)
        /* Get pointer to private data */
        this = (struct nand_chip *) (&ams_delta_mtd[1]);
 
-       /* Initialize structures */
-       memset(ams_delta_mtd, 0, sizeof(struct mtd_info));
-       memset(this, 0, sizeof(struct nand_chip));
-
        /* Link the private data with the MTD structure */
        ams_delta_mtd->priv = this;
 
index a345e7b2463a3f80fb806fe12a19fcf2e72bf13e..d93c849b70b5cf299864a356e955a792e35bd802 100644 (file)
@@ -63,6 +63,10 @@ module_param(on_flash_bbt, int, 0);
 #include "atmel_nand_ecc.h"    /* Hardware ECC registers */
 #include "atmel_nand_nfc.h"    /* Nand Flash Controller definition */
 
+struct atmel_nand_caps {
+       bool pmecc_correct_erase_page;
+};
+
 /* oob layout for large page size
  * bad block info is on bytes 0 and 1
  * the bytes have to be consecutives to avoid
@@ -124,6 +128,7 @@ struct atmel_nand_host {
 
        struct atmel_nfc        *nfc;
 
+       struct atmel_nand_caps  *caps;
        bool                    has_pmecc;
        u8                      pmecc_corr_cap;
        u16                     pmecc_sector_size;
@@ -847,7 +852,11 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
        struct atmel_nand_host *host = nand_chip->priv;
        int i, err_nbr;
        uint8_t *buf_pos;
-       int total_err = 0;
+       int max_bitflips = 0;
+
+       /* If the PMECC can correct bitflips in an erased page, do the normal check */
+       if (host->caps->pmecc_correct_erase_page)
+               goto normal_check;
 
        for (i = 0; i < nand_chip->ecc.total; i++)
                if (ecc[i] != 0xff)
@@ -874,13 +883,13 @@ normal_check:
                                pmecc_correct_data(mtd, buf_pos, ecc, i,
                                        nand_chip->ecc.bytes, err_nbr);
                                mtd->ecc_stats.corrected += err_nbr;
-                               total_err += err_nbr;
+                               max_bitflips = max_t(int, max_bitflips, err_nbr);
                        }
                }
                pmecc_stat >>= 1;
        }
 
-       return total_err;
+       return max_bitflips;
 }
 
 static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
@@ -1474,6 +1483,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
                ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
 }
 
+static const struct of_device_id atmel_nand_dt_ids[];
+
 static int atmel_of_init_port(struct atmel_nand_host *host,
                              struct device_node *np)
 {
@@ -1483,6 +1494,9 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
        struct atmel_nand_data *board = &host->board;
        enum of_gpio_flags flags = 0;
 
+       host->caps = (struct atmel_nand_caps *)
+               of_match_device(atmel_nand_dt_ids, host->dev)->data;
+
        if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
                if (val >= 32) {
                        dev_err(host->dev, "invalid addr-offset %u\n", val);
@@ -2288,8 +2302,17 @@ static int atmel_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct atmel_nand_caps at91rm9200_caps = {
+       .pmecc_correct_erase_page = false,
+};
+
+static struct atmel_nand_caps sama5d4_caps = {
+       .pmecc_correct_erase_page = true,
+};
+
 static const struct of_device_id atmel_nand_dt_ids[] = {
-       { .compatible = "atmel,at91rm9200-nand" },
+       { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
+       { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
        { /* sentinel */ }
 };
 
index b3b7ca1bafb807f8828b3af853aa7886b87d7e90..f44c6061536a830e66fa7f50b139e6bd0fbf0050 100644 (file)
@@ -1041,7 +1041,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
        index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
 
        /* 3. set memory low address bits 23:8 */
-       index_addr(denali, mode | ((addr & 0xff) << 8), 0x2300);
+       index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
 
        /* 4. interrupt when complete, burst len = 64 bytes */
        index_addr(denali, mode | 0x14000, 0x2400);
@@ -1328,35 +1328,6 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
                break;
        }
 }
-
-/* stubs for ECC functions not used by the NAND core */
-static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
-                               uint8_t *ecc_code)
-{
-       struct denali_nand_info *denali = mtd_to_denali(mtd);
-
-       dev_err(denali->dev, "denali_ecc_calculate called unexpectedly\n");
-       BUG();
-       return -EIO;
-}
-
-static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
-                               uint8_t *read_ecc, uint8_t *calc_ecc)
-{
-       struct denali_nand_info *denali = mtd_to_denali(mtd);
-
-       dev_err(denali->dev, "denali_ecc_correct called unexpectedly\n");
-       BUG();
-       return -EIO;
-}
-
-static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
-{
-       struct denali_nand_info *denali = mtd_to_denali(mtd);
-
-       dev_err(denali->dev, "denali_ecc_hwctl called unexpectedly\n");
-       BUG();
-}
 /* end NAND core entry points */
 
 /* Initialization code to bring the device up to a known good state */
@@ -1609,15 +1580,6 @@ int denali_init(struct denali_nand_info *denali)
        denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift;
        denali->blksperchip = denali->totalblks / denali->nand.numchips;
 
-       /*
-        * These functions are required by the NAND core framework, otherwise,
-        * the NAND core will assert. However, we don't need them, so we'll stub
-        * them out.
-        */
-       denali->nand.ecc.calculate = denali_ecc_calculate;
-       denali->nand.ecc.correct = denali_ecc_correct;
-       denali->nand.ecc.hwctl = denali_ecc_hwctl;
-
        /* override the default read operations */
        denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
        denali->nand.ecc.read_page = denali_read_page;
index 4f3851a24bb2df62cc749da60510ecd8aca0bfc7..33f3c3c54dbc769f1259335722461d742556e3e9 100644 (file)
@@ -1294,14 +1294,6 @@ exit_auxiliary:
  * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
  * ECC-based or raw view of the page is implicit in which function it calls
  * (there is a similar pair of ECC-based/raw functions for writing).
- *
- * FIXME: The following paragraph is incorrect, now that there exist
- * ecc.read_oob_raw and ecc.write_oob_raw functions.
- *
- * Since MTD assumes the OOB is not covered by ECC, there is no pair of
- * ECC-based/raw functions for reading or or writing the OOB. The fact that the
- * caller wants an ECC-based or raw view of the page is not propagated down to
- * this driver.
  */
 static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                                int page)
@@ -2029,7 +2021,6 @@ static int gpmi_nand_probe(struct platform_device *pdev)
 exit_nfc_init:
        release_resources(this);
 exit_acquire_resources:
-       dev_err(this->dev, "driver registration failed: %d\n", ret);
 
        return ret;
 }
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
new file mode 100644 (file)
index 0000000..289ad3a
--- /dev/null
@@ -0,0 +1,891 @@
+/*
+ * Hisilicon NAND Flash controller driver
+ *
+ * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
+ *              http://www.hisilicon.com
+ *
+ * Author: Zhou Wang <wangzhou.bry@gmail.com>
+ * The initial developer of the original code is Zhiyong Cai
+ * <caizhiyong@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sizes.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/nand.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+
+#define HINFC504_MAX_CHIP                               (4)
+#define HINFC504_W_LATCH                                (5)
+#define HINFC504_R_LATCH                                (7)
+#define HINFC504_RW_LATCH                               (3)
+
+#define HINFC504_NFC_TIMEOUT                           (2 * HZ)
+#define HINFC504_NFC_PM_TIMEOUT                                (1 * HZ)
+#define HINFC504_NFC_DMA_TIMEOUT                       (5 * HZ)
+#define HINFC504_CHIP_DELAY                            (25)
+
+#define HINFC504_REG_BASE_ADDRESS_LEN                  (0x100)
+#define HINFC504_BUFFER_BASE_ADDRESS_LEN               (2048 + 128)
+
+#define HINFC504_ADDR_CYCLE_MASK                       0x4
+
+#define HINFC504_CON                                   0x00
+#define HINFC504_CON_OP_MODE_NORMAL                    BIT(0)
+#define HINFC504_CON_PAGEISZE_SHIFT                    (1)
+#define HINFC504_CON_PAGESIZE_MASK                     (0x07)
+#define HINFC504_CON_BUS_WIDTH                         BIT(4)
+#define HINFC504_CON_READY_BUSY_SEL                    BIT(8)
+#define HINFC504_CON_ECCTYPE_SHIFT                     (9)
+#define HINFC504_CON_ECCTYPE_MASK                      (0x07)
+
+#define HINFC504_PWIDTH                                        0x04
+#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
+       ((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
+
+#define HINFC504_CMD                                   0x0C
+#define HINFC504_ADDRL                                 0x10
+#define HINFC504_ADDRH                                 0x14
+#define HINFC504_DATA_NUM                              0x18
+
+#define HINFC504_OP                                    0x1C
+#define HINFC504_OP_READ_DATA_EN                       BIT(1)
+#define HINFC504_OP_WAIT_READY_EN                      BIT(2)
+#define HINFC504_OP_CMD2_EN                            BIT(3)
+#define HINFC504_OP_WRITE_DATA_EN                      BIT(4)
+#define HINFC504_OP_ADDR_EN                            BIT(5)
+#define HINFC504_OP_CMD1_EN                            BIT(6)
+#define HINFC504_OP_NF_CS_SHIFT                         (7)
+#define HINFC504_OP_NF_CS_MASK                         (3)
+#define HINFC504_OP_ADDR_CYCLE_SHIFT                   (9)
+#define HINFC504_OP_ADDR_CYCLE_MASK                    (7)
+
+#define HINFC504_STATUS                                 0x20
+#define HINFC504_READY                                 BIT(0)
+
+#define HINFC504_INTEN                                 0x24
+#define HINFC504_INTEN_DMA                             BIT(9)
+#define HINFC504_INTEN_UE                              BIT(6)
+#define HINFC504_INTEN_CE                              BIT(5)
+
+#define HINFC504_INTS                                  0x28
+#define HINFC504_INTS_DMA                              BIT(9)
+#define HINFC504_INTS_UE                               BIT(6)
+#define HINFC504_INTS_CE                               BIT(5)
+
+#define HINFC504_INTCLR                                 0x2C
+#define HINFC504_INTCLR_DMA                            BIT(9)
+#define HINFC504_INTCLR_UE                             BIT(6)
+#define HINFC504_INTCLR_CE                             BIT(5)
+
+#define HINFC504_ECC_STATUS                             0x5C
+#define HINFC504_ECC_16_BIT_SHIFT                       12
+
+#define HINFC504_DMA_CTRL                              0x60
+#define HINFC504_DMA_CTRL_DMA_START                    BIT(0)
+#define HINFC504_DMA_CTRL_WE                           BIT(1)
+#define HINFC504_DMA_CTRL_DATA_AREA_EN                 BIT(2)
+#define HINFC504_DMA_CTRL_OOB_AREA_EN                  BIT(3)
+#define HINFC504_DMA_CTRL_BURST4_EN                    BIT(4)
+#define HINFC504_DMA_CTRL_BURST8_EN                    BIT(5)
+#define HINFC504_DMA_CTRL_BURST16_EN                   BIT(6)
+#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT               (7)
+#define HINFC504_DMA_CTRL_ADDR_NUM_MASK                 (1)
+#define HINFC504_DMA_CTRL_CS_SHIFT                     (8)
+#define HINFC504_DMA_CTRL_CS_MASK                      (0x03)
+
+#define HINFC504_DMA_ADDR_DATA                         0x64
+#define HINFC504_DMA_ADDR_OOB                          0x68
+
+#define HINFC504_DMA_LEN                               0x6C
+#define HINFC504_DMA_LEN_OOB_SHIFT                     (16)
+#define HINFC504_DMA_LEN_OOB_MASK                      (0xFFF)
+
+#define HINFC504_DMA_PARA                              0x70
+#define HINFC504_DMA_PARA_DATA_RW_EN                   BIT(0)
+#define HINFC504_DMA_PARA_OOB_RW_EN                    BIT(1)
+#define HINFC504_DMA_PARA_DATA_EDC_EN                  BIT(2)
+#define HINFC504_DMA_PARA_OOB_EDC_EN                   BIT(3)
+#define HINFC504_DMA_PARA_DATA_ECC_EN                  BIT(4)
+#define HINFC504_DMA_PARA_OOB_ECC_EN                   BIT(5)
+
+#define HINFC_VERSION                                   0x74
+#define HINFC504_LOG_READ_ADDR                         0x7C
+#define HINFC504_LOG_READ_LEN                          0x80
+
+#define HINFC504_NANDINFO_LEN                          0x10
+
+struct hinfc_host {
+       struct nand_chip        chip;
+       struct mtd_info         mtd;
+       struct device           *dev;
+       void __iomem            *iobase;
+       void __iomem            *mmio;
+       struct completion       cmd_complete;
+       unsigned int            offset;
+       unsigned int            command;
+       int                     chipselect;
+       unsigned int            addr_cycle;
+       u32                     addr_value[2];
+       u32                     cache_addr_value[2];
+       char                    *buffer;
+       dma_addr_t              dma_buffer;
+       dma_addr_t              dma_oob;
+       int                     version;
+       unsigned int            irq_status; /* interrupt status */
+};
+
+static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
+{
+       return readl(host->iobase + reg);
+}
+
+static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
+                              unsigned int reg)
+{
+       writel(value, host->iobase + reg);
+}
+
+static void wait_controller_finished(struct hinfc_host *host)
+{
+       unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
+       int val;
+
+       while (time_before(jiffies, timeout)) {
+               val = hinfc_read(host, HINFC504_STATUS);
+               if (host->command == NAND_CMD_ERASE2) {
+                       /* nfc is ready */
+                       while (!(val & HINFC504_READY)) {
+                               usleep_range(500, 1000);
+                               val = hinfc_read(host, HINFC504_STATUS);
+                       }
+                       return;
+               }
+
+               if (val & HINFC504_READY)
+                       return;
+       }
+
+       /* wait cmd timeout */
+       dev_err(host->dev, "Wait NAND controller exec cmd timeout.\n");
+}
+
+static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
+{
+       struct mtd_info *mtd = &host->mtd;
+       struct nand_chip *chip = mtd->priv;
+       unsigned long val;
+       int ret;
+
+       hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
+       hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
+
+       if (chip->ecc.mode == NAND_ECC_NONE) {
+               hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
+                       << HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
+
+               hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+                       | HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
+       } else {
+               if (host->command == NAND_CMD_READOOB)
+                       hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
+                       | HINFC504_DMA_PARA_OOB_EDC_EN
+                       | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+               else
+                       hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+                       | HINFC504_DMA_PARA_OOB_RW_EN
+                       | HINFC504_DMA_PARA_DATA_EDC_EN
+                       | HINFC504_DMA_PARA_OOB_EDC_EN
+                       | HINFC504_DMA_PARA_DATA_ECC_EN
+                       | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+
+       }
+
+       val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
+               | HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
+               | HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
+               | ((host->addr_cycle == 4 ? 1 : 0)
+                       << HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
+               | ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
+                       << HINFC504_DMA_CTRL_CS_SHIFT));
+
+       if (todev)
+               val |= HINFC504_DMA_CTRL_WE;
+
+       init_completion(&host->cmd_complete);
+
+       hinfc_write(host, val, HINFC504_DMA_CTRL);
+       ret = wait_for_completion_timeout(&host->cmd_complete,
+                       HINFC504_NFC_DMA_TIMEOUT);
+
+       if (!ret) {
+               dev_err(host->dev, "DMA operation(irq) timeout!\n");
+               /* sanity check */
+               val = hinfc_read(host, HINFC504_DMA_CTRL);
+               if (!(val & HINFC504_DMA_CTRL_DMA_START))
+                       dev_err(host->dev, "DMA is already done but without irq ACK!\n");
+               else
+                       dev_err(host->dev, "DMA is really timeout!\n");
+       }
+}
+
+static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
+{
+       host->addr_value[0] &= 0xffff0000;
+
+       hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+       hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+       hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
+                   HINFC504_CMD);
+
+       hisi_nfc_dma_transfer(host, 1);
+
+       return 0;
+}
+
+static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
+{
+       struct mtd_info *mtd = &host->mtd;
+
+       if ((host->addr_value[0] == host->cache_addr_value[0]) &&
+           (host->addr_value[1] == host->cache_addr_value[1]))
+               return 0;
+
+       host->addr_value[0] &= 0xffff0000;
+
+       hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+       hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+       hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
+                   HINFC504_CMD);
+
+       hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
+       hinfc_write(host, mtd->writesize + mtd->oobsize,
+                   HINFC504_LOG_READ_LEN);
+
+       hisi_nfc_dma_transfer(host, 0);
+
+       host->cache_addr_value[0] = host->addr_value[0];
+       host->cache_addr_value[1] = host->addr_value[1];
+
+       return 0;
+}
+
+static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
+{
+       hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+       hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
+                   HINFC504_CMD);
+
+       hinfc_write(host, HINFC504_OP_WAIT_READY_EN
+               | HINFC504_OP_CMD2_EN
+               | HINFC504_OP_CMD1_EN
+               | HINFC504_OP_ADDR_EN
+               | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+                       << HINFC504_OP_NF_CS_SHIFT)
+               | ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
+                       << HINFC504_OP_ADDR_CYCLE_SHIFT),
+               HINFC504_OP);
+
+       wait_controller_finished(host);
+
+       return 0;
+}
+
+static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
+{
+       hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+       hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
+       hinfc_write(host, 0, HINFC504_ADDRL);
+
+       hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
+               | HINFC504_OP_READ_DATA_EN
+               | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+                       << HINFC504_OP_NF_CS_SHIFT)
+               | 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
+
+       wait_controller_finished(host);
+
+       return 0;
+}
+
+static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
+{
+       hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+       hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
+       hinfc_write(host, HINFC504_OP_CMD1_EN
+               | HINFC504_OP_READ_DATA_EN
+               | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+                       << HINFC504_OP_NF_CS_SHIFT),
+               HINFC504_OP);
+
+       wait_controller_finished(host);
+
+       return 0;
+}
+
+static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
+{
+       hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
+
+       hinfc_write(host, HINFC504_OP_CMD1_EN
+               | ((chipselect & HINFC504_OP_NF_CS_MASK)
+                       << HINFC504_OP_NF_CS_SHIFT)
+               | HINFC504_OP_WAIT_READY_EN,
+               HINFC504_OP);
+
+       wait_controller_finished(host);
+
+       return 0;
+}
+
+static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct hinfc_host *host = chip->priv;
+
+       if (chipselect < 0)
+               return;
+
+       host->chipselect = chipselect;
+}
+
+static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct hinfc_host *host = chip->priv;
+
+       if (host->command == NAND_CMD_STATUS)
+               return *(uint8_t *)(host->mmio);
+
+       host->offset++;
+
+       if (host->command == NAND_CMD_READID)
+               return *(uint8_t *)(host->mmio + host->offset - 1);
+
+       return *(uint8_t *)(host->buffer + host->offset - 1);
+}
+
+static u16 hisi_nfc_read_word(struct mtd_info *mtd)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct hinfc_host *host = chip->priv;
+
+       host->offset += 2;
+       return *(u16 *)(host->buffer + host->offset - 2);
+}
+
+static void
+hisi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct hinfc_host *host = chip->priv;
+
+       memcpy(host->buffer + host->offset, buf, len);
+       host->offset += len;
+}
+
+static void hisi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct hinfc_host *host = chip->priv;
+
+       memcpy(buf, host->buffer + host->offset, len);
+       host->offset += len;
+}
+
+/*
+ * Pack @column/@page_addr into host->addr_value[] and record the number
+ * of address cycles, mirroring the classic nand_command_lp() layout:
+ * two column cycles, two row cycles, plus one extra row cycle for
+ * devices larger than 128MiB.  A value of -1 means "no such address
+ * phase" for either argument.
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct hinfc_host *host = chip->priv;
+       unsigned int command = host->command;
+
+       host->addr_cycle    = 0;
+       host->addr_value[0] = 0;
+       host->addr_value[1] = 0;
+
+       /* Serially input address */
+       if (column != -1) {
+               /* Adjust columns for 16 bit buswidth */
+               if (chip->options & NAND_BUSWIDTH_16 &&
+                               !nand_opcode_8bits(command))
+                       column >>= 1;
+
+               host->addr_value[0] = column & 0xffff;
+               host->addr_cycle    = 2;
+       }
+       if (page_addr != -1) {
+               host->addr_value[0] |= (page_addr & 0xffff)
+                       << (host->addr_cycle * 8);
+               host->addr_cycle    += 2;
+               /* One more address cycle for devices > 128MiB */
+               if (chip->chipsize > (128 << 20)) {
+                       host->addr_cycle += 1;
+                       /* ERASE1 has no column cycles, so the high row
+                        * byte still fits in addr_value[0]; other
+                        * commands spill it into addr_value[1]. */
+                       if (host->command == NAND_CMD_ERASE1)
+                               host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
+                       else
+                               host->addr_value[1] |= ((page_addr >> 16) & 0xff);
+               }
+       }
+}
+
+/*
+ * Central command dispatcher (nand_chip->cmdfunc).  Translates the MTD
+ * core's command stream into controller operations.  READ0/READOOB keep
+ * the cached address values valid (is_cache_invalid = 0); every other
+ * command invalidates them so the next read re-issues a full address.
+ */
+static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column,
+               int page_addr)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct hinfc_host *host = chip->priv;
+       int is_cache_invalid = 1;
+       unsigned int flag = 0;
+
+       host->command =  command;
+
+       switch (command) {
+       case NAND_CMD_READ0:
+       case NAND_CMD_READOOB:
+               /* READOOB reads start past the data area of the page */
+               if (command == NAND_CMD_READ0)
+                       host->offset = column;
+               else
+                       host->offset = column + mtd->writesize;
+
+               is_cache_invalid = 0;
+               set_addr(mtd, column, page_addr);
+               hisi_nfc_send_cmd_readstart(host);
+               break;
+
+       case NAND_CMD_SEQIN:
+               /* Only latch the target address; data arrives via
+                * write_buf and is committed on PAGEPROG. */
+               host->offset = column;
+               set_addr(mtd, column, page_addr);
+               break;
+
+       case NAND_CMD_ERASE1:
+               set_addr(mtd, column, page_addr);
+               break;
+
+       case NAND_CMD_PAGEPROG:
+               hisi_nfc_send_cmd_pageprog(host);
+               break;
+
+       case NAND_CMD_ERASE2:
+               hisi_nfc_send_cmd_erase(host);
+               break;
+
+       case NAND_CMD_READID:
+               host->offset = column;
+               memset(host->mmio, 0, 0x10);
+               hisi_nfc_send_cmd_readid(host);
+               break;
+
+       case NAND_CMD_STATUS:
+               /* Temporarily clear the ECC type field so the status
+                * read is not post-processed, then restore it. */
+               flag = hinfc_read(host, HINFC504_CON);
+               if (chip->ecc.mode == NAND_ECC_HW)
+                       hinfc_write(host,
+                                   flag & ~(HINFC504_CON_ECCTYPE_MASK <<
+                                   HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
+
+               host->offset = 0;
+               memset(host->mmio, 0, 0x10);
+               hisi_nfc_send_cmd_status(host);
+               hinfc_write(host, flag, HINFC504_CON);
+               break;
+
+       case NAND_CMD_RESET:
+               hisi_nfc_send_cmd_reset(host, host->chipselect);
+               break;
+
+       default:
+               dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n",
+                       command, column, page_addr);
+       }
+
+       if (is_cache_invalid) {
+               host->cache_addr_value[0] = ~0;
+               host->cache_addr_value[1] = ~0;
+       }
+}
+
+/*
+ * Interrupt handler.  Accumulates raw interrupt flags into
+ * host->irq_status (the ECC read path inspects and clears them later),
+ * acknowledges each source, and completes cmd_complete on DMA done.
+ * NOTE(review): IRQ_HANDLED is returned even when no known flag was
+ * set — harmless for a dedicated line, but masks spurious interrupts.
+ */
+static irqreturn_t hinfc_irq_handle(int irq, void *devid)
+{
+       struct hinfc_host *host = devid;
+       unsigned int flag;
+
+       flag = hinfc_read(host, HINFC504_INTS);
+       /* store interrupts state */
+       host->irq_status |= flag;
+
+       if (flag & HINFC504_INTS_DMA) {
+               hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
+               complete(&host->cmd_complete);
+       } else if (flag & HINFC504_INTS_CE) {
+               hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
+       } else if (flag & HINFC504_INTS_UE) {
+               hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * ecc.read_page for HW ECC.  Data/OOB were already transferred by the
+ * preceding READ0; here we only harvest the ECC outcome recorded by the
+ * IRQ handler: UE (uncorrectable) bumps ecc_stats.failed, CE reads the
+ * per-half correction counts from HINFC504_ECC_STATUS.
+ * Returns the maximum bitflip count seen, per the MTD contract.
+ * NOTE(review): only strength 16 is decoded; for any other strength the
+ * switch falls through with stat == 0 (see the TODO below).
+ */
+static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
+       struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+       struct hinfc_host *host = chip->priv;
+       int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
+       int stat_1, stat_2;
+
+       chip->read_buf(mtd, buf, mtd->writesize);
+       chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+       /* errors which can not be corrected by ECC */
+       if (host->irq_status & HINFC504_INTS_UE) {
+               mtd->ecc_stats.failed++;
+       } else if (host->irq_status & HINFC504_INTS_CE) {
+               /* TODO: need add other ECC modes! */
+               switch (chip->ecc.strength) {
+               case 16:
+                       /* 12-bit status field: two 6-bit counts, one
+                        * per half of the sector */
+                       status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
+                                       HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
+                       stat_2 = status_ecc & 0x3f;
+                       stat_1 = status_ecc >> 6 & 0x3f;
+                       stat = stat_1 + stat_2;
+                       stat_max = max_t(int, stat_1, stat_2);
+               }
+               mtd->ecc_stats.corrected += stat;
+               max_bitflips = max_t(int, max_bitflips, stat_max);
+       }
+       host->irq_status = 0;
+
+       return max_bitflips;
+}
+
+/*
+ * ecc.read_oob: read the spare area of @page and report -EBADMSG if the
+ * IRQ handler latched an uncorrectable-error flag for the transfer.
+ * irq_status is cleared on both paths so stale flags never leak into
+ * the next operation.
+ */
+static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+                               int page)
+{
+       struct hinfc_host *host = chip->priv;
+
+       chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+       chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+       if (host->irq_status & HINFC504_INTS_UE) {
+               host->irq_status = 0;
+               return -EBADMSG;
+       }
+
+       host->irq_status = 0;
+       return 0;
+}
+
+/*
+ * ecc.write_page: stage the page data (and OOB when requested) into the
+ * DMA buffer via write_buf; the controller computes ECC in hardware
+ * when PAGEPROG later triggers the actual program operation.
+ */
+static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
+               struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+       chip->write_buf(mtd, buf, mtd->writesize);
+       if (oob_required)
+               chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+       return 0;
+}
+
+/*
+ * One-time controller bring-up: reset the cached address state, program
+ * a safe default configuration (2K page, no ECC — refined later in
+ * probe/ecc_probe), set bus timing latches, and unmask the DMA irq.
+ */
+static void hisi_nfc_host_init(struct hinfc_host *host)
+{
+       struct nand_chip *chip = &host->chip;
+       unsigned int flag = 0;
+
+       host->version = hinfc_read(host, HINFC_VERSION);
+       host->addr_cycle                = 0;
+       host->addr_value[0]             = 0;
+       host->addr_value[1]             = 0;
+       host->cache_addr_value[0]       = ~0;
+       host->cache_addr_value[1]       = ~0;
+       host->chipselect                = 0;
+
+       /* default page size: 2K, ecc_none. need modify */
+       flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
+               | ((0x001 & HINFC504_CON_PAGESIZE_MASK)
+                       << HINFC504_CON_PAGEISZE_SHIFT)
+               | ((0x0 & HINFC504_CON_ECCTYPE_MASK)
+                       << HINFC504_CON_ECCTYPE_SHIFT)
+               | ((chip->options & NAND_BUSWIDTH_16) ?
+                       HINFC504_CON_BUS_WIDTH : 0);
+       hinfc_write(host, flag, HINFC504_CON);
+
+       /* scrub the MMIO window to the erased-flash value */
+       memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
+
+       hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+                   HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+       /* enable DMA irq */
+       hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
+}
+
+/* OOB layout for 2KiB pages with 16-bit ECC strength: 6 free bytes
+ * starting at offset 2; the remainder is reserved for the controller. */
+static struct nand_ecclayout nand_ecc_2K_16bits = {
+       .oobavail = 6,
+       .oobfree = { {2, 6} },
+};
+
+/*
+ * Configure hardware ECC from the device tree (nand-ecc-step-size /
+ * nand-ecc-strength), install the HW-ECC page ops, program the ECC type
+ * into HINFC504_CON, and unmask the CE/UE interrupts.
+ * Returns 0 on success or -EINVAL for unsupported size/strength combos.
+ * NOTE(review): the `size == 1024` in the second check is redundant —
+ * any other size already returned -EINVAL above.  Only strength 16 is
+ * actually implemented despite 8/24/40 passing validation.
+ */
+static int hisi_nfc_ecc_probe(struct hinfc_host *host)
+{
+       unsigned int flag;
+       int size, strength, ecc_bits;
+       struct device *dev = host->dev;
+       struct nand_chip *chip = &host->chip;
+       struct mtd_info *mtd = &host->mtd;
+       struct device_node *np = host->dev->of_node;
+
+       size = of_get_nand_ecc_step_size(np);
+       strength = of_get_nand_ecc_strength(np);
+       if (size != 1024) {
+               dev_err(dev, "error ecc size: %d\n", size);
+               return -EINVAL;
+       }
+
+       if ((size == 1024) && ((strength != 8) && (strength != 16) &&
+                               (strength != 24) && (strength != 40))) {
+               dev_err(dev, "ecc size and strength do not match\n");
+               return -EINVAL;
+       }
+
+       chip->ecc.size = size;
+       chip->ecc.strength = strength;
+
+       chip->ecc.read_page = hisi_nand_read_page_hwecc;
+       chip->ecc.read_oob = hisi_nand_read_oob;
+       chip->ecc.write_page = hisi_nand_write_page_hwecc;
+
+       switch (chip->ecc.strength) {
+       case 16:
+               ecc_bits = 6;
+               if (mtd->writesize == 2048)
+                       chip->ecc.layout = &nand_ecc_2K_16bits;
+
+               /* TODO: add more page size support */
+               break;
+
+       /* TODO: add more ecc strength support */
+       default:
+               dev_err(dev, "not support strength: %d\n", chip->ecc.strength);
+               return -EINVAL;
+       }
+
+       flag = hinfc_read(host, HINFC504_CON);
+       /* add ecc type configure */
+       flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
+                                               << HINFC504_CON_ECCTYPE_SHIFT);
+       hinfc_write(host, flag, HINFC504_CON);
+
+       /* enable ecc irq */
+       flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
+       hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
+                   HINFC504_INTEN);
+
+       return 0;
+}
+
+/*
+ * Platform probe: map the two MMIO regions (register bank + data
+ * buffer), wire up the nand_chip ops, identify the chip, allocate the
+ * coherent DMA staging buffer sized from the detected geometry, program
+ * the page-size field, optionally set up HW ECC, and register the MTD.
+ * All resources are devm/dmam-managed, so the error paths only need to
+ * unwind nand_release().
+ *
+ * NOTE(review): IRQF_DISABLED has been a no-op for years and is
+ * deprecated — it should simply be dropped.
+ * NOTE(review): on platform_get_irq() failure the actual error code is
+ * discarded in favor of -ENXIO; propagating `irq` would be better.
+ * NOTE(review): the return value of hisi_nfc_ecc_probe() is ignored, so
+ * an invalid DT ECC configuration does not fail the probe.
+ */
+static int hisi_nfc_probe(struct platform_device *pdev)
+{
+       int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP;
+       struct device *dev = &pdev->dev;
+       struct hinfc_host *host;
+       struct nand_chip  *chip;
+       struct mtd_info   *mtd;
+       struct resource   *res;
+       struct device_node *np = dev->of_node;
+       struct mtd_part_parser_data ppdata;
+
+       host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+       if (!host)
+               return -ENOMEM;
+       host->dev = dev;
+
+       platform_set_drvdata(pdev, host);
+       chip = &host->chip;
+       mtd  = &host->mtd;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "no IRQ resource defined\n");
+               ret = -ENXIO;
+               goto err_res;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       host->iobase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(host->iobase)) {
+               ret = PTR_ERR(host->iobase);
+               goto err_res;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       host->mmio = devm_ioremap_resource(dev, res);
+       if (IS_ERR(host->mmio)) {
+               ret = PTR_ERR(host->mmio);
+               dev_err(dev, "devm_ioremap_resource[1] fail\n");
+               goto err_res;
+       }
+
+       mtd->priv               = chip;
+       mtd->owner              = THIS_MODULE;
+       mtd->name               = "hisi_nand";
+       mtd->dev.parent         = &pdev->dev;
+
+       chip->priv              = host;
+       chip->cmdfunc           = hisi_nfc_cmdfunc;
+       chip->select_chip       = hisi_nfc_select_chip;
+       chip->read_byte         = hisi_nfc_read_byte;
+       chip->read_word         = hisi_nfc_read_word;
+       chip->write_buf         = hisi_nfc_write_buf;
+       chip->read_buf          = hisi_nfc_read_buf;
+       chip->chip_delay        = HINFC504_CHIP_DELAY;
+
+       chip->ecc.mode = of_get_nand_ecc_mode(np);
+
+       buswidth = of_get_nand_bus_width(np);
+       if (buswidth == 16)
+               chip->options |= NAND_BUSWIDTH_16;
+
+       hisi_nfc_host_init(host);
+
+       ret = devm_request_irq(dev, irq, hinfc_irq_handle, IRQF_DISABLED,
+                               "nandc", host);
+       if (ret) {
+               dev_err(dev, "failed to request IRQ\n");
+               goto err_res;
+       }
+
+       ret = nand_scan_ident(mtd, max_chips, NULL);
+       if (ret) {
+               ret = -ENODEV;
+               goto err_res;
+       }
+
+       /* staging buffer must hold a full page plus its OOB */
+       host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
+               &host->dma_buffer, GFP_KERNEL);
+       if (!host->buffer) {
+               ret = -ENOMEM;
+               goto err_res;
+       }
+
+       host->dma_oob = host->dma_buffer + mtd->writesize;
+       memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+       flag = hinfc_read(host, HINFC504_CON);
+       flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
+       switch (mtd->writesize) {
+       case 2048:
+               flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT); break;
+       /*
+        * TODO: add more pagesize support,
+        * default pagesize has been set in hisi_nfc_host_init
+        */
+       default:
+               dev_err(dev, "NON-2KB page size nand flash\n");
+               ret = -EINVAL;
+               goto err_res;
+       }
+       hinfc_write(host, flag, HINFC504_CON);
+
+       if (chip->ecc.mode == NAND_ECC_HW)
+               hisi_nfc_ecc_probe(host);
+
+       ret = nand_scan_tail(mtd);
+       if (ret) {
+               dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+               goto err_res;
+       }
+
+       ppdata.of_node = np;
+       ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+       if (ret) {
+               dev_err(dev, "Err MTD partition=%d\n", ret);
+               goto err_mtd;
+       }
+
+       return 0;
+
+err_mtd:
+       nand_release(mtd);
+err_res:
+       return ret;
+}
+
+/*
+ * Platform remove: unregister the MTD and free the NAND resources.
+ * MMIO mappings, IRQ and the DMA buffer are devm/dmam-managed, so
+ * nand_release() is the only explicit teardown required.
+ */
+static int hisi_nfc_remove(struct platform_device *pdev)
+{
+       struct hinfc_host *host = platform_get_drvdata(pdev);
+       struct mtd_info *mtd = &host->mtd;
+
+       nand_release(mtd);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * PM suspend hook: poll until the controller looks quiescent, bailing
+ * out with -EAGAIN after HINFC504_NFC_PM_TIMEOUT.
+ * NOTE(review): the success condition requires STATUS bit0 clear AND
+ * HINFC504_DMA_CTRL_DMA_START *set* — allowing suspend while a DMA
+ * start bit is asserted looks inverted; confirm against the controller
+ * datasheet (one would expect !(DMA_CTRL & DMA_START)).
+ * NOTE(review): the cond_resched() is on the success path only, so the
+ * timeout loop busy-waits.
+ */
+static int hisi_nfc_suspend(struct device *dev)
+{
+       struct hinfc_host *host = dev_get_drvdata(dev);
+       unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
+
+       while (time_before(jiffies, timeout)) {
+               if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
+                   (hinfc_read(host, HINFC504_DMA_CTRL) &
+                    HINFC504_DMA_CTRL_DMA_START)) {
+                       cond_resched();
+                       return 0;
+               }
+       }
+
+       dev_err(host->dev, "nand controller suspend timeout.\n");
+
+       return -EAGAIN;
+}
+
+/*
+ * PM resume hook: reset every detected chip and reprogram the bus
+ * timing latches (the only hardware state hisi_nfc_host_init() sets
+ * that is lost across suspend and not reconfigured elsewhere).
+ */
+static int hisi_nfc_resume(struct device *dev)
+{
+       int cs;
+       struct hinfc_host *host = dev_get_drvdata(dev);
+       struct nand_chip *chip = &host->chip;
+
+       for (cs = 0; cs < chip->numchips; cs++)
+               hisi_nfc_send_cmd_reset(host, cs);
+       hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+                   HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+       return 0;
+}
+#endif
+/* PM ops resolve to empty when CONFIG_PM_SLEEP is off, matching the
+ * #ifdef around the suspend/resume handlers above. */
+static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
+
+/* Device-tree match table: binds to "hisilicon,504-nfc" nodes. */
+static const struct of_device_id nfc_id_table[] = {
+       { .compatible = "hisilicon,504-nfc" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, nfc_id_table);
+
+static struct platform_driver hisi_nfc_driver = {
+       .driver = {
+               .name  = "hisi_nand",
+               .of_match_table = nfc_id_table,
+               .pm = &hisi_nfc_pm_ops,
+       },
+       .probe          = hisi_nfc_probe,
+       .remove         = hisi_nfc_remove,
+};
+
+module_platform_driver(hisi_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhou Wang");
+MODULE_AUTHOR("Zhiyong Cai");
+MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver");
index 1633ec9c51082fb6ad33b90c053ee41886bec332..ebf2cce04cba14a3caa3baf9d503aa17a730e969 100644 (file)
@@ -69,7 +69,7 @@ struct jz_nand {
 
        int selected_bank;
 
-       struct jz_nand_platform_data *pdata;
+       struct gpio_desc *busy_gpio;
        bool is_reading;
 };
 
@@ -131,7 +131,7 @@ static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
 static int jz_nand_dev_ready(struct mtd_info *mtd)
 {
        struct jz_nand *nand = mtd_to_jz_nand(mtd);
-       return gpio_get_value_cansleep(nand->pdata->busy_gpio);
+       return gpiod_get_value_cansleep(nand->busy_gpio);
 }
 
 static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
@@ -423,14 +423,12 @@ static int jz_nand_probe(struct platform_device *pdev)
        if (ret)
                goto err_free;
 
-       if (pdata && gpio_is_valid(pdata->busy_gpio)) {
-               ret = gpio_request(pdata->busy_gpio, "NAND busy pin");
-               if (ret) {
-                       dev_err(&pdev->dev,
-                               "Failed to request busy gpio %d: %d\n",
-                               pdata->busy_gpio, ret);
-                       goto err_iounmap_mmio;
-               }
+       nand->busy_gpio = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN);
+       if (IS_ERR(nand->busy_gpio)) {
+               ret = PTR_ERR(nand->busy_gpio);
+               dev_err(&pdev->dev, "Failed to request busy gpio %d\n",
+                   ret);
+               goto err_iounmap_mmio;
        }
 
        mtd             = &nand->mtd;
@@ -454,10 +452,9 @@ static int jz_nand_probe(struct platform_device *pdev)
        chip->cmd_ctrl = jz_nand_cmd_ctrl;
        chip->select_chip = jz_nand_select_chip;
 
-       if (pdata && gpio_is_valid(pdata->busy_gpio))
+       if (nand->busy_gpio)
                chip->dev_ready = jz_nand_dev_ready;
 
-       nand->pdata = pdata;
        platform_set_drvdata(pdev, nand);
 
        /* We are going to autodetect NAND chips in the banks specified in the
@@ -496,7 +493,7 @@ static int jz_nand_probe(struct platform_device *pdev)
        }
        if (chipnr == 0) {
                dev_err(&pdev->dev, "No NAND chips found\n");
-               goto err_gpio_busy;
+               goto err_iounmap_mmio;
        }
 
        if (pdata && pdata->ident_callback) {
@@ -533,9 +530,6 @@ err_unclaim_banks:
                                         nand->bank_base[bank - 1]);
        }
        writel(0, nand->base + JZ_REG_NAND_CTRL);
-err_gpio_busy:
-       if (pdata && gpio_is_valid(pdata->busy_gpio))
-               gpio_free(pdata->busy_gpio);
 err_iounmap_mmio:
        jz_nand_iounmap_resource(nand->mem, nand->base);
 err_free:
@@ -546,7 +540,6 @@ err_free:
 static int jz_nand_remove(struct platform_device *pdev)
 {
        struct jz_nand *nand = platform_get_drvdata(pdev);
-       struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        size_t i;
 
        nand_release(&nand->mtd);
@@ -562,8 +555,6 @@ static int jz_nand_remove(struct platform_device *pdev)
                        gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
                }
        }
-       if (pdata && gpio_is_valid(pdata->busy_gpio))
-               gpio_free(pdata->busy_gpio);
 
        jz_nand_iounmap_resource(nand->mem, nand->base);
 
index 41585dfb206f05caef275c95799ccf53508abb94..df7eb4ff07d156ec213b3656f9840f6e99a35710 100644 (file)
@@ -156,7 +156,6 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
 }
 
 /**
- * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
  * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
  * @mtd: MTD device structure
  *
@@ -1751,11 +1750,10 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
 static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
                                  int page)
 {
-       uint8_t *buf = chip->oob_poi;
        int length = mtd->oobsize;
        int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
        int eccsize = chip->ecc.size;
-       uint8_t *bufpoi = buf;
+       uint8_t *bufpoi = chip->oob_poi;
        int i, toread, sndrnd = 0, pos;
 
        chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
@@ -2944,6 +2942,16 @@ static void nand_resume(struct mtd_info *mtd)
                        __func__);
 }
 
+/**
+ * nand_shutdown - [MTD Interface] Finish the current NAND operation and
+ *                 prevent further operations
+ * @mtd: MTD device structure
+ */
+static void nand_shutdown(struct mtd_info *mtd)
+{
+       nand_get_device(mtd, FL_SHUTDOWN);
+}
+
 /* Set default functions */
 static void nand_set_defaults(struct nand_chip *chip, int busw)
 {
@@ -4028,22 +4036,24 @@ int nand_scan_tail(struct mtd_info *mtd)
                ecc->read_oob = nand_read_oob_std;
                ecc->write_oob = nand_write_oob_std;
                /*
-                * Board driver should supply ecc.size and ecc.bytes values to
-                * select how many bits are correctable; see nand_bch_init()
-                * for details. Otherwise, default to 4 bits for large page
-                * devices.
+                * Board driver should supply ecc.size and ecc.strength values
+                * to select how many bits are correctable. Otherwise, default
+                * to 4 bits for large page devices.
                 */
                if (!ecc->size && (mtd->oobsize >= 64)) {
                        ecc->size = 512;
-                       ecc->bytes = DIV_ROUND_UP(13 * ecc->strength, 8);
+                       ecc->strength = 4;
                }
+
+               /* See nand_bch_init() for details. */
+               ecc->bytes = DIV_ROUND_UP(
+                               ecc->strength * fls(8 * ecc->size), 8);
                ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
                                               &ecc->layout);
                if (!ecc->priv) {
                        pr_warn("BCH ECC initialization failed!\n");
                        BUG();
                }
-               ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);
                break;
 
        case NAND_ECC_NONE:
@@ -4146,6 +4156,7 @@ int nand_scan_tail(struct mtd_info *mtd)
        mtd->_unlock = NULL;
        mtd->_suspend = nand_suspend;
        mtd->_resume = nand_resume;
+       mtd->_reboot = nand_shutdown;
        mtd->_block_isreserved = nand_block_isreserved;
        mtd->_block_isbad = nand_block_isbad;
        mtd->_block_markbad = nand_block_markbad;
@@ -4161,7 +4172,7 @@ int nand_scan_tail(struct mtd_info *mtd)
         * properly set.
         */
        if (!mtd->bitflip_threshold)
-               mtd->bitflip_threshold = mtd->ecc_strength;
+               mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
 
        /* Check, if we should skip the bad block table scan */
        if (chip->options & NAND_SKIP_BBTSCAN)
index ab5bbf5674394438555dcb43df7ea2a0ccfb1cec..f2324271b94e9d19cc39aafae95cc0ccf934fc42 100644 (file)
@@ -245,7 +245,6 @@ MODULE_PARM_DESC(bch,                "Enable BCH ecc and set how many bits should "
 #define STATE_DATAOUT          0x00001000 /* waiting for page data output */
 #define STATE_DATAOUT_ID       0x00002000 /* waiting for ID bytes output */
 #define STATE_DATAOUT_STATUS   0x00003000 /* waiting for status output */
-#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
 #define STATE_DATAOUT_MASK     0x00007000 /* data output states mask */
 
 /* Previous operation is done, ready to accept new requests */
@@ -269,7 +268,6 @@ MODULE_PARM_DESC(bch,                "Enable BCH ecc and set how many bits should "
 #define OPT_ANY          0xFFFFFFFF /* any chip supports this operation */
 #define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
 #define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
-#define OPT_SMARTMEDIA   0x00000010 /* SmartMedia technology chips */
 #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
 #define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
 #define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -1096,8 +1094,6 @@ static char *get_state_name(uint32_t state)
                        return "STATE_DATAOUT_ID";
                case STATE_DATAOUT_STATUS:
                        return "STATE_DATAOUT_STATUS";
-               case STATE_DATAOUT_STATUS_M:
-                       return "STATE_DATAOUT_STATUS_M";
                case STATE_READY:
                        return "STATE_READY";
                case STATE_UNKNOWN:
@@ -1865,7 +1861,6 @@ static void switch_state(struct nandsim *ns)
                                break;
 
                        case STATE_DATAOUT_STATUS:
-                       case STATE_DATAOUT_STATUS_M:
                                ns->regs.count = ns->regs.num = 0;
                                break;
 
@@ -2005,7 +2000,6 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
                }
 
                if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
-                       || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
                        || NS_STATE(ns->state) == STATE_DATAOUT) {
                        int row = ns->regs.row;
 
@@ -2343,6 +2337,7 @@ static int __init ns_init_module(void)
                }
                chip->ecc.mode = NAND_ECC_SOFT_BCH;
                chip->ecc.size = 512;
+               chip->ecc.strength = bch;
                chip->ecc.bytes = eccbytes;
                NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
        }
index 63f858e6bf3987a03c7564c3fc3db0da0c3f106f..60fa89939c24861e487354d033fb6c2b8d95eeaf 100644 (file)
@@ -1048,10 +1048,9 @@ static int omap_dev_ready(struct mtd_info *mtd)
  * @mtd: MTD device structure
  * @mode: Read/Write mode
  *
- * When using BCH, sector size is hardcoded to 512 bytes.
- * Using wrapping mode 6 both for reading and writing if ELM module not uses
- * for error correction.
- * On writing,
+ * When using BCH with SW correction (i.e. no ELM), sector size is set
+ * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
+ * for both reading and writing with:
  * eccsize0 = 0  (no additional protected byte in spare area)
  * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
  */
@@ -1071,15 +1070,9 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
        case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
                bch_type = 0;
                nsectors = 1;
-               if (mode == NAND_ECC_READ) {
-                       wr_mode   = BCH_WRAPMODE_6;
-                       ecc_size0 = BCH_ECC_SIZE0;
-                       ecc_size1 = BCH_ECC_SIZE1;
-               } else {
-                       wr_mode   = BCH_WRAPMODE_6;
-                       ecc_size0 = BCH_ECC_SIZE0;
-                       ecc_size1 = BCH_ECC_SIZE1;
-               }
+               wr_mode   = BCH_WRAPMODE_6;
+               ecc_size0 = BCH_ECC_SIZE0;
+               ecc_size1 = BCH_ECC_SIZE1;
                break;
        case OMAP_ECC_BCH4_CODE_HW:
                bch_type = 0;
@@ -1097,15 +1090,9 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
        case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
                bch_type = 1;
                nsectors = 1;
-               if (mode == NAND_ECC_READ) {
-                       wr_mode   = BCH_WRAPMODE_6;
-                       ecc_size0 = BCH_ECC_SIZE0;
-                       ecc_size1 = BCH_ECC_SIZE1;
-               } else {
-                       wr_mode   = BCH_WRAPMODE_6;
-                       ecc_size0 = BCH_ECC_SIZE0;
-                       ecc_size1 = BCH_ECC_SIZE1;
-               }
+               wr_mode   = BCH_WRAPMODE_6;
+               ecc_size0 = BCH_ECC_SIZE0;
+               ecc_size1 = BCH_ECC_SIZE1;
                break;
        case OMAP_ECC_BCH8_CODE_HW:
                bch_type = 1;
index ccaa8e28338855effbf10724f9b6fae2df215466..6f93b2990d250e76007ee355aed8054752aa33f9 100644 (file)
@@ -1110,8 +1110,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
 
        switch (ecc->mode) {
        case NAND_ECC_SOFT_BCH:
-               ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * ecc->size),
-                                         8);
                break;
        case NAND_ECC_HW:
                ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
index 51b9d6af307f616193ebc006b492a5fecedd535b..a5dfbfbebfcafe7a2d4b7161a186cea565fe9ff1 100644 (file)
@@ -89,9 +89,10 @@ static int find_boot_record(struct NFTLrecord *nftl)
                }
 
                /* To be safer with BIOS, also use erase mark as discriminant */
-               if ((ret = nftl_read_oob(mtd, block * nftl->EraseSize +
+               ret = nftl_read_oob(mtd, block * nftl->EraseSize +
                                         SECTORSIZE + 8, 8, &retlen,
-                                        (char *)&h1) < 0)) {
+                                        (char *)&h1);
+               if (ret < 0) {
                        printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
                               block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
                        continue;
@@ -109,8 +110,9 @@ static int find_boot_record(struct NFTLrecord *nftl)
                }
 
                /* Finally reread to check ECC */
-               if ((ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
-                                    &retlen, buf) < 0)) {
+               ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
+                               &retlen, buf);
+               if (ret < 0) {
                        printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
                               block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
                        continue;
@@ -228,9 +230,11 @@ device is already correct.
 The new DiskOnChip driver already scanned the bad block table.  Just query it.
                        if ((i & (SECTORSIZE - 1)) == 0) {
                                /* read one sector for every SECTORSIZE of blocks */
-                               if ((ret = mtd->read(nftl->mbd.mtd, block * nftl->EraseSize +
-                                                    i + SECTORSIZE, SECTORSIZE, &retlen,
-                                                    buf)) < 0) {
+                               ret = mtd->read(nftl->mbd.mtd,
+                                               block * nftl->EraseSize + i +
+                                               SECTORSIZE, SECTORSIZE,
+                                               &retlen, buf);
+                               if (ret < 0) {
                                        printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
                                               ret);
                                        kfree(nftl->ReplUnitTable);
index 39763b94f67d48bcc4ec9a83b5481b3f12b33ceb..1c7308c2c77d9b54beb22ad626a0b9d813052a48 100644 (file)
@@ -57,7 +57,9 @@
 
 #define QUADSPI_BUF3CR                 0x1c
 #define QUADSPI_BUF3CR_ALLMST_SHIFT    31
-#define QUADSPI_BUF3CR_ALLMST          (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
+#define QUADSPI_BUF3CR_ALLMST_MASK     (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
+#define QUADSPI_BUF3CR_ADATSZ_SHIFT            8
+#define QUADSPI_BUF3CR_ADATSZ_MASK     (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT)
 
 #define QUADSPI_BFGENCR                        0x20
 #define QUADSPI_BFGENCR_PAR_EN_SHIFT   16
@@ -198,18 +200,21 @@ struct fsl_qspi_devtype_data {
        enum fsl_qspi_devtype devtype;
        int rxfifo;
        int txfifo;
+       int ahb_buf_size;
 };
 
 static struct fsl_qspi_devtype_data vybrid_data = {
        .devtype = FSL_QUADSPI_VYBRID,
        .rxfifo = 128,
-       .txfifo = 64
+       .txfifo = 64,
+       .ahb_buf_size = 1024
 };
 
 static struct fsl_qspi_devtype_data imx6sx_data = {
        .devtype = FSL_QUADSPI_IMX6SX,
        .rxfifo = 128,
-       .txfifo = 512
+       .txfifo = 512,
+       .ahb_buf_size = 1024
 };
 
 #define FSL_QSPI_MAX_CHIP      4
@@ -227,6 +232,7 @@ struct fsl_qspi {
        u32 nor_num;
        u32 clk_rate;
        unsigned int chip_base_addr; /* We may support two chips. */
+       bool has_second_chip;
 };
 
 static inline int is_vybrid_qspi(struct fsl_qspi *q)
@@ -583,7 +589,12 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
        writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
        writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
        writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
-       writel(QUADSPI_BUF3CR_ALLMST, base + QUADSPI_BUF3CR);
+       /*
+        * Set ADATSZ with the maximum AHB buffer size to improve the
+        * read performance.
+        */
+       writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8)
+                       << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR);
 
        /* We only use the buffer3 */
        writel(0, base + QUADSPI_BUF0IND);
@@ -783,7 +794,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
        struct spi_nor *nor;
        struct mtd_info *mtd;
        int ret, i = 0;
-       bool has_second_chip = false;
        const struct of_device_id *of_id =
                        of_match_device(fsl_qspi_dt_ids, &pdev->dev);
 
@@ -798,37 +808,30 @@ static int fsl_qspi_probe(struct platform_device *pdev)
        /* find the resources */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
        q->iobase = devm_ioremap_resource(dev, res);
-       if (IS_ERR(q->iobase)) {
-               ret = PTR_ERR(q->iobase);
-               goto map_failed;
-       }
+       if (IS_ERR(q->iobase))
+               return PTR_ERR(q->iobase);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                        "QuadSPI-memory");
        q->ahb_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(q->ahb_base)) {
-               ret = PTR_ERR(q->ahb_base);
-               goto map_failed;
-       }
+       if (IS_ERR(q->ahb_base))
+               return PTR_ERR(q->ahb_base);
+
        q->memmap_phy = res->start;
 
        /* find the clocks */
        q->clk_en = devm_clk_get(dev, "qspi_en");
-       if (IS_ERR(q->clk_en)) {
-               ret = PTR_ERR(q->clk_en);
-               goto map_failed;
-       }
+       if (IS_ERR(q->clk_en))
+               return PTR_ERR(q->clk_en);
 
        q->clk = devm_clk_get(dev, "qspi");
-       if (IS_ERR(q->clk)) {
-               ret = PTR_ERR(q->clk);
-               goto map_failed;
-       }
+       if (IS_ERR(q->clk))
+               return PTR_ERR(q->clk);
 
        ret = clk_prepare_enable(q->clk_en);
        if (ret) {
                dev_err(dev, "can not enable the qspi_en clock\n");
-               goto map_failed;
+               return ret;
        }
 
        ret = clk_prepare_enable(q->clk);
@@ -860,14 +863,14 @@ static int fsl_qspi_probe(struct platform_device *pdev)
                goto irq_failed;
 
        if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
-               has_second_chip = true;
+               q->has_second_chip = true;
 
        /* iterate the subnodes. */
        for_each_available_child_of_node(dev->of_node, np) {
                char modalias[40];
 
                /* skip the holes */
-               if (!has_second_chip)
+               if (!q->has_second_chip)
                        i *= 2;
 
                nor = &q->nor[i];
@@ -890,24 +893,24 @@ static int fsl_qspi_probe(struct platform_device *pdev)
 
                ret = of_modalias_node(np, modalias, sizeof(modalias));
                if (ret < 0)
-                       goto map_failed;
+                       goto irq_failed;
 
                ret = of_property_read_u32(np, "spi-max-frequency",
                                &q->clk_rate);
                if (ret < 0)
-                       goto map_failed;
+                       goto irq_failed;
 
                /* set the chip address for READID */
                fsl_qspi_set_base_addr(q, nor);
 
                ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
                if (ret)
-                       goto map_failed;
+                       goto irq_failed;
 
                ppdata.of_node = np;
                ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
                if (ret)
-                       goto map_failed;
+                       goto irq_failed;
 
                /* Set the correct NOR size now. */
                if (q->nor_size == 0) {
@@ -939,19 +942,19 @@ static int fsl_qspi_probe(struct platform_device *pdev)
 
        clk_disable(q->clk);
        clk_disable(q->clk_en);
-       dev_info(dev, "QuadSPI SPI NOR flash driver\n");
        return 0;
 
 last_init_failed:
-       for (i = 0; i < q->nor_num; i++)
+       for (i = 0; i < q->nor_num; i++) {
+               /* skip the holes */
+               if (!q->has_second_chip)
+                       i *= 2;
                mtd_device_unregister(&q->mtd[i]);
-
+       }
 irq_failed:
        clk_disable_unprepare(q->clk);
 clk_failed:
        clk_disable_unprepare(q->clk_en);
-map_failed:
-       dev_err(dev, "Freescale QuadSPI probe failed\n");
        return ret;
 }
 
@@ -960,8 +963,12 @@ static int fsl_qspi_remove(struct platform_device *pdev)
        struct fsl_qspi *q = platform_get_drvdata(pdev);
        int i;
 
-       for (i = 0; i < q->nor_num; i++)
+       for (i = 0; i < q->nor_num; i++) {
+               /* skip the holes */
+               if (!q->has_second_chip)
+                       i *= 2;
                mtd_device_unregister(&q->mtd[i]);
+       }
 
        /* disable the hardware */
        writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
@@ -972,6 +979,22 @@ static int fsl_qspi_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       return 0;
+}
+
+static int fsl_qspi_resume(struct platform_device *pdev)
+{
+       struct fsl_qspi *q = platform_get_drvdata(pdev);
+
+       fsl_qspi_nor_setup(q);
+       fsl_qspi_set_map_addr(q);
+       fsl_qspi_nor_setup_last(q);
+
+       return 0;
+}
+
 static struct platform_driver fsl_qspi_driver = {
        .driver = {
                .name   = "fsl-quadspi",
@@ -980,6 +1003,8 @@ static struct platform_driver fsl_qspi_driver = {
        },
        .probe          = fsl_qspi_probe,
        .remove         = fsl_qspi_remove,
+       .suspend        = fsl_qspi_suspend,
+       .resume         = fsl_qspi_resume,
 };
 module_platform_driver(fsl_qspi_driver);
 
index 0f8ec3c2d015815673c5a6e51f351fe55cbe48de..b6a5a0c269e1d29f2dbf46f2ab2f2117dbe96417 100644 (file)
@@ -538,6 +538,7 @@ static const struct spi_device_id spi_nor_ids[] = {
        /* GigaDevice */
        { "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) },
        { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+       { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
 
        /* Intel/Numonyx -- xxxs33b */
        { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
@@ -560,14 +561,14 @@ static const struct spi_device_id spi_nor_ids[] = {
        { "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
 
        /* Micron */
-       { "n25q032",     INFO(0x20ba16, 0, 64 * 1024,   64, 0) },
-       { "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, 0) },
-       { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, 0) },
-       { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, 0) },
-       { "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K) },
-       { "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
-       { "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, USE_FSR) },
-       { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, USE_FSR) },
+       { "n25q032",     INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
+       { "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SPI_NOR_QUAD_READ) },
+       { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SPI_NOR_QUAD_READ) },
+       { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SPI_NOR_QUAD_READ) },
+       { "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
+       { "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+       { "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+       { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
 
        /* PMC */
        { "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
@@ -891,6 +892,45 @@ static int spansion_quad_enable(struct spi_nor *nor)
        return 0;
 }
 
+static int micron_quad_enable(struct spi_nor *nor)
+{
+       int ret;
+       u8 val;
+
+       ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+       if (ret < 0) {
+               dev_err(nor->dev, "error %d reading EVCR\n", ret);
+               return ret;
+       }
+
+       write_enable(nor);
+
+       /* set EVCR, enable quad I/O */
+       nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
+       ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1, 0);
+       if (ret < 0) {
+               dev_err(nor->dev, "error while writing EVCR register\n");
+               return ret;
+       }
+
+       ret = spi_nor_wait_till_ready(nor);
+       if (ret)
+               return ret;
+
+       /* read EVCR and check it */
+       ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+       if (ret < 0) {
+               dev_err(nor->dev, "error %d reading EVCR\n", ret);
+               return ret;
+       }
+       if (val & EVCR_QUAD_EN_MICRON) {
+               dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
 {
        int status;
@@ -903,6 +943,13 @@ static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
                        return -EINVAL;
                }
                return status;
+       case CFI_MFR_ST:
+               status = micron_quad_enable(nor);
+               if (status) {
+                       dev_err(nor->dev, "Micron quad-read not enabled\n");
+                       return -EINVAL;
+               }
+               return status;
        default:
                status = spansion_quad_enable(nor);
                if (status) {
index 84673ebcf428846fadf26ae881c39172fd45d612..df51d6025a9017413500046edb78401abe0dfdb3 100644 (file)
@@ -157,7 +157,7 @@ config IPVLAN
       making it transparent to the connected L2 switch.
 
       Ipvlan devices can be added using the "ip" command from the
-      iproute2 package starting with the iproute2-X.Y.ZZ release:
+      iproute2 package starting with the iproute2-3.19 release:
 
       "ip link add link <main-dev> [ NAME ] type ipvlan"
 
index 4ce6ca5f3d365a48ab554c7477b157437fff9357..dc6b78e5342f937e6b3b2d498d4694bc0d0fe6c0 100644 (file)
@@ -40,7 +40,7 @@ config DEV_APPLETALK
 
 config LTPC
        tristate "Apple/Farallon LocalTalk PC support"
-       depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
+       depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
        help
          This allows you to use the AppleTalk PC card to connect to LocalTalk
          networks. The card is also known as the Farallon PhoneNet PC card.
index 675b082283d64bb7bd6d962516317ac0a565aa82..c026ce9cd7b6f52f1a6bff88b9e6053b13ecebcd 100644 (file)
@@ -928,6 +928,39 @@ static inline void slave_disable_netpoll(struct slave *slave)
 
 static void bond_poll_controller(struct net_device *bond_dev)
 {
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave = NULL;
+       struct list_head *iter;
+       struct ad_info ad_info;
+       struct netpoll_info *ni;
+       const struct net_device_ops *ops;
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               if (bond_3ad_get_active_agg_info(bond, &ad_info))
+                       return;
+
+       rcu_read_lock_bh();
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               ops = slave->dev->netdev_ops;
+               if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+                       continue;
+
+               if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+                       struct aggregator *agg =
+                           SLAVE_AD_INFO(slave)->port.aggregator;
+
+                       if (agg &&
+                           agg->aggregator_identifier != ad_info.aggregator_id)
+                               continue;
+               }
+
+               ni = rcu_dereference_bh(slave->dev->npinfo);
+               if (down_trylock(&ni->dev_lock))
+                       continue;
+               ops->ndo_poll_controller(slave->dev);
+               up(&ni->dev_lock);
+       }
+       rcu_read_unlock_bh();
 }
 
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
index e7a6363e736b6f2b35256dfd8fb957cf62abdbe0..27ad312e7abf34bdf94a86ce66fa5ae900da1aed 100644 (file)
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#include <asm/bfin_can.h>
 #include <asm/portmux.h>
 
 #define DRV_NAME "bfin_can"
 #define BFIN_CAN_TIMEOUT 100
 #define TX_ECHO_SKB_MAX  1
 
+/* transmit and receive channels */
+#define TRANSMIT_CHL 24
+#define RECEIVE_STD_CHL 0
+#define RECEIVE_EXT_CHL 4
+#define RECEIVE_RTR_CHL 8
+#define RECEIVE_EXT_RTR_CHL 12
+#define MAX_CHL_NUMBER 32
+
+/* All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits.  So use a helper macro to streamline this
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/* bfin can registers layout */
+struct bfin_can_mask_regs {
+       __BFP(aml);
+       __BFP(amh);
+};
+
+struct bfin_can_channel_regs {
+       /* data[0,2,4,6] -> data{0,1,2,3} while data[1,3,5,7] is padding */
+       u16 data[8];
+       __BFP(dlc);
+       __BFP(tsv);
+       __BFP(id0);
+       __BFP(id1);
+};
+
+struct bfin_can_regs {
+       /* global control and status registers */
+       __BFP(mc1);             /* offset 0x00 */
+       __BFP(md1);             /* offset 0x04 */
+       __BFP(trs1);            /* offset 0x08 */
+       __BFP(trr1);            /* offset 0x0c */
+       __BFP(ta1);             /* offset 0x10 */
+       __BFP(aa1);             /* offset 0x14 */
+       __BFP(rmp1);            /* offset 0x18 */
+       __BFP(rml1);            /* offset 0x1c */
+       __BFP(mbtif1);          /* offset 0x20 */
+       __BFP(mbrif1);          /* offset 0x24 */
+       __BFP(mbim1);           /* offset 0x28 */
+       __BFP(rfh1);            /* offset 0x2c */
+       __BFP(opss1);           /* offset 0x30 */
+       u32 __pad1[3];
+       __BFP(mc2);             /* offset 0x40 */
+       __BFP(md2);             /* offset 0x44 */
+       __BFP(trs2);            /* offset 0x48 */
+       __BFP(trr2);            /* offset 0x4c */
+       __BFP(ta2);             /* offset 0x50 */
+       __BFP(aa2);             /* offset 0x54 */
+       __BFP(rmp2);            /* offset 0x58 */
+       __BFP(rml2);            /* offset 0x5c */
+       __BFP(mbtif2);          /* offset 0x60 */
+       __BFP(mbrif2);          /* offset 0x64 */
+       __BFP(mbim2);           /* offset 0x68 */
+       __BFP(rfh2);            /* offset 0x6c */
+       __BFP(opss2);           /* offset 0x70 */
+       u32 __pad2[3];
+       __BFP(clock);           /* offset 0x80 */
+       __BFP(timing);          /* offset 0x84 */
+       __BFP(debug);           /* offset 0x88 */
+       __BFP(status);          /* offset 0x8c */
+       __BFP(cec);             /* offset 0x90 */
+       __BFP(gis);             /* offset 0x94 */
+       __BFP(gim);             /* offset 0x98 */
+       __BFP(gif);             /* offset 0x9c */
+       __BFP(control);         /* offset 0xa0 */
+       __BFP(intr);            /* offset 0xa4 */
+       __BFP(version);         /* offset 0xa8 */
+       __BFP(mbtd);            /* offset 0xac */
+       __BFP(ewr);             /* offset 0xb0 */
+       __BFP(esr);             /* offset 0xb4 */
+       u32 __pad3[2];
+       __BFP(ucreg);           /* offset 0xc0 */
+       __BFP(uccnt);           /* offset 0xc4 */
+       __BFP(ucrc);            /* offset 0xc8 */
+       __BFP(uccnf);           /* offset 0xcc */
+       u32 __pad4[1];
+       __BFP(version2);        /* offset 0xd4 */
+       u32 __pad5[10];
+
+       /* channel(mailbox) mask and message registers */
+       struct bfin_can_mask_regs msk[MAX_CHL_NUMBER];          /* offset 0x100 */
+       struct bfin_can_channel_regs chl[MAX_CHL_NUMBER];       /* offset 0x200 */
+};
+
+#undef __BFP
+
+#define SRS 0x0001             /* Software Reset */
+#define SER 0x0008             /* Stuff Error */
+#define BOIM 0x0008            /* Enable Bus Off Interrupt */
+#define CCR 0x0080             /* CAN Configuration Mode Request */
+#define CCA 0x0080             /* Configuration Mode Acknowledge */
+#define SAM 0x0080             /* Sampling */
+#define AME 0x8000             /* Acceptance Mask Enable */
+#define RMLIM 0x0080           /* Enable RX Message Lost Interrupt */
+#define RMLIS 0x0080           /* RX Message Lost IRQ Status */
+#define RTR 0x4000             /* Remote Frame Transmission Request */
+#define BOIS 0x0008            /* Bus Off IRQ Status */
+#define IDE 0x2000             /* Identifier Extension */
+#define EPIS 0x0004            /* Error-Passive Mode IRQ Status */
+#define EPIM 0x0004            /* Enable Error-Passive Mode Interrupt */
+#define EWTIS 0x0001           /* TX Error Count IRQ Status */
+#define EWRIS 0x0002           /* RX Error Count IRQ Status */
+#define BEF 0x0040             /* Bit Error Flag */
+#define FER 0x0080             /* Form Error Flag */
+#define SMR 0x0020             /* Sleep Mode Request */
+#define SMACK 0x0008           /* Sleep Mode Acknowledge */
+
 /*
  * bfin can private data
  */
@@ -78,8 +186,8 @@ static int bfin_can_set_bittiming(struct net_device *dev)
        if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
                timing |= SAM;
 
-       bfin_write(&reg->clock, clk);
-       bfin_write(&reg->timing, timing);
+       writew(clk, &reg->clock);
+       writew(timing, &reg->timing);
 
        netdev_info(dev, "setting CLOCK=0x%04x TIMING=0x%04x\n", clk, timing);
 
@@ -94,16 +202,14 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
        int i;
 
        /* disable interrupts */
-       bfin_write(&reg->mbim1, 0);
-       bfin_write(&reg->mbim2, 0);
-       bfin_write(&reg->gim, 0);
+       writew(0, &reg->mbim1);
+       writew(0, &reg->mbim2);
+       writew(0, &reg->gim);
 
        /* reset can and enter configuration mode */
-       bfin_write(&reg->control, SRS | CCR);
-       SSYNC();
-       bfin_write(&reg->control, CCR);
-       SSYNC();
-       while (!(bfin_read(&reg->control) & CCA)) {
+       writew(SRS | CCR, &reg->control);
+       writew(CCR, &reg->control);
+       while (!(readw(&reg->control) & CCA)) {
                udelay(10);
                if (--timeout == 0) {
                        netdev_err(dev, "fail to enter configuration mode\n");
@@ -116,34 +222,33 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
         * by writing to CAN Mailbox Configuration Registers 1 and 2
         * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
         */
-       bfin_write(&reg->mc1, 0);
-       bfin_write(&reg->mc2, 0);
+       writew(0, &reg->mc1);
+       writew(0, &reg->mc2);
 
        /* Set Mailbox Direction */
-       bfin_write(&reg->md1, 0xFFFF);   /* mailbox 1-16 are RX */
-       bfin_write(&reg->md2, 0);   /* mailbox 17-32 are TX */
+       writew(0xFFFF, &reg->md1);   /* mailbox 1-16 are RX */
+       writew(0, &reg->md2);   /* mailbox 17-32 are TX */
 
        /* RECEIVE_STD_CHL */
        for (i = 0; i < 2; i++) {
-               bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
-               bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
-               bfin_write(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
-               bfin_write(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
-               bfin_write(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
+               writew(0, &reg->chl[RECEIVE_STD_CHL + i].id0);
+               writew(AME, &reg->chl[RECEIVE_STD_CHL + i].id1);
+               writew(0, &reg->chl[RECEIVE_STD_CHL + i].dlc);
+               writew(0x1FFF, &reg->msk[RECEIVE_STD_CHL + i].amh);
+               writew(0xFFFF, &reg->msk[RECEIVE_STD_CHL + i].aml);
        }
 
        /* RECEIVE_EXT_CHL */
        for (i = 0; i < 2; i++) {
-               bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
-               bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
-               bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
-               bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
-               bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
+               writew(0, &reg->chl[RECEIVE_EXT_CHL + i].id0);
+               writew(AME | IDE, &reg->chl[RECEIVE_EXT_CHL + i].id1);
+               writew(0, &reg->chl[RECEIVE_EXT_CHL + i].dlc);
+               writew(0x1FFF, &reg->msk[RECEIVE_EXT_CHL + i].amh);
+               writew(0xFFFF, &reg->msk[RECEIVE_EXT_CHL + i].aml);
        }
 
-       bfin_write(&reg->mc2, BIT(TRANSMIT_CHL - 16));
-       bfin_write(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
-       SSYNC();
+       writew(BIT(TRANSMIT_CHL - 16), &reg->mc2);
+       writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mc1);
 
        priv->can.state = CAN_STATE_STOPPED;
 }
@@ -157,9 +262,9 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
        /*
         * leave configuration mode
         */
-       bfin_write(&reg->control, bfin_read(&reg->control) & ~CCR);
+       writew(readw(&reg->control) & ~CCR, &reg->control);
 
-       while (bfin_read(&reg->status) & CCA) {
+       while (readw(&reg->status) & CCA) {
                udelay(10);
                if (--timeout == 0) {
                        netdev_err(dev, "fail to leave configuration mode\n");
@@ -170,26 +275,25 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
        /*
         * clear _All_  tx and rx interrupts
         */
-       bfin_write(&reg->mbtif1, 0xFFFF);
-       bfin_write(&reg->mbtif2, 0xFFFF);
-       bfin_write(&reg->mbrif1, 0xFFFF);
-       bfin_write(&reg->mbrif2, 0xFFFF);
+       writew(0xFFFF, &reg->mbtif1);
+       writew(0xFFFF, &reg->mbtif2);
+       writew(0xFFFF, &reg->mbrif1);
+       writew(0xFFFF, &reg->mbrif2);
 
        /*
         * clear global interrupt status register
         */
-       bfin_write(&reg->gis, 0x7FF); /* overwrites with '1' */
+       writew(0x7FF, &reg->gis); /* overwrites with '1' */
 
        /*
         * Initialize Interrupts
         * - set bits in the mailbox interrupt mask register
         * - global interrupt mask
         */
-       bfin_write(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
-       bfin_write(&reg->mbim2, BIT(TRANSMIT_CHL - 16));
+       writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mbim1);
+       writew(BIT(TRANSMIT_CHL - 16), &reg->mbim2);
 
-       bfin_write(&reg->gim, EPIM | BOIM | RMLIM);
-       SSYNC();
+       writew(EPIM | BOIM | RMLIM, &reg->gim);
 }
 
 static void bfin_can_start(struct net_device *dev)
@@ -226,7 +330,7 @@ static int bfin_can_get_berr_counter(const struct net_device *dev,
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
 
-       u16 cec = bfin_read(&reg->cec);
+       u16 cec = readw(&reg->cec);
 
        bec->txerr = cec >> 8;
        bec->rxerr = cec;
@@ -252,28 +356,28 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* fill id */
        if (id & CAN_EFF_FLAG) {
-               bfin_write(&reg->chl[TRANSMIT_CHL].id0, id);
+               writew(id, &reg->chl[TRANSMIT_CHL].id0);
                val = ((id & 0x1FFF0000) >> 16) | IDE;
        } else
                val = (id << 2);
        if (id & CAN_RTR_FLAG)
                val |= RTR;
-       bfin_write(&reg->chl[TRANSMIT_CHL].id1, val | AME);
+       writew(val | AME, &reg->chl[TRANSMIT_CHL].id1);
 
        /* fill payload */
        for (i = 0; i < 8; i += 2) {
                val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
                        ((6 - i) < dlc ? (data[6 - i] << 8) : 0);
-               bfin_write(&reg->chl[TRANSMIT_CHL].data[i], val);
+               writew(val, &reg->chl[TRANSMIT_CHL].data[i]);
        }
 
        /* fill data length code */
-       bfin_write(&reg->chl[TRANSMIT_CHL].dlc, dlc);
+       writew(dlc, &reg->chl[TRANSMIT_CHL].dlc);
 
        can_put_echo_skb(skb, dev, 0);
 
        /* set transmit request */
-       bfin_write(&reg->trs2, BIT(TRANSMIT_CHL - 16));
+       writew(BIT(TRANSMIT_CHL - 16), &reg->trs2);
 
        return 0;
 }
@@ -296,26 +400,26 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
        /* get id */
        if (isrc & BIT(RECEIVE_EXT_CHL)) {
                /* extended frame format (EFF) */
-               cf->can_id = ((bfin_read(&reg->chl[RECEIVE_EXT_CHL].id1)
+               cf->can_id = ((readw(&reg->chl[RECEIVE_EXT_CHL].id1)
                             & 0x1FFF) << 16)
-                            + bfin_read(&reg->chl[RECEIVE_EXT_CHL].id0);
+                            + readw(&reg->chl[RECEIVE_EXT_CHL].id0);
                cf->can_id |= CAN_EFF_FLAG;
                obj = RECEIVE_EXT_CHL;
        } else {
                /* standard frame format (SFF) */
-               cf->can_id = (bfin_read(&reg->chl[RECEIVE_STD_CHL].id1)
+               cf->can_id = (readw(&reg->chl[RECEIVE_STD_CHL].id1)
                             & 0x1ffc) >> 2;
                obj = RECEIVE_STD_CHL;
        }
-       if (bfin_read(&reg->chl[obj].id1) & RTR)
+       if (readw(&reg->chl[obj].id1) & RTR)
                cf->can_id |= CAN_RTR_FLAG;
 
        /* get data length code */
-       cf->can_dlc = get_can_dlc(bfin_read(&reg->chl[obj].dlc) & 0xF);
+       cf->can_dlc = get_can_dlc(readw(&reg->chl[obj].dlc) & 0xF);
 
        /* get payload */
        for (i = 0; i < 8; i += 2) {
-               val = bfin_read(&reg->chl[obj].data[i]);
+               val = readw(&reg->chl[obj].data[i]);
                cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
                cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
        }
@@ -369,7 +473,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
 
        if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
                                state == CAN_STATE_ERROR_PASSIVE)) {
-               u16 cec = bfin_read(&reg->cec);
+               u16 cec = readw(&reg->cec);
                u8 rxerr = cec;
                u8 txerr = cec >> 8;
 
@@ -420,23 +524,23 @@ static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
        struct net_device_stats *stats = &dev->stats;
        u16 status, isrc;
 
-       if ((irq == priv->tx_irq) && bfin_read(&reg->mbtif2)) {
+       if ((irq == priv->tx_irq) && readw(&reg->mbtif2)) {
                /* transmission complete interrupt */
-               bfin_write(&reg->mbtif2, 0xFFFF);
+               writew(0xFFFF, &reg->mbtif2);
                stats->tx_packets++;
-               stats->tx_bytes += bfin_read(&reg->chl[TRANSMIT_CHL].dlc);
+               stats->tx_bytes += readw(&reg->chl[TRANSMIT_CHL].dlc);
                can_get_echo_skb(dev, 0);
                netif_wake_queue(dev);
-       } else if ((irq == priv->rx_irq) && bfin_read(&reg->mbrif1)) {
+       } else if ((irq == priv->rx_irq) && readw(&reg->mbrif1)) {
                /* receive interrupt */
-               isrc = bfin_read(&reg->mbrif1);
-               bfin_write(&reg->mbrif1, 0xFFFF);
+               isrc = readw(&reg->mbrif1);
+               writew(0xFFFF, &reg->mbrif1);
                bfin_can_rx(dev, isrc);
-       } else if ((irq == priv->err_irq) && bfin_read(&reg->gis)) {
+       } else if ((irq == priv->err_irq) && readw(&reg->gis)) {
                /* error interrupt */
-               isrc = bfin_read(&reg->gis);
-               status = bfin_read(&reg->esr);
-               bfin_write(&reg->gis, 0x7FF);
+               isrc = readw(&reg->gis);
+               status = readw(&reg->esr);
+               writew(0x7FF, &reg->gis);
                bfin_can_err(dev, isrc, status);
        } else {
                return IRQ_NONE;
@@ -556,16 +660,10 @@ static int bfin_can_probe(struct platform_device *pdev)
                goto exit;
        }
 
-       if (!request_mem_region(res_mem->start, resource_size(res_mem),
-                               dev_name(&pdev->dev))) {
-               err = -EBUSY;
-               goto exit;
-       }
-
        /* request peripheral pins */
        err = peripheral_request_list(pdata, dev_name(&pdev->dev));
        if (err)
-               goto exit_mem_release;
+               goto exit;
 
        dev = alloc_bfin_candev();
        if (!dev) {
@@ -574,7 +672,13 @@ static int bfin_can_probe(struct platform_device *pdev)
        }
 
        priv = netdev_priv(dev);
-       priv->membase = (void __iomem *)res_mem->start;
+
+       priv->membase = devm_ioremap_resource(&pdev->dev, res_mem);
+       if (IS_ERR(priv->membase)) {
+               err = PTR_ERR(priv->membase);
+               goto exit_peri_pin_free;
+       }
+
        priv->rx_irq = rx_irq->start;
        priv->tx_irq = tx_irq->start;
        priv->err_irq = err_irq->start;
@@ -606,8 +710,6 @@ exit_candev_free:
        free_candev(dev);
 exit_peri_pin_free:
        peripheral_free_list(pdata);
-exit_mem_release:
-       release_mem_region(res_mem->start, resource_size(res_mem));
 exit:
        return err;
 }
@@ -616,15 +718,11 @@ static int bfin_can_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
        struct bfin_can_priv *priv = netdev_priv(dev);
-       struct resource *res;
 
        bfin_can_set_reset_mode(dev);
 
        unregister_candev(dev);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
        peripheral_free_list(priv->pin_list);
 
        free_candev(dev);
@@ -641,9 +739,8 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
 
        if (netif_running(dev)) {
                /* enter sleep mode */
-               bfin_write(&reg->control, bfin_read(&reg->control) | SMR);
-               SSYNC();
-               while (!(bfin_read(&reg->intr) & SMACK)) {
+               writew(readw(&reg->control) | SMR, &reg->control);
+               while (!(readw(&reg->intr) & SMACK)) {
                        udelay(10);
                        if (--timeout == 0) {
                                netdev_err(dev, "fail to enter sleep mode\n");
@@ -663,8 +760,7 @@ static int bfin_can_resume(struct platform_device *pdev)
 
        if (netif_running(dev)) {
                /* leave sleep mode */
-               bfin_write(&reg->intr, 0);
-               SSYNC();
+               writew(0, &reg->intr);
        }
 
        return 0;
index 0f217e99904f15750bcef351ba2ddf04100cebca..22e2ebf313332f4dd004162b31faf288d7f7ab25 100644 (file)
@@ -107,8 +107,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off)  \
 {                                                                      \
        u32 indir, dir;                                                 \
        spin_lock(&priv->indir_lock);                                   \
-       indir = reg_readl(priv, REG_DIR_DATA_READ);                     \
        dir = __raw_readl(priv->name + off);                            \
+       indir = reg_readl(priv, REG_DIR_DATA_READ);                     \
        spin_unlock(&priv->indir_lock);                                 \
        return (u64)indir << 32 | dir;                                  \
 }                                                                      \
index 1ebd8f96072a1b00e943ca0247acdd2d5f5d40ab..7bc5998384c68fac1dbf2654c97f275e419469fd 100644 (file)
@@ -717,6 +717,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .get_strings            = mv88e6352_get_strings,
        .get_ethtool_stats      = mv88e6352_get_ethtool_stats,
        .get_sset_count         = mv88e6352_get_sset_count,
+       .set_eee                = mv88e6xxx_set_eee,
+       .get_eee                = mv88e6xxx_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
        .get_temp               = mv88e6352_get_temp,
        .get_temp_limit         = mv88e6352_get_temp_limit,
index a83ace0803e79638d510ab8d2024a9dbcc0e055d..c18ffc98aaccf126874590630484912cc5dd47ae 100644 (file)
@@ -649,6 +649,57 @@ int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
        return mv88e6xxx_phy_wait(ds);
 }
 
+int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+{
+       int reg;
+
+       reg = mv88e6xxx_phy_read_indirect(ds, port, 16);
+       if (reg < 0)
+               return -EOPNOTSUPP;
+
+       e->eee_enabled = !!(reg & 0x0200);
+       e->tx_lpi_enabled = !!(reg & 0x0100);
+
+       reg = REG_READ(REG_PORT(port), 0);
+       e->eee_active = !!(reg & 0x0040);
+
+       return 0;
+}
+
+static int mv88e6xxx_eee_enable_set(struct dsa_switch *ds, int port,
+                                   bool eee_enabled, bool tx_lpi_enabled)
+{
+       int reg, nreg;
+
+       reg = mv88e6xxx_phy_read_indirect(ds, port, 16);
+       if (reg < 0)
+               return reg;
+
+       nreg = reg & ~0x0300;
+       if (eee_enabled)
+               nreg |= 0x0200;
+       if (tx_lpi_enabled)
+               nreg |= 0x0100;
+
+       if (nreg != reg)
+               return mv88e6xxx_phy_write_indirect(ds, port, 16, nreg);
+
+       return 0;
+}
+
+int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+                     struct phy_device *phydev, struct ethtool_eee *e)
+{
+       int ret;
+
+       ret = mv88e6xxx_eee_enable_set(ds, port, e->eee_enabled,
+                                      e->tx_lpi_enabled);
+       if (ret)
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
index 72942271bb67ed988484e09c780a3ae46e54251c..5fd42ced90117c741c192bed8b03d71f060c9943 100644 (file)
@@ -88,6 +88,9 @@ int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
 int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
                                 u16 val);
+int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+                     struct phy_device *phydev, struct ethtool_eee *e);
 
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
index 7769c05543f17fcc8432cac6145ebdd70c4fc5da..ec6eac1f8c95ab79d33209e272a31f71484b0f62 100644 (file)
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
     link->open++;
 
     info->link_status = 0x00;
-    init_timer(&info->watchdog);
-    info->watchdog.function = ei_watchdog;
-    info->watchdog.data = (u_long)dev;
-    info->watchdog.expires = jiffies + HZ;
-    add_timer(&info->watchdog);
+    setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+    mod_timer(&info->watchdog, jiffies + HZ);
 
     return ax_open(dev);
 } /* axnet_open */
index 9fb7b9d4fd6c6595f7642d859678bc3097998750..2777289a26c0419f855926ef028942074ca62a2f 100644 (file)
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
 
     info->phy_id = info->eth_phy;
     info->link_status = 0x00;
-    init_timer(&info->watchdog);
-    info->watchdog.function = ei_watchdog;
-    info->watchdog.data = (u_long)dev;
-    info->watchdog.expires = jiffies + HZ;
-    add_timer(&info->watchdog);
+    setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+    mod_timer(&info->watchdog, jiffies + HZ);
 
     return ei_open(dev);
 } /* pcnet_open */
index f3470d96837a7fb0e59307b000855fe18a882af5..bab01c84916549e7599e34fe7a1c15bf2d10b456 100644 (file)
@@ -757,7 +757,7 @@ static void emac_shutdown(struct net_device *dev)
        /* Disable all interrupt */
        writel(0, db->membase + EMAC_INT_CTL_REG);
 
-       /* clear interupt status */
+       /* clear interrupt status */
        reg_val = readl(db->membase + EMAC_INT_STA_REG);
        writel(reg_val, db->membase + EMAC_INT_STA_REG);
 
index a1ee261bff5c052adcfdf301ec045b8d7f7e43e8..fd9296a5014db6f033cb11a5a42062610fc2cbd8 100644 (file)
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
        u16 pktlength;
        u16 pktstatus;
 
-       while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+       while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+              (count < limit))  {
                pktstatus = rxstatus >> 16;
                pktlength = rxstatus & 0xffff;
 
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
        struct altera_tse_private *priv =
                        container_of(napi, struct altera_tse_private, napi);
        int rxcomplete = 0;
-       int txcomplete = 0;
        unsigned long int flags;
 
-       txcomplete = tse_tx_complete(priv);
+       tse_tx_complete(priv);
 
        rxcomplete = tse_rx(priv, budget);
 
-       if (rxcomplete >= budget || txcomplete > 0)
-               return rxcomplete;
+       if (rxcomplete < budget) {
 
-       napi_gro_flush(napi, false);
-       __napi_complete(napi);
+               napi_gro_flush(napi, false);
+               __napi_complete(napi);
 
-       netdev_dbg(priv->dev,
-                  "NAPI Complete, did %d packets with budget %d\n",
-                  txcomplete+rxcomplete, budget);
+               netdev_dbg(priv->dev,
+                          "NAPI Complete, did %d packets with budget %d\n",
+                          rxcomplete, budget);
 
-       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-       priv->dmaops->enable_rxirq(priv);
-       priv->dmaops->enable_txirq(priv);
-       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
-       return rxcomplete + txcomplete;
+               spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+               priv->dmaops->enable_rxirq(priv);
+               priv->dmaops->enable_txirq(priv);
+               spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+       }
+       return rxcomplete;
 }
 
 /* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct altera_tse_private *priv;
-       unsigned long int flags;
 
        if (unlikely(!dev)) {
                pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
        }
        priv = netdev_priv(dev);
 
-       /* turn off desc irqs and enable napi rx */
-       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+       spin_lock(&priv->rxdma_irq_lock);
+       /* reset IRQs */
+       priv->dmaops->clear_rxirq(priv);
+       priv->dmaops->clear_txirq(priv);
+       spin_unlock(&priv->rxdma_irq_lock);
 
        if (likely(napi_schedule_prep(&priv->napi))) {
+               spin_lock(&priv->rxdma_irq_lock);
                priv->dmaops->disable_rxirq(priv);
                priv->dmaops->disable_txirq(priv);
+               spin_unlock(&priv->rxdma_irq_lock);
                __napi_schedule(&priv->napi);
        }
 
-       /* reset IRQs */
-       priv->dmaops->clear_rxirq(priv);
-       priv->dmaops->clear_txirq(priv);
-
-       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
        return IRQ_HANDLED;
 }
@@ -1407,7 +1406,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        }
 
        if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
-                                &priv->rx_fifo_depth)) {
+                                &priv->tx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
                ret = -ENXIO;
                goto err_free_netdev;
index 4c2ae22217804fd8ce8e6cc8a27c6cf2855abba2..94960055fa1f802fcee1debd7584b4b1e7ed0cd3 100644 (file)
@@ -723,13 +723,13 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
                         * the last correctly noting the error.
                         */
                        if(status & ERR_BIT) {
-                               /* reseting flags */
+                               /* resetting flags */
                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
                                goto err_next_pkt;
                        }
                        /* check for STP and ENP */
                        if(!((status & STP_BIT) && (status & ENP_BIT))){
-                               /* reseting flags */
+                               /* resetting flags */
                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
                                goto err_next_pkt;
                        }
index a75092d584cc9751fdd37197e7d8bc626867bd94..7cdb185124073022cddb73ece5ae33decc90be53 100644 (file)
@@ -614,7 +614,7 @@ typedef enum {
 /* Assume contoller gets data 10 times the maximum processing time */
 #define  REPEAT_CNT                    10
 
-/* amd8111e decriptor flag definitions */
+/* amd8111e descriptor flag definitions */
 typedef enum {
 
        OWN_BIT         =       (1 << 15),
index 11d6e6561df159c3dc9dff28fc504e945a77f47b..8eb37e0194b5b839553046fb231e86ef1ddfa30f 100644 (file)
@@ -1708,7 +1708,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
        /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
        if (!is_valid_ether_addr(dev->dev_addr))
-               memset(dev->dev_addr, 0, ETH_ALEN);
+               eth_zero_addr(dev->dev_addr);
 
        if (pcnet32_debug & NETIF_MSG_PROBE) {
                pr_cont(" %pM", dev->dev_addr);
index b93d4404d975571f0f4033f06f4de15b576156d3..885b02b5be07f6732fc0540684cb7875aeec1140 100644 (file)
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
        }
 }
 
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_channel *channel;
+       struct net_device *netdev = pdata->netdev;
+       unsigned int i;
+       int ret;
+
+       ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+                              netdev->name, pdata);
+       if (ret) {
+               netdev_alert(netdev, "error requesting irq %d\n",
+                            pdata->dev_irq);
+               return ret;
+       }
+
+       if (!pdata->per_channel_irq)
+               return 0;
+
+       channel = pdata->channel;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               snprintf(channel->dma_irq_name,
+                        sizeof(channel->dma_irq_name) - 1,
+                        "%s-TxRx-%u", netdev_name(netdev),
+                        channel->queue_index);
+
+               ret = devm_request_irq(pdata->dev, channel->dma_irq,
+                                      xgbe_dma_isr, 0,
+                                      channel->dma_irq_name, channel);
+               if (ret) {
+                       netdev_alert(netdev, "error requesting irq %d\n",
+                                    channel->dma_irq);
+                       goto err_irq;
+               }
+       }
+
+       return 0;
+
+err_irq:
+       /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+       for (i--, channel--; i < pdata->channel_count; i--, channel--)
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_channel *channel;
+       unsigned int i;
+
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       if (!pdata->per_channel_irq)
+               return;
+
+       channel = pdata->channel;
+       for (i = 0; i < pdata->channel_count; i++, channel++)
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
                return -EINVAL;
        }
 
-       phy_stop(pdata->phydev);
-
        spin_lock_irqsave(&pdata->lock, flags);
 
        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_detach(netdev);
 
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata, 0);
 
-       /* Powerdown Tx/Rx */
        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);
 
+       xgbe_napi_disable(pdata, 0);
+
+       phy_stop(pdata->phydev);
+
        pdata->power_down = 1;
 
        spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
        phy_start(pdata->phydev);
 
-       /* Enable Tx/Rx */
+       xgbe_napi_enable(pdata, 0);
+
        hw_if->powerup_tx(pdata);
        hw_if->powerup_rx(pdata);
 
        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_attach(netdev);
 
-       xgbe_napi_enable(pdata, 0);
        netif_tx_start_all_queues(netdev);
 
        spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct net_device *netdev = pdata->netdev;
+       int ret;
 
        DBGPR("-->xgbe_start\n");
 
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
        phy_start(pdata->phydev);
 
+       xgbe_napi_enable(pdata, 1);
+
+       ret = xgbe_request_irqs(pdata);
+       if (ret)
+               goto err_napi;
+
        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);
 
        xgbe_init_tx_timers(pdata);
 
-       xgbe_napi_enable(pdata, 1);
        netif_tx_start_all_queues(netdev);
 
        DBGPR("<--xgbe_start\n");
 
        return 0;
+
+err_napi:
+       xgbe_napi_disable(pdata, 1);
+
+       phy_stop(pdata->phydev);
+
+       hw_if->exit(pdata);
+
+       return ret;
 }
 
 static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
        DBGPR("-->xgbe_stop\n");
 
-       phy_stop(pdata->phydev);
-
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata, 1);
 
        xgbe_stop_tx_timers(pdata);
 
        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);
 
+       xgbe_free_irqs(pdata);
+
+       xgbe_napi_disable(pdata, 1);
+
+       phy_stop(pdata->phydev);
+
+       hw_if->exit(pdata);
+
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_channel *channel;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int i;
-
        DBGPR("-->xgbe_restart_dev\n");
 
        /* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
                return;
 
        xgbe_stop(pdata);
-       synchronize_irq(pdata->dev_irq);
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++)
-                       synchronize_irq(channel->dma_irq);
-       }
 
        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);
 
-       /* Issue software reset to device */
-       hw_if->exit(pdata);
-
        xgbe_start(pdata);
 
        DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
-       struct xgbe_channel *channel = NULL;
-       unsigned int i = 0;
        int ret;
 
        DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
        INIT_WORK(&pdata->restart_work, xgbe_restart);
        INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
-       /* Request interrupts */
-       ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
-                              netdev->name, pdata);
-       if (ret) {
-               netdev_alert(netdev, "error requesting irq %d\n",
-                            pdata->dev_irq);
-               goto err_rings;
-       }
-
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++) {
-                       snprintf(channel->dma_irq_name,
-                                sizeof(channel->dma_irq_name) - 1,
-                                "%s-TxRx-%u", netdev_name(netdev),
-                                channel->queue_index);
-
-                       ret = devm_request_irq(pdata->dev, channel->dma_irq,
-                                              xgbe_dma_isr, 0,
-                                              channel->dma_irq_name, channel);
-                       if (ret) {
-                               netdev_alert(netdev,
-                                            "error requesting irq %d\n",
-                                            channel->dma_irq);
-                               goto err_irq;
-                       }
-               }
-       }
-
        ret = xgbe_start(pdata);
        if (ret)
-               goto err_start;
+               goto err_rings;
 
        DBGPR("<--xgbe_open\n");
 
        return 0;
 
-err_start:
-       hw_if->exit(pdata);
-
-err_irq:
-       if (pdata->per_channel_irq) {
-               /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
-               for (i--, channel--; i < pdata->channel_count; i--, channel--)
-                       devm_free_irq(pdata->dev, channel->dma_irq, channel);
-       }
-
-       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
 err_rings:
        desc_if->free_ring_resources(pdata);
 
@@ -1399,30 +1424,16 @@ err_phy_init:
 static int xgbe_close(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
-       struct xgbe_channel *channel;
-       unsigned int i;
 
        DBGPR("-->xgbe_close\n");
 
        /* Stop the device */
        xgbe_stop(pdata);
 
-       /* Issue software reset to device */
-       hw_if->exit(pdata);
-
        /* Free the ring descriptors and buffers */
        desc_if->free_ring_resources(pdata);
 
-       /* Release the interrupts */
-       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++)
-                       devm_free_irq(pdata->dev, channel->dma_irq, channel);
-       }
-
        /* Free the channel and ring structures */
        xgbe_free_channels(pdata);
 
index 13e8f95c077c3c57089e7b7f33c1577efc7143cf..1eea3e5a5d085b18a16da5b2195eba148b47d906 100644 (file)
@@ -620,7 +620,7 @@ struct xgbe_hw_features {
        unsigned int mgk;               /* PMT magic packet */
        unsigned int mmc;               /* RMON module */
        unsigned int aoe;               /* ARP Offload */
-       unsigned int ts;                /* IEEE 1588-2008 Adavanced Timestamp */
+       unsigned int ts;                /* IEEE 1588-2008 Advanced Timestamp */
        unsigned int eee;               /* Energy Efficient Ethernet */
        unsigned int tx_coe;            /* Tx Checksum Offload */
        unsigned int rx_coe;            /* Rx Checksum Offload */
index 842fe7684904351652f669d7838966597e467bcc..7fcaf0da42a8936e0f4c54ef03f8d3396d9f39d6 100644 (file)
@@ -720,7 +720,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
            mace_reset(dev);
                /*
                 * XXX mace likes to hang the machine after a xmtfs error.
-                * This is hard to reproduce, reseting *may* help
+                * This is hard to reproduce, resetting *may* help
                 */
        }
        cp = mp->tx_cmds + NCMDS_TX * i;
index 6e66127e6abf5a5a17a877ba315b058e25bdd865..89914ca17a490799b3fa9ec9f54ab87c7cd85a80 100644 (file)
@@ -575,7 +575,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
                        mace_reset(dev);
                        /*
                         * XXX mace likes to hang the machine after a xmtfs error.
-                        * This is hard to reproduce, reseting *may* help
+                        * This is hard to reproduce, resetting *may* help
                         */
                }
                /* dma should have finished */
index 52fdfe22597807dcde7034b8b9b5a2d84080ec42..a8b80c56ac25e3b59ab28a052f99c4e46ef5c27e 100644 (file)
@@ -307,7 +307,7 @@ void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
 
 /*
  * atl1c_read_phy_core
- * core funtion to read register in PHY via MDIO control regsiter.
+ * core function to read register in PHY via MDIO control regsiter.
  * ext: extension register (see IEEE 802.3)
  * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
  * reg: reg to read
@@ -356,7 +356,7 @@ int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
 
 /*
  * atl1c_write_phy_core
- * core funtion to write to register in PHY via MDIO control regsiter.
+ * core function to write to register in PHY via MDIO control register.
  * ext: extension register (see IEEE 802.3)
  * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
  * reg: reg to write
index 587f63e87588f73a3e310066f3fd7494d1259e3f..932bd1862f7adeb7a95cec8ae1efda76edb7a728 100644 (file)
@@ -752,7 +752,7 @@ static void atl1c_patch_assign(struct atl1c_hw *hw)
 
        if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
            hw->revision_id == L2CB_V21) {
-               /* config acess mode */
+               /* config access mode */
                pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
                                       REG_PCIE_DEV_MISC_CTRL);
                pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
index 5b308a4a4d0eccc35c641967fcf2459ec736f094..783543ad1fcfa1a4976090e0797f7f15f29a1724 100644 (file)
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        /* RBUF misc statistics */
        STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
        STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
-       STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-       STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-       STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+       STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+       STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+       STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCM_SYSPORT_STATS_LEN  ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
                s = &bcm_sysport_gstrings_stats[i];
                switch (s->type) {
                case BCM_SYSPORT_STAT_NETDEV:
+               case BCM_SYSPORT_STAT_SOFT:
                        continue;
                case BCM_SYSPORT_STAT_MIB_RX:
                case BCM_SYSPORT_STAT_MIB_TX:
index fc19417d82a505dc61c9f25a940f891522763e00..7e3d87a88c76a81e2c36b65559d8b2b0bcf34217 100644 (file)
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
        BCM_SYSPORT_STAT_RUNT,
        BCM_SYSPORT_STAT_RXCHK,
        BCM_SYSPORT_STAT_RBUF,
+       BCM_SYSPORT_STAT_SOFT,
 };
 
 /* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
 #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
 #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
 #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
 
 #define STAT_RXCHK(str, m, ofs) { \
        .stat_string = str, \
index ffe4e003e636db95054df9e1b76b5198a2b5e2b1..e3d853cab7c9644c241cd42ba1a2844b82e55176 100644 (file)
@@ -2446,7 +2446,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        }
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
-       memset(packet + ETH_ALEN, 0, ETH_ALEN);
+       eth_zero_addr(packet + ETH_ALEN);
        memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
        for (i = ETH_HLEN; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);
index bd90e50bd8e662d4731b61f82ba9fe06071429a1..d6e1975b7b691ab51ab536a7c9ec413601f24b6c 100644 (file)
@@ -278,7 +278,7 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
 }
 
 
-/* congestion managment port init api description
+/* congestion management port init api description
  * the api works as follows:
  * the driver should pass the cmng_init_input struct, the port_init function
  * will prepare the required internal ram structure which will be passed back
index 778e4cd325714eb34e91a369726ea1818a0c8ebe..b7c77b26a8a49c561d60156154a6045713c6f5cb 100644 (file)
@@ -563,7 +563,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
 *      Will return the NIG ETS registers to init values.Except
 *      credit_upper_bound.
 *      That isn't used in this configuration (No WFQ is enabled) and will be
-*      configured acording to spec
+*      configured according to spec
 *.
 ******************************************************************************/
 static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
@@ -680,7 +680,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
 *      Will return the PBF ETS registers to init values.Except
 *      credit_upper_bound.
 *      That isn't used in this configuration (No WFQ is enabled) and will be
-*      configured acording to spec
+*      configured according to spec
 *.
 ******************************************************************************/
 static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
@@ -738,7 +738,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
 }
 /******************************************************************************
 * Description:
-*      E3B0 disable will return basicly the values to init values.
+*      E3B0 disable will return basically the values to init values.
 *.
 ******************************************************************************/
 static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
@@ -761,7 +761,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
 
 /******************************************************************************
 * Description:
-*      Disable will return basicly the values to init values.
+*      Disable will return basically the values to init values.
 *
 ******************************************************************************/
 int bnx2x_ets_disabled(struct link_params *params,
@@ -2938,7 +2938,7 @@ static int bnx2x_eee_initial_config(struct link_params *params,
 {
        vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
 
-       /* Propogate params' bits --> vars (for migration exposure) */
+       /* Propagate params' bits --> vars (for migration exposure) */
        if (params->eee_mode & EEE_MODE_ENABLE_LPI)
                vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
        else
@@ -13308,7 +13308,7 @@ static void bnx2x_check_over_curr(struct link_params *params,
                vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
 }
 
-/* Returns 0 if no change occured since last check; 1 otherwise. */
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
 static u8 bnx2x_analyze_link_error(struct link_params *params,
                                    struct link_vars *vars, u32 status,
                                    u32 phy_flag, u32 link_flag, u8 notify)
index 7155e1d2c208c7253b846ee954086fef5fd574da..98dcb03fe1b8c6187511f7ae554abea13cbb726c 100644 (file)
@@ -11546,13 +11546,13 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
        /* Disable iSCSI OOO if MAC configuration is invalid. */
        if (!is_valid_ether_addr(iscsi_mac)) {
                bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-               memset(iscsi_mac, 0, ETH_ALEN);
+               eth_zero_addr(iscsi_mac);
        }
 
        /* Disable FCoE if MAC configuration is invalid. */
        if (!is_valid_ether_addr(fip_mac)) {
                bp->flags |= NO_FCOE_FLAG;
-               memset(bp->fip_mac, 0, ETH_ALEN);
+               eth_zero_addr(bp->fip_mac);
        }
 }
 
@@ -11563,7 +11563,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        int port = BP_PORT(bp);
 
        /* Zero primary MAC configuration */
-       memset(bp->dev->dev_addr, 0, ETH_ALEN);
+       eth_zero_addr(bp->dev->dev_addr);
 
        if (BP_NOMCP(bp)) {
                BNX2X_ERROR("warning: random MAC workaround active\n");
index 6fe547c93e74b0e36f8a920737f25c79c2470143..0770e4bff89bc406c634b36e3da341e19502d64e 100644 (file)
@@ -29,7 +29,7 @@
 #define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND                (0x1<<1)
 /* [RW 1] Initiate the ATC array - reset all the valid bits */
 #define ATC_REG_ATC_INIT_ARRAY                                  0x1100b8
-/* [R 1] ATC initalization done */
+/* [R 1] ATC initialization done */
 #define ATC_REG_ATC_INIT_DONE                                   0x1100bc
 /* [RC 6] Interrupt register #0 read clear */
 #define ATC_REG_ATC_INT_STS_CLR                                         0x1101c0
index e5aca2de1871350f3e47a788c4360b461c02a039..8638d6c97caa4e615ccf2bd3083d837a0eb870c0 100644 (file)
@@ -2693,7 +2693,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                        memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
                else
                        /* function has not been loaded yet. Show mac as 0s */
-                       memset(&ivi->mac, 0, ETH_ALEN);
+                       eth_zero_addr(ivi->mac);
 
                /* vlan */
                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
index d1608297c773746237a8faf3697277afcbe45918..612cafb5df5387f060b370d7effbb09e671a398f 100644 (file)
@@ -1620,7 +1620,7 @@ void bnx2x_memset_stats(struct bnx2x *bp)
        if (bp->port.pmf && bp->port.port_stx)
                bnx2x_port_stats_base_init(bp);
 
-       /* mark the end of statistics initializiation */
+       /* mark the end of statistics initialization */
        bp->stats_init = false;
 }
 
index be40eabc5304dad9e1ded4451b65b4dae6a03ae8..15b2d164756058c6c5fb154bdc128f52aa3148c3 100644 (file)
@@ -800,7 +800,7 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,
        req->rss_key_size = T_ETH_RSS_KEY;
        req->rss_result_mask = params->rss_result_mask;
 
-       /* flags handled individually for backward/forward compatability */
+       /* flags handled individually for backward/forward compatibility */
        if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
                req->rss_flags |= VFPF_RSS_MODE_DISABLED;
        if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
@@ -1869,7 +1869,7 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
        rss.rss_obj = &vf->rss_conf_obj;
        rss.rss_result_mask = rss_tlv->rss_result_mask;
 
-       /* flags handled individually for backward/forward compatability */
+       /* flags handled individually for backward/forward compatibility */
        rss.rss_flags = 0;
        rss.ramrod_flags = 0;
 
index 51300532ec26306079f75a216dbb98b07b0ce3ed..83c0cb323e0c7fa3cc9c7725f61b59ad847df85c 100644 (file)
@@ -54,6 +54,8 @@
 /* Default highest priority queue for multi queue support */
 #define GENET_Q0_PRIORITY      0
 
+#define GENET_Q16_RX_BD_CNT    \
+       (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
 #define GENET_Q16_TX_BD_CNT    \
        (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
 
@@ -487,6 +489,7 @@ enum bcmgenet_stat_type {
        BCMGENET_STAT_MIB_TX,
        BCMGENET_STAT_RUNT,
        BCMGENET_STAT_MISC,
+       BCMGENET_STAT_SOFT,
 };
 
 struct bcmgenet_stats {
@@ -515,6 +518,7 @@ struct bcmgenet_stats {
 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
 
 #define STAT_GENET_MISC(str, m, offset) { \
        .stat_string = str, \
@@ -614,9 +618,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
                        UMAC_RBUF_OVFL_CNT),
        STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
        STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
-       STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-       STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-       STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+       STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+       STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+       STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN     ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +672,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
                s = &bcmgenet_gstrings_stats[i];
                switch (s->type) {
                case BCMGENET_STAT_NETDEV:
+               case BCMGENET_STAT_SOFT:
                        continue;
                case BCMGENET_STAT_MIB_RX:
                case BCMGENET_STAT_MIB_TX:
@@ -971,41 +976,36 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
 }
 
 /* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
-                                 struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+                                         struct bcmgenet_tx_ring *ring)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
-       int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
-       unsigned int bds_compl;
+       unsigned int pkts_compl = 0;
        unsigned int c_index;
+       unsigned int txbds_ready;
+       unsigned int txbds_processed = 0;
 
        /* Compute how many buffers are transmitted since last xmit call */
        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-       txq = netdev_get_tx_queue(dev, ring->queue);
-
-       last_c_index = ring->c_index;
-       num_tx_bds = ring->size;
-
-       c_index &= (num_tx_bds - 1);
+       c_index &= DMA_C_INDEX_MASK;
 
-       if (c_index >= last_c_index)
-               last_tx_cn = c_index - last_c_index;
+       if (likely(c_index >= ring->c_index))
+               txbds_ready = c_index - ring->c_index;
        else
-               last_tx_cn = num_tx_bds - last_c_index + c_index;
+               txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
 
        netif_dbg(priv, tx_done, dev,
-                 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-                 __func__, ring->index,
-                 c_index, last_tx_cn, last_c_index);
+                 "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+                 __func__, ring->index, ring->c_index, c_index, txbds_ready);
 
        /* Reclaim transmitted buffers */
-       while (last_tx_cn-- > 0) {
-               tx_cb_ptr = ring->cbs + last_c_index;
-               bds_compl = 0;
+       while (txbds_processed < txbds_ready) {
+               tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
                if (tx_cb_ptr->skb) {
-                       bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+                       pkts_compl++;
+                       dev->stats.tx_packets++;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -1021,30 +1021,55 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                                       DMA_TO_DEVICE);
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }
-               dev->stats.tx_packets++;
-               ring->free_bds += bds_compl;
 
-               last_c_index++;
-               last_c_index &= (num_tx_bds - 1);
+               txbds_processed++;
+               if (likely(ring->clean_ptr < ring->end_ptr))
+                       ring->clean_ptr++;
+               else
+                       ring->clean_ptr = ring->cb_ptr;
        }
 
-       if (ring->free_bds > (MAX_SKB_FRAGS + 1))
-               ring->int_disable(priv, ring);
+       ring->free_bds += txbds_processed;
+       ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
 
-       if (netif_tx_queue_stopped(txq))
-               netif_tx_wake_queue(txq);
+       if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+               txq = netdev_get_tx_queue(dev, ring->queue);
+               if (netif_tx_queue_stopped(txq))
+                       netif_tx_wake_queue(txq);
+       }
 
-       ring->c_index = c_index;
+       return pkts_compl;
 }
 
-static void bcmgenet_tx_reclaim(struct net_device *dev,
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
                                struct bcmgenet_tx_ring *ring)
 {
+       unsigned int released;
        unsigned long flags;
 
        spin_lock_irqsave(&ring->lock, flags);
-       __bcmgenet_tx_reclaim(dev, ring);
+       released = __bcmgenet_tx_reclaim(dev, ring);
        spin_unlock_irqrestore(&ring->lock, flags);
+
+       return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+       struct bcmgenet_tx_ring *ring =
+               container_of(napi, struct bcmgenet_tx_ring, napi);
+       unsigned int work_done = 0;
+
+       work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+       if (work_done == 0) {
+               napi_complete(napi);
+               ring->int_enable(ring->priv, ring);
+
+               return 0;
+       }
+
+       return budget;
 }
 
 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1327,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
        bcmgenet_tdma_ring_writel(priv, ring->index,
                                  ring->prod_index, TDMA_PROD_INDEX);
 
-       if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+       if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
                netif_tx_stop_queue(txq);
-               ring->int_enable(priv, ring);
-       }
 
 out:
        spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1644,7 @@ static int init_umac(struct bcmgenet_priv *priv)
        struct device *kdev = &priv->pdev->dev;
        int ret;
        u32 reg, cpu_mask_clear;
+       int index;
 
        dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1647,7 +1671,7 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_intr_disable(priv);
 
-       cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+       cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
 
        dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
@@ -1674,6 +1698,10 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
+       for (index = 0; index < priv->hw_params->tx_queues; index++)
+               bcmgenet_intrl2_1_writel(priv, (1 << index),
+                                        INTRL2_CPU_MASK_CLEAR);
+
        /* Enable rx/tx engine.*/
        dev_dbg(kdev, "done init umac\n");
 
@@ -1690,6 +1718,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
        u32 flow_period_val = 0;
 
        spin_lock_init(&ring->lock);
+       ring->priv = priv;
+       netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
        ring->index = index;
        if (index == DESC_INDEX) {
                ring->queue = 0;
@@ -1702,6 +1732,7 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
        }
        ring->cbs = priv->tx_cbs + start_ptr;
        ring->size = size;
+       ring->clean_ptr = start_ptr;
        ring->c_index = 0;
        ring->free_bds = size;
        ring->write_ptr = start_ptr;
@@ -1732,6 +1763,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
                                  TDMA_WRITE_PTR);
        bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
                                  DMA_END_ADDR);
+
+       napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+                                 unsigned int index)
+{
+       struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+       napi_disable(&ring->napi);
+       netif_napi_del(&ring->napi);
 }
 
 /* Initialize a RDMA ring */
@@ -1741,37 +1783,33 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
        u32 words_per_bd = WORDS_PER_BD(priv);
        int ret;
 
-       priv->num_rx_bds = TOTAL_DESC;
-       priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
        priv->rx_bd_assign_ptr = priv->rx_bds;
        priv->rx_bd_assign_index = 0;
        priv->rx_c_index = 0;
        priv->rx_read_ptr = 0;
-       priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
-                              GFP_KERNEL);
-       if (!priv->rx_cbs)
-               return -ENOMEM;
 
        ret = bcmgenet_alloc_rx_buffers(priv);
        if (ret) {
-               kfree(priv->rx_cbs);
                return ret;
        }
 
-       bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+       bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
        bcmgenet_rdma_ring_writel(priv, index,
                                  ((size << DMA_RING_SIZE_SHIFT) |
                                   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
-       bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
-       bcmgenet_rdma_ring_writel(priv, index,
-                                 words_per_bd * size - 1, DMA_END_ADDR);
        bcmgenet_rdma_ring_writel(priv, index,
                                  (DMA_FC_THRESH_LO <<
                                   DMA_XOFF_THRESHOLD_SHIFT) |
                                   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+
+       /* Set start and end address, read and write pointers */
+       bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+       bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
+       bcmgenet_rdma_ring_writel(priv, index, words_per_bd * size - 1,
+                                 DMA_END_ADDR);
 
        return ret;
 }
@@ -1896,7 +1934,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
        return ret;
 }
 
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
 
@@ -1915,6 +1953,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
        kfree(priv->tx_cbs);
 }
 
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+       int i;
+
+       bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+       for (i = 0; i < priv->hw_params->tx_queues; i++)
+               bcmgenet_fini_tx_ring(priv, i);
+
+       __bcmgenet_fini_dma(priv);
+}
+
 /* init_edma: Initialize DMA control register */
 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 {
@@ -1922,18 +1972,33 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
        unsigned int i;
        struct enet_cb *cb;
 
-       netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
+       netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+
+       /* Init rDma */
+       bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+       /* Initialize common Rx ring structures */
+       priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+       priv->num_rx_bds = TOTAL_DESC;
+       priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+                              GFP_KERNEL);
+       if (!priv->rx_cbs)
+               return -ENOMEM;
+
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = priv->rx_cbs + i;
+               cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
+       }
 
-       /* by default, enable ring 16 (descriptor based) */
+       /* Initialize Rx default queue 16 */
        ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
        if (ret) {
                netdev_err(priv->dev, "failed to initialize RX ring\n");
+               bcmgenet_free_rx_buffers(priv);
+               kfree(priv->rx_cbs);
                return ret;
        }
 
-       /* init rDma */
-       bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
-
        /* Init tDma */
        bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
 
@@ -1943,7 +2008,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
        priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
                               GFP_KERNEL);
        if (!priv->tx_cbs) {
-               bcmgenet_fini_dma(priv);
+               __bcmgenet_fini_dma(priv);
                return -ENOMEM;
        }
 
@@ -1965,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
                        struct bcmgenet_priv, napi);
        unsigned int work_done;
 
-       /* tx reclaim */
-       bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
        work_done = bcmgenet_desc_rx(priv, budget);
 
        /* Advancing our consumer index*/
@@ -2012,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
        struct bcmgenet_priv *priv = dev_id;
+       struct bcmgenet_tx_ring *ring;
        unsigned int index;
 
        /* Save irq status for bottom-half processing. */
        priv->irq1_stat =
                bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
-               ~priv->int1_mask;
+               ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
        /* clear interrupts */
        bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
                  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
        /* Check the MBDONE interrupts.
         * packet is done, reclaim descriptors
         */
-       if (priv->irq1_stat & 0x0000ffff) {
-               index = 0;
-               for (index = 0; index < 16; index++) {
-                       if (priv->irq1_stat & (1 << index))
-                               bcmgenet_tx_reclaim(priv->dev,
-                                                   &priv->tx_rings[index]);
+       for (index = 0; index < priv->hw_params->tx_queues; index++) {
+               if (!(priv->irq1_stat & BIT(index)))
+                       continue;
+
+               ring = &priv->tx_rings[index];
+
+               if (likely(napi_schedule_prep(&ring->napi))) {
+                       ring->int_disable(priv, ring);
+                       __napi_schedule(&ring->napi);
                }
        }
+
        return IRQ_HANDLED;
 }
 
@@ -2065,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        }
        if (priv->irq0_stat &
                        (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-               /* Tx reclaim */
-               bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+               struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+               if (likely(napi_schedule_prep(&ring->napi))) {
+                       ring->int_disable(priv, ring);
+                       __napi_schedule(&ring->napi);
+               }
        }
        if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
                                UMAC_IRQ_PHY_DET_F |
@@ -2429,6 +2501,7 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
                .tx_queues = 0,
                .tx_bds_per_q = 0,
                .rx_queues = 0,
+               .rx_bds_per_q = 0,
                .bp_in_en_shift = 16,
                .bp_in_mask = 0xffff,
                .hfb_filter_cnt = 16,
@@ -2441,7 +2514,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
        [GENET_V2] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
-               .rx_queues = 4,
+               .rx_queues = 0,
+               .rx_bds_per_q = 0,
                .bp_in_en_shift = 16,
                .bp_in_mask = 0xffff,
                .hfb_filter_cnt = 16,
@@ -2457,7 +2531,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
        [GENET_V3] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
-               .rx_queues = 4,
+               .rx_queues = 0,
+               .rx_bds_per_q = 0,
                .bp_in_en_shift = 17,
                .bp_in_mask = 0x1ffff,
                .hfb_filter_cnt = 48,
@@ -2473,7 +2548,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
        [GENET_V4] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
-               .rx_queues = 4,
+               .rx_queues = 0,
+               .rx_bds_per_q = 0,
                .bp_in_en_shift = 17,
                .bp_in_mask = 0x1ffff,
                .hfb_filter_cnt = 48,
@@ -2573,7 +2649,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 #endif
 
        pr_debug("Configuration for version: %d\n"
-               "TXq: %1d, TXqBDs: %1d, RXq: %1d\n"
+               "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
                "BP << en: %2d, BP msk: 0x%05x\n"
                "HFB count: %2d, QTAQ msk: 0x%05x\n"
                "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
@@ -2581,7 +2657,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
                "Words/BD: %d\n",
                priv->version,
                params->tx_queues, params->tx_bds_per_q,
-               params->rx_queues,
+               params->rx_queues, params->rx_bds_per_q,
                params->bp_in_en_shift, params->bp_in_mask,
                params->hfb_filter_cnt, params->qtag_mask,
                params->tbuf_offset, params->hfb_offset,
@@ -2609,8 +2685,9 @@ static int bcmgenet_probe(struct platform_device *pdev)
        struct resource *r;
        int err = -EIO;
 
-       /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
-       dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+       /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
+       dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
+                                GENET_MAX_MQ_CNT + 1);
        if (!dev) {
                dev_err(&pdev->dev, "can't allocate net device\n");
                return -ENOMEM;
index 3a8a90f953658cf33c1e57e8651b364dc71e1d2c..5684e8529ecc32aa606c6797a57815ecb817ccab 100644 (file)
@@ -505,6 +505,7 @@ struct bcmgenet_hw_params {
        u8              tx_queues;
        u8              tx_bds_per_q;
        u8              rx_queues;
+       u8              rx_bds_per_q;
        u8              bp_in_en_shift;
        u32             bp_in_mask;
        u8              hfb_filter_cnt;
@@ -520,10 +521,12 @@ struct bcmgenet_hw_params {
 
 struct bcmgenet_tx_ring {
        spinlock_t      lock;           /* ring lock */
+       struct napi_struct napi;        /* NAPI per tx queue */
        unsigned int    index;          /* ring index */
        unsigned int    queue;          /* queue index */
        struct enet_cb  *cbs;           /* tx ring buffer control block*/
        unsigned int    size;           /* size of each tx ring */
+       unsigned int    clean_ptr;      /* Tx ring clean pointer */
        unsigned int    c_index;        /* last consumer index of each ring*/
        unsigned int    free_bds;       /* # of free bds for each ring */
        unsigned int    write_ptr;      /* Tx ring write pointer SW copy */
@@ -534,6 +537,7 @@ struct bcmgenet_tx_ring {
                           struct bcmgenet_tx_ring *);
        void (*int_disable)(struct bcmgenet_priv *priv,
                            struct bcmgenet_tx_ring *);
+       struct bcmgenet_priv *priv;
 };
 
 /* device context */
index 23a019cee279af1e502d05dc5f2363372d35b2ce..22b33da32ba46204beb1cb5fd7c725bfaa1dfc71 100644 (file)
@@ -7244,7 +7244,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
                        if (tnapi == &tp->napi[1] && tp->rx_refill)
                                continue;
 
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        /* Reenable interrupts. */
                        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 
@@ -7337,7 +7337,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
                        sblk->status &= ~SD_STATUS_UPDATED;
 
                if (likely(!tg3_has_work(tnapi))) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        tg3_int_reenable(tnapi);
                        break;
                }
index 63e300f5ba41d174009e6f33b6a70278dda1676e..a37326d44fbb3c9fc41852e6d177fad2ec85f755 100644 (file)
@@ -135,7 +135,7 @@ struct bfa_cee_lldp_str {
        u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
 };
 
-/* LLDP paramters */
+/* LLDP parameters */
 struct bfa_cee_lldp_cfg {
        struct bfa_cee_lldp_str chassis_id;
        struct bfa_cee_lldp_str port_id;
index f2d13238b02e635180a19e1c64986313c4a8908c..594a2ab36d3175de2633490eec1e0395dbb74e59 100644 (file)
@@ -1340,7 +1340,7 @@ bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
        return true;
 }
 
-/* Returns TRUE if major minor and maintainence are same.
+/* Returns TRUE if major minor and maintenance are same.
  * If patch version are same, check for MD5 Checksum to be same.
  */
 static bool
index 66c8507d77177049bdfa1e046fd2d4d2d0c161f9..2e72445dbb4f661748d0e90597889c2e92fc5b8d 100644 (file)
@@ -699,7 +699,7 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb)
 
        /*
         * Ignore mode and program for the max clock (which is FC16)
-        * Firmware/NFC will do the PLL init appropiately
+        * Firmware/NFC will do the PLL init appropriately
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
index f1e1129e62417939e3c35f468ef0265a3504dab5..2bcde4042268add6440deaca93a3255740e54a16 100644 (file)
@@ -159,8 +159,8 @@ enum bfi_asic_gen {
 };
 
 enum bfi_asic_mode {
-       BFI_ASIC_MODE_FC        = 1,    /* FC upto 8G speed             */
-       BFI_ASIC_MODE_FC16      = 2,    /* FC upto 16G speed            */
+       BFI_ASIC_MODE_FC        = 1,    /* FC up to 8G speed            */
+       BFI_ASIC_MODE_FC16      = 2,    /* FC up to 16G speed           */
        BFI_ASIC_MODE_ETH       = 3,    /* Ethernet ports               */
        BFI_ASIC_MODE_COMBO     = 4,    /* FC 16G and Ethernet 10G port */
 };
index c5feab130d6d6c351d7b89bf4fefb9fbbd02455c..174af0e9d05611fa40a54a6a774aa1a21cfa1ab1 100644 (file)
@@ -363,7 +363,7 @@ struct bna_txq_wi_vector {
 
 /*  TxQ Entry Structure
  *
- *  BEWARE:  Load values into this structure with correct endianess.
+ *  BEWARE:  Load values into this structure with correct endianness.
  */
 struct bna_txq_entry {
        union {
index 321d2ad235d9143547efdb9d3100b8d91fd45d68..1ba3e3a67389d8a793fb7fbc6ed8a431586597e5 100644 (file)
@@ -4,7 +4,7 @@
 
 config NET_CADENCE
        bool "Cadence devices"
-       depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
+       depends on HAS_IOMEM
        default y
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
@@ -20,17 +20,9 @@ config NET_CADENCE
 
 if NET_CADENCE
 
-config ARM_AT91_ETHER
-       tristate "AT91RM9200 Ethernet support"
-       depends on HAS_DMA && (ARCH_AT91 || COMPILE_TEST)
-       select MACB
-       ---help---
-         If you wish to compile a kernel for the AT91RM9200 and enable
-         ethernet support, then you should always answer Y to this.
-
 config MACB
        tristate "Cadence MACB/GEM support"
-       depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
+       depends on HAS_DMA
        select PHYLIB
        ---help---
          The Cadence MACB ethernet interface is found on many Atmel AT32 and
index 9068b8331ed18b52f5df76b75d5f5f9ad4125cf2..91f79b1f0505d25beb9935790f47146c555024cf 100644 (file)
@@ -2,5 +2,4 @@
 # Makefile for the Atmel network device drivers.
 #
 
-obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
 obj-$(CONFIG_MACB) += macb.o
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
deleted file mode 100644 (file)
index 7ef55f5..0000000
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * Ethernet driver for the Atmel AT91RM9200 (Thunder)
- *
- *  Copyright (C) 2003 SAN People (Pty) Ltd
- *
- * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
- * Initial version by Rick Bronson 01/11/2003
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/dma-mapping.h>
-#include <linux/ethtool.h>
-#include <linux/platform_data/macb.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/gfp.h>
-#include <linux/phy.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_net.h>
-
-#include "macb.h"
-
-/* 1518 rounded up */
-#define MAX_RBUFF_SZ   0x600
-/* max number of receive buffers */
-#define MAX_RX_DESCR   9
-
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-       dma_addr_t addr;
-       u32 ctl;
-       int i;
-
-       lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
-                                        (MAX_RX_DESCR *
-                                         sizeof(struct macb_dma_desc)),
-                                        &lp->rx_ring_dma, GFP_KERNEL);
-       if (!lp->rx_ring)
-               return -ENOMEM;
-
-       lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
-                                           MAX_RX_DESCR * MAX_RBUFF_SZ,
-                                           &lp->rx_buffers_dma, GFP_KERNEL);
-       if (!lp->rx_buffers) {
-               dma_free_coherent(&lp->pdev->dev,
-                                 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
-                                 lp->rx_ring, lp->rx_ring_dma);
-               lp->rx_ring = NULL;
-               return -ENOMEM;
-       }
-
-       addr = lp->rx_buffers_dma;
-       for (i = 0; i < MAX_RX_DESCR; i++) {
-               lp->rx_ring[i].addr = addr;
-               lp->rx_ring[i].ctrl = 0;
-               addr += MAX_RBUFF_SZ;
-       }
-
-       /* Set the Wrap bit on the last descriptor */
-       lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
-
-       /* Reset buffer index */
-       lp->rx_tail = 0;
-
-       /* Program address of descriptor list in Rx Buffer Queue register */
-       macb_writel(lp, RBQP, lp->rx_ring_dma);
-
-       /* Enable Receive and Transmit */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
-
-       return 0;
-}
-
-/* Open the ethernet interface */
-static int at91ether_open(struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-       u32 ctl;
-       int ret;
-
-       /* Clear internal statistics */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
-
-       macb_set_hwaddr(lp);
-
-       ret = at91ether_start(dev);
-       if (ret)
-               return ret;
-
-       /* Enable MAC interrupts */
-       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
-
-       /* schedule a link state check */
-       phy_start(lp->phy_dev);
-
-       netif_start_queue(dev);
-
-       return 0;
-}
-
-/* Close the interface */
-static int at91ether_close(struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-       u32 ctl;
-
-       /* Disable Receiver and Transmitter */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
-       /* Disable MAC interrupts */
-       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
-
-       netif_stop_queue(dev);
-
-       dma_free_coherent(&lp->pdev->dev,
-                               MAX_RX_DESCR * sizeof(struct macb_dma_desc),
-                               lp->rx_ring, lp->rx_ring_dma);
-       lp->rx_ring = NULL;
-
-       dma_free_coherent(&lp->pdev->dev,
-                               MAX_RX_DESCR * MAX_RBUFF_SZ,
-                               lp->rx_buffers, lp->rx_buffers_dma);
-       lp->rx_buffers = NULL;
-
-       return 0;
-}
-
-/* Transmit packet */
-static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-
-       if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
-               netif_stop_queue(dev);
-
-               /* Store packet information (to free when Tx completed) */
-               lp->skb = skb;
-               lp->skb_length = skb->len;
-               lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
-                                                       DMA_TO_DEVICE);
-
-               /* Set address of the data in the Transmit Address register */
-               macb_writel(lp, TAR, lp->skb_physaddr);
-               /* Set length of the packet in the Transmit Control register */
-               macb_writel(lp, TCR, skb->len);
-
-       } else {
-               netdev_err(dev, "%s called, but device is busy!\n", __func__);
-               return NETDEV_TX_BUSY;
-       }
-
-       return NETDEV_TX_OK;
-}
-
-/* Extract received frame from buffer descriptors and sent to upper layers.
- * (Called from interrupt context)
- */
-static void at91ether_rx(struct net_device *dev)
-{
-       struct macb *lp = netdev_priv(dev);
-       unsigned char *p_recv;
-       struct sk_buff *skb;
-       unsigned int pktlen;
-
-       while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
-               p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
-               pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
-               skb = netdev_alloc_skb(dev, pktlen + 2);
-               if (skb) {
-                       skb_reserve(skb, 2);
-                       memcpy(skb_put(skb, pktlen), p_recv, pktlen);
-
-                       skb->protocol = eth_type_trans(skb, dev);
-                       lp->stats.rx_packets++;
-                       lp->stats.rx_bytes += pktlen;
-                       netif_rx(skb);
-               } else {
-                       lp->stats.rx_dropped++;
-               }
-
-               if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
-                       lp->stats.multicast++;
-
-               /* reset ownership bit */
-               lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
-
-               /* wrap after last buffer */
-               if (lp->rx_tail == MAX_RX_DESCR - 1)
-                       lp->rx_tail = 0;
-               else
-                       lp->rx_tail++;
-       }
-}
-
-/* MAC interrupt handler */
-static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       struct macb *lp = netdev_priv(dev);
-       u32 intstatus, ctl;
-
-       /* MAC Interrupt Status register indicates what interrupts are pending.
-        * It is automatically cleared once read.
-        */
-       intstatus = macb_readl(lp, ISR);
-
-       /* Receive complete */
-       if (intstatus & MACB_BIT(RCOMP))
-               at91ether_rx(dev);
-
-       /* Transmit complete */
-       if (intstatus & MACB_BIT(TCOMP)) {
-               /* The TCOM bit is set even if the transmission failed */
-               if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
-                       lp->stats.tx_errors++;
-
-               if (lp->skb) {
-                       dev_kfree_skb_irq(lp->skb);
-                       lp->skb = NULL;
-                       dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
-                       lp->stats.tx_packets++;
-                       lp->stats.tx_bytes += lp->skb_length;
-               }
-               netif_wake_queue(dev);
-       }
-
-       /* Work-around for EMAC Errata section 41.3.1 */
-       if (intstatus & MACB_BIT(RXUBR)) {
-               ctl = macb_readl(lp, NCR);
-               macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
-               macb_writel(lp, NCR, ctl | MACB_BIT(RE));
-       }
-
-       if (intstatus & MACB_BIT(ISR_ROVR))
-               netdev_err(dev, "ROVR error\n");
-
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void at91ether_poll_controller(struct net_device *dev)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       at91ether_interrupt(dev->irq, dev);
-       local_irq_restore(flags);
-}
-#endif
-
-static const struct net_device_ops at91ether_netdev_ops = {
-       .ndo_open               = at91ether_open,
-       .ndo_stop               = at91ether_close,
-       .ndo_start_xmit         = at91ether_start_xmit,
-       .ndo_get_stats          = macb_get_stats,
-       .ndo_set_rx_mode        = macb_set_rx_mode,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_do_ioctl           = macb_ioctl,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_change_mtu         = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = at91ether_poll_controller,
-#endif
-};
-
-#if defined(CONFIG_OF)
-static const struct of_device_id at91ether_dt_ids[] = {
-       { .compatible = "cdns,at91rm9200-emac" },
-       { .compatible = "cdns,emac" },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
-#endif
-
-/* Detect MAC & PHY and perform ethernet interface initialization */
-static int __init at91ether_probe(struct platform_device *pdev)
-{
-       struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
-       struct resource *regs;
-       struct net_device *dev;
-       struct phy_device *phydev;
-       struct macb *lp;
-       int res;
-       u32 reg;
-       const char *mac;
-
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs)
-               return -ENOENT;
-
-       dev = alloc_etherdev(sizeof(struct macb));
-       if (!dev)
-               return -ENOMEM;
-
-       lp = netdev_priv(dev);
-       lp->pdev = pdev;
-       lp->dev = dev;
-       spin_lock_init(&lp->lock);
-
-       /* physical base address */
-       dev->base_addr = regs->start;
-       lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
-       if (!lp->regs) {
-               res = -ENOMEM;
-               goto err_free_dev;
-       }
-
-       /* Clock */
-       lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
-       if (IS_ERR(lp->pclk)) {
-               res = PTR_ERR(lp->pclk);
-               goto err_free_dev;
-       }
-       clk_prepare_enable(lp->pclk);
-
-       lp->hclk = ERR_PTR(-ENOENT);
-       lp->tx_clk = ERR_PTR(-ENOENT);
-
-       /* Install the interrupt handler */
-       dev->irq = platform_get_irq(pdev, 0);
-       res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
-       if (res)
-               goto err_disable_clock;
-
-       dev->netdev_ops = &at91ether_netdev_ops;
-       dev->ethtool_ops = &macb_ethtool_ops;
-       platform_set_drvdata(pdev, dev);
-       SET_NETDEV_DEV(dev, &pdev->dev);
-
-       mac = of_get_mac_address(pdev->dev.of_node);
-       if (mac)
-               memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
-       else
-               macb_get_hwaddr(lp);
-
-       res = of_get_phy_mode(pdev->dev.of_node);
-       if (res < 0) {
-               if (board_data && board_data->is_rmii)
-                       lp->phy_interface = PHY_INTERFACE_MODE_RMII;
-               else
-                       lp->phy_interface = PHY_INTERFACE_MODE_MII;
-       } else {
-               lp->phy_interface = res;
-       }
-
-       macb_writel(lp, NCR, 0);
-
-       reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
-       if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
-               reg |= MACB_BIT(RM9200_RMII);
-
-       macb_writel(lp, NCFGR, reg);
-
-       /* Register the network interface */
-       res = register_netdev(dev);
-       if (res)
-               goto err_disable_clock;
-
-       res = macb_mii_init(lp);
-       if (res)
-               goto err_out_unregister_netdev;
-
-       /* will be enabled in open() */
-       netif_carrier_off(dev);
-
-       phydev = lp->phy_dev;
-       netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
-                               phydev->drv->name, dev_name(&phydev->dev),
-                               phydev->irq);
-
-       /* Display ethernet banner */
-       netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
-                               dev->base_addr, dev->irq, dev->dev_addr);
-
-       return 0;
-
-err_out_unregister_netdev:
-       unregister_netdev(dev);
-err_disable_clock:
-       clk_disable_unprepare(lp->pclk);
-err_free_dev:
-       free_netdev(dev);
-       return res;
-}
-
-static int at91ether_remove(struct platform_device *pdev)
-{
-       struct net_device *dev = platform_get_drvdata(pdev);
-       struct macb *lp = netdev_priv(dev);
-
-       if (lp->phy_dev)
-               phy_disconnect(lp->phy_dev);
-
-       mdiobus_unregister(lp->mii_bus);
-       kfree(lp->mii_bus->irq);
-       mdiobus_free(lp->mii_bus);
-       unregister_netdev(dev);
-       clk_disable_unprepare(lp->pclk);
-       free_netdev(dev);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
-       struct net_device *net_dev = platform_get_drvdata(pdev);
-       struct macb *lp = netdev_priv(net_dev);
-
-       if (netif_running(net_dev)) {
-               netif_stop_queue(net_dev);
-               netif_device_detach(net_dev);
-
-               clk_disable_unprepare(lp->pclk);
-       }
-       return 0;
-}
-
-static int at91ether_resume(struct platform_device *pdev)
-{
-       struct net_device *net_dev = platform_get_drvdata(pdev);
-       struct macb *lp = netdev_priv(net_dev);
-
-       if (netif_running(net_dev)) {
-               clk_prepare_enable(lp->pclk);
-
-               netif_device_attach(net_dev);
-               netif_start_queue(net_dev);
-       }
-       return 0;
-}
-#else
-#define at91ether_suspend      NULL
-#define at91ether_resume       NULL
-#endif
-
-static struct platform_driver at91ether_driver = {
-       .remove         = at91ether_remove,
-       .suspend        = at91ether_suspend,
-       .resume         = at91ether_resume,
-       .driver         = {
-               .name   = "at91_ether",
-               .of_match_table = of_match_ptr(at91ether_dt_ids),
-       },
-};
-
-module_platform_driver_probe(at91ether_driver, at91ether_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
-MODULE_AUTHOR("Andrew Victor");
-MODULE_ALIAS("platform:at91_ether");
index 1fe8b946243af5187851569a13be4c5e914dade0..a4c5462c071a968713f36c0861bc1b8d78d54dae 100644 (file)
@@ -102,7 +102,7 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
        return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
 }
 
-void macb_set_hwaddr(struct macb *bp)
+static void macb_set_hwaddr(struct macb *bp)
 {
        u32 bottom;
        u16 top;
@@ -120,9 +120,8 @@ void macb_set_hwaddr(struct macb *bp)
        macb_or_gem_writel(bp, SA4B, 0);
        macb_or_gem_writel(bp, SA4T, 0);
 }
-EXPORT_SYMBOL_GPL(macb_set_hwaddr);
 
-void macb_get_hwaddr(struct macb *bp)
+static void macb_get_hwaddr(struct macb *bp)
 {
        struct macb_platform_data *pdata;
        u32 bottom;
@@ -162,7 +161,6 @@ void macb_get_hwaddr(struct macb *bp)
        netdev_info(bp->dev, "invalid hw address, using random\n");
        eth_hw_addr_random(bp->dev);
 }
-EXPORT_SYMBOL_GPL(macb_get_hwaddr);
 
 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
@@ -213,6 +211,9 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
 {
        long ferr, rate, rate_rounded;
 
+       if (!clk)
+               return;
+
        switch (speed) {
        case SPEED_10:
                rate = 2500000;
@@ -292,8 +293,7 @@ static void macb_handle_link_change(struct net_device *dev)
 
        spin_unlock_irqrestore(&bp->lock, flags);
 
-       if (!IS_ERR(bp->tx_clk))
-               macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+       macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
 
        if (status_change) {
                if (phydev->link) {
@@ -357,7 +357,7 @@ static int macb_mii_probe(struct net_device *dev)
        return 0;
 }
 
-int macb_mii_init(struct macb *bp)
+static int macb_mii_init(struct macb *bp)
 {
        struct macb_platform_data *pdata;
        struct device_node *np;
@@ -438,7 +438,6 @@ err_out_free_mdiobus:
 err_out:
        return err;
 }
-EXPORT_SYMBOL_GPL(macb_mii_init);
 
 static void macb_update_stats(struct macb *bp)
 {
@@ -1741,7 +1740,7 @@ static void macb_sethashtable(struct net_device *dev)
 /*
  * Enable/Disable promiscuous and multicast modes.
  */
-void macb_set_rx_mode(struct net_device *dev)
+static void macb_set_rx_mode(struct net_device *dev)
 {
        unsigned long cfg;
        struct macb *bp = netdev_priv(dev);
@@ -1782,7 +1781,6 @@ void macb_set_rx_mode(struct net_device *dev)
 
        macb_writel(bp, NCFGR, cfg);
 }
-EXPORT_SYMBOL_GPL(macb_set_rx_mode);
 
 static int macb_open(struct net_device *dev)
 {
@@ -1935,7 +1933,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
        }
 }
 
-struct net_device_stats *macb_get_stats(struct net_device *dev)
+static struct net_device_stats *macb_get_stats(struct net_device *dev)
 {
        struct macb *bp = netdev_priv(dev);
        struct net_device_stats *nstat = &bp->stats;
@@ -1981,7 +1979,6 @@ struct net_device_stats *macb_get_stats(struct net_device *dev)
 
        return nstat;
 }
-EXPORT_SYMBOL_GPL(macb_get_stats);
 
 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -2043,7 +2040,7 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        }
 }
 
-const struct ethtool_ops macb_ethtool_ops = {
+static const struct ethtool_ops macb_ethtool_ops = {
        .get_settings           = macb_get_settings,
        .set_settings           = macb_set_settings,
        .get_regs_len           = macb_get_regs_len,
@@ -2051,7 +2048,6 @@ const struct ethtool_ops macb_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = ethtool_op_get_ts_info,
 };
-EXPORT_SYMBOL_GPL(macb_ethtool_ops);
 
 static const struct ethtool_ops gem_ethtool_ops = {
        .get_settings           = macb_get_settings,
@@ -2065,7 +2061,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
        .get_sset_count         = gem_get_sset_count,
 };
 
-int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev = bp->phy_dev;
@@ -2078,7 +2074,6 @@ int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
        return phy_mii_ioctl(phydev, rq, cmd);
 }
-EXPORT_SYMBOL_GPL(macb_ioctl);
 
 static int macb_set_features(struct net_device *netdev,
                             netdev_features_t features)
@@ -2130,35 +2125,6 @@ static const struct net_device_ops macb_netdev_ops = {
        .ndo_set_features       = macb_set_features,
 };
 
-#if defined(CONFIG_OF)
-static struct macb_config pc302gem_config = {
-       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
-       .dma_burst_length = 16,
-};
-
-static struct macb_config sama5d3_config = {
-       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
-       .dma_burst_length = 16,
-};
-
-static struct macb_config sama5d4_config = {
-       .caps = 0,
-       .dma_burst_length = 4,
-};
-
-static const struct of_device_id macb_dt_ids[] = {
-       { .compatible = "cdns,at32ap7000-macb" },
-       { .compatible = "cdns,at91sam9260-macb" },
-       { .compatible = "cdns,macb" },
-       { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
-       { .compatible = "cdns,gem", .data = &pc302gem_config },
-       { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
-       { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, macb_dt_ids);
-#endif
-
 /*
  * Configure peripheral capacities according to device tree
  * and integration options used
@@ -2166,22 +2132,6 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
 static void macb_configure_caps(struct macb *bp)
 {
        u32 dcfg;
-       const struct of_device_id *match;
-       const struct macb_config *config;
-
-       if (bp->pdev->dev.of_node) {
-               match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
-               if (match && match->data) {
-                       config = (const struct macb_config *)match->data;
-
-                       bp->caps = config->caps;
-                       /*
-                        * As we have access to the matching node, configure
-                        * DMA burst length as well
-                        */
-                       bp->dma_burst_length = config->dma_burst_length;
-               }
-       }
 
        if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
                bp->caps |= MACB_CAPS_MACB_IS_GEM;
@@ -2211,7 +2161,7 @@ static void macb_probe_queues(void __iomem *mem,
        /* is it macb or gem ? */
        mid = readl_relaxed(mem + MACB_MID);
 
-       if (MACB_BFEXT(IDNUM, mid) != 0x2)
+       if (MACB_BFEXT(IDNUM, mid) < 0x2)
                return;
 
        /* bit 0 is never set but queue 0 always exists */
@@ -2224,93 +2174,57 @@ static void macb_probe_queues(void __iomem *mem,
                        (*num_queues)++;
 }
 
-static int macb_probe(struct platform_device *pdev)
+static int macb_init(struct platform_device *pdev)
 {
-       struct macb_platform_data *pdata;
-       struct resource *regs;
-       struct net_device *dev;
-       struct macb *bp;
-       struct macb_queue *queue;
-       struct phy_device *phydev;
-       u32 config;
-       int err = -ENXIO;
-       const char *mac;
-       void __iomem *mem;
+       struct net_device *dev = platform_get_drvdata(pdev);
        unsigned int hw_q, queue_mask, q, num_queues;
-       struct clk *pclk, *hclk, *tx_clk;
-
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               dev_err(&pdev->dev, "no mmio resource defined\n");
-               goto err_out;
-       }
+       struct macb *bp = netdev_priv(dev);
+       struct macb_queue *queue;
+       int err;
+       u32 val;
 
-       pclk = devm_clk_get(&pdev->dev, "pclk");
-       if (IS_ERR(pclk)) {
-               err = PTR_ERR(pclk);
+       bp->pclk = devm_clk_get(&pdev->dev, "pclk");
+       if (IS_ERR(bp->pclk)) {
+               err = PTR_ERR(bp->pclk);
                dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
-               goto err_out;
+               return err;
        }
 
-       hclk = devm_clk_get(&pdev->dev, "hclk");
-       if (IS_ERR(hclk)) {
-               err = PTR_ERR(hclk);
+       bp->hclk = devm_clk_get(&pdev->dev, "hclk");
+       if (IS_ERR(bp->hclk)) {
+               err = PTR_ERR(bp->hclk);
                dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
-               goto err_out;
+               return err;
        }
 
-       tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+       bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+       if (IS_ERR(bp->tx_clk))
+               bp->tx_clk = NULL;
 
-       err = clk_prepare_enable(pclk);
+       err = clk_prepare_enable(bp->pclk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
-               goto err_out;
+               return err;
        }
 
-       err = clk_prepare_enable(hclk);
+       err = clk_prepare_enable(bp->hclk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
-               goto err_out_disable_pclk;
+               goto err_disable_pclk;
        }
 
-       if (!IS_ERR(tx_clk)) {
-               err = clk_prepare_enable(tx_clk);
-               if (err) {
-                       dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
-                               err);
-                       goto err_out_disable_hclk;
-               }
-       }
-
-       err = -ENOMEM;
-       mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
-       if (!mem) {
-               dev_err(&pdev->dev, "failed to map registers, aborting.\n");
-               goto err_out_disable_clocks;
+       err = clk_prepare_enable(bp->tx_clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+               goto err_disable_hclk;
        }
 
-       macb_probe_queues(mem, &queue_mask, &num_queues);
-       dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
-       if (!dev)
-               goto err_out_disable_clocks;
-
-       SET_NETDEV_DEV(dev, &pdev->dev);
-
-       bp = netdev_priv(dev);
-       bp->pdev = pdev;
-       bp->dev = dev;
-       bp->regs = mem;
-       bp->num_queues = num_queues;
-       bp->pclk = pclk;
-       bp->hclk = hclk;
-       bp->tx_clk = tx_clk;
-
-       spin_lock_init(&bp->lock);
-
        /* set the queue register mapping once for all: queue0 has a special
         * register mapping but we don't want to test the queue index then
         * compute the corresponding register offset at run time.
         */
+       macb_probe_queues(bp->regs, &queue_mask, &num_queues);
+
        for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
                if (!(queue_mask & (1 << hw_q)))
                        continue;
@@ -2339,27 +2253,21 @@ static int macb_probe(struct platform_device *pdev)
                 */
                queue->irq = platform_get_irq(pdev, q);
                err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
-                                      0, dev->name, queue);
+                                      IRQF_SHARED, dev->name, queue);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to request IRQ %d (error %d)\n",
                                queue->irq, err);
-                       goto err_out_free_netdev;
+                       goto err_disable_tx_clk;
                }
 
                INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
                q++;
        }
-       dev->irq = bp->queues[0].irq;
 
        dev->netdev_ops = &macb_netdev_ops;
        netif_napi_add(dev, &bp->napi, macb_poll, 64);
 
-       dev->base_addr = regs->start;
-
-       /* setup capacities */
-       macb_configure_caps(bp);
-
        /* setup appropriated routines according to adapter type */
        if (macb_is_gem(bp)) {
                bp->max_tx_length = GEM_MAX_TX_LEN;
@@ -2386,18 +2294,439 @@ static int macb_probe(struct platform_device *pdev)
                dev->hw_features &= ~NETIF_F_SG;
        dev->features = dev->hw_features;
 
+       val = 0;
+       if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+               val = GEM_BIT(RGMII);
+       else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
+                (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+               val = MACB_BIT(RMII);
+       else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+               val = MACB_BIT(MII);
+
+       if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
+               val |= MACB_BIT(CLKEN);
+
+       macb_or_gem_writel(bp, USRIO, val);
+
+       /* setup capacities */
+       macb_configure_caps(bp);
+
        /* Set MII management clock divider */
-       config = macb_mdc_clk_div(bp);
-       config |= macb_dbw(bp);
-       macb_writel(bp, NCFGR, config);
+       val = macb_mdc_clk_div(bp);
+       val |= macb_dbw(bp);
+       macb_writel(bp, NCFGR, val);
+
+       return 0;
+
+err_disable_tx_clk:
+       clk_disable_unprepare(bp->tx_clk);
+
+err_disable_hclk:
+       clk_disable_unprepare(bp->hclk);
+
+err_disable_pclk:
+       clk_disable_unprepare(bp->pclk);
+
+       return err;
+}
+
+#if defined(CONFIG_OF)
+/* 1518 rounded up */
+#define AT91ETHER_MAX_RBUFF_SZ 0x600
+/* max number of receive buffers */
+#define AT91ETHER_MAX_RX_DESCR 9
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+       dma_addr_t addr;
+       u32 ctl;
+       int i;
+
+       lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+                                        (AT91ETHER_MAX_RX_DESCR *
+                                         sizeof(struct macb_dma_desc)),
+                                        &lp->rx_ring_dma, GFP_KERNEL);
+       if (!lp->rx_ring)
+               return -ENOMEM;
+
+       lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+                                           AT91ETHER_MAX_RX_DESCR *
+                                           AT91ETHER_MAX_RBUFF_SZ,
+                                           &lp->rx_buffers_dma, GFP_KERNEL);
+       if (!lp->rx_buffers) {
+               dma_free_coherent(&lp->pdev->dev,
+                                 AT91ETHER_MAX_RX_DESCR *
+                                 sizeof(struct macb_dma_desc),
+                                 lp->rx_ring, lp->rx_ring_dma);
+               lp->rx_ring = NULL;
+               return -ENOMEM;
+       }
+
+       addr = lp->rx_buffers_dma;
+       for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
+               lp->rx_ring[i].addr = addr;
+               lp->rx_ring[i].ctrl = 0;
+               addr += AT91ETHER_MAX_RBUFF_SZ;
+       }
+
+       /* Set the Wrap bit on the last descriptor */
+       lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+
+       /* Reset buffer index */
+       lp->rx_tail = 0;
+
+       /* Program address of descriptor list in Rx Buffer Queue register */
+       macb_writel(lp, RBQP, lp->rx_ring_dma);
+
+       /* Enable Receive and Transmit */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+
+       return 0;
+}
+
+/* Open the ethernet interface */
+static int at91ether_open(struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+       u32 ctl;
+       int ret;
+
+       /* Clear internal statistics */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
+
+       macb_set_hwaddr(lp);
+
+       ret = at91ether_start(dev);
+       if (ret)
+               return ret;
+
+       /* Enable MAC interrupts */
+       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
+       /* schedule a link state check */
+       phy_start(lp->phy_dev);
+
+       netif_start_queue(dev);
+
+       return 0;
+}
+
+/* Close the interface */
+static int at91ether_close(struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+       u32 ctl;
+
+       /* Disable Receiver and Transmitter */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+       /* Disable MAC interrupts */
+       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
+       netif_stop_queue(dev);
+
+       dma_free_coherent(&lp->pdev->dev,
+                         AT91ETHER_MAX_RX_DESCR *
+                         sizeof(struct macb_dma_desc),
+                         lp->rx_ring, lp->rx_ring_dma);
+       lp->rx_ring = NULL;
+
+       dma_free_coherent(&lp->pdev->dev,
+                         AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
+                         lp->rx_buffers, lp->rx_buffers_dma);
+       lp->rx_buffers = NULL;
+
+       return 0;
+}
+
+/* Transmit packet */
+static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+
+       if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
+               netif_stop_queue(dev);
+
+               /* Store packet information (to free when Tx completed) */
+               lp->skb = skb;
+               lp->skb_length = skb->len;
+               lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+                                                       DMA_TO_DEVICE);
+
+               /* Set address of the data in the Transmit Address register */
+               macb_writel(lp, TAR, lp->skb_physaddr);
+               /* Set length of the packet in the Transmit Control register */
+               macb_writel(lp, TCR, skb->len);
+
+       } else {
+               netdev_err(dev, "%s called, but device is busy!\n", __func__);
+               return NETDEV_TX_BUSY;
+       }
+
+       return NETDEV_TX_OK;
+}
+
+/* Extract received frame from buffer descriptors and sent to upper layers.
+ * (Called from interrupt context)
+ */
+static void at91ether_rx(struct net_device *dev)
+{
+       struct macb *lp = netdev_priv(dev);
+       unsigned char *p_recv;
+       struct sk_buff *skb;
+       unsigned int pktlen;
+
+       while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+               p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+               pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+               skb = netdev_alloc_skb(dev, pktlen + 2);
+               if (skb) {
+                       skb_reserve(skb, 2);
+                       memcpy(skb_put(skb, pktlen), p_recv, pktlen);
+
+                       skb->protocol = eth_type_trans(skb, dev);
+                       lp->stats.rx_packets++;
+                       lp->stats.rx_bytes += pktlen;
+                       netif_rx(skb);
+               } else {
+                       lp->stats.rx_dropped++;
+               }
+
+               if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+                       lp->stats.multicast++;
+
+               /* reset ownership bit */
+               lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+
+               /* wrap after last buffer */
+               if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+                       lp->rx_tail = 0;
+               else
+                       lp->rx_tail++;
+       }
+}
+
+/* MAC interrupt handler */
+static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct macb *lp = netdev_priv(dev);
+       u32 intstatus, ctl;
+
+       /* MAC Interrupt Status register indicates what interrupts are pending.
+        * It is automatically cleared once read.
+        */
+       intstatus = macb_readl(lp, ISR);
+
+       /* Receive complete */
+       if (intstatus & MACB_BIT(RCOMP))
+               at91ether_rx(dev);
+
+       /* Transmit complete */
+       if (intstatus & MACB_BIT(TCOMP)) {
+               /* The TCOM bit is set even if the transmission failed */
+               if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+                       lp->stats.tx_errors++;
+
+               if (lp->skb) {
+                       dev_kfree_skb_irq(lp->skb);
+                       lp->skb = NULL;
+                       dma_unmap_single(NULL, lp->skb_physaddr,
+                                        lp->skb_length, DMA_TO_DEVICE);
+                       lp->stats.tx_packets++;
+                       lp->stats.tx_bytes += lp->skb_length;
+               }
+               netif_wake_queue(dev);
+       }
+
+       /* Work-around for EMAC Errata section 41.3.1 */
+       if (intstatus & MACB_BIT(RXUBR)) {
+               ctl = macb_readl(lp, NCR);
+               macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+               macb_writel(lp, NCR, ctl | MACB_BIT(RE));
+       }
+
+       if (intstatus & MACB_BIT(ISR_ROVR))
+               netdev_err(dev, "ROVR error\n");
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void at91ether_poll_controller(struct net_device *dev)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       at91ether_interrupt(dev->irq, dev);
+       local_irq_restore(flags);
+}
+#endif
+
+static const struct net_device_ops at91ether_netdev_ops = {
+       .ndo_open               = at91ether_open,
+       .ndo_stop               = at91ether_close,
+       .ndo_start_xmit         = at91ether_start_xmit,
+       .ndo_get_stats          = macb_get_stats,
+       .ndo_set_rx_mode        = macb_set_rx_mode,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_do_ioctl           = macb_ioctl,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = at91ether_poll_controller,
+#endif
+};
+
+static int at91ether_init(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct macb *bp = netdev_priv(dev);
+       int err;
+       u32 reg;
+
+       bp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
+       if (IS_ERR(bp->pclk))
+               return PTR_ERR(bp->pclk);
+
+       err = clk_prepare_enable(bp->pclk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+               return err;
+       }
+
+       dev->netdev_ops = &at91ether_netdev_ops;
+       dev->ethtool_ops = &macb_ethtool_ops;
+
+       err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
+                              0, dev->name, dev);
+       if (err)
+               goto err_disable_clk;
+
+       macb_writel(bp, NCR, 0);
+
+       reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+       if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+               reg |= MACB_BIT(RM9200_RMII);
+
+       macb_writel(bp, NCFGR, reg);
+
+       return 0;
+
+err_disable_clk:
+       clk_disable_unprepare(bp->pclk);
+
+       return err;
+}
+
+static struct macb_config at91sam9260_config = {
+       .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
+       .init = macb_init,
+};
+
+static struct macb_config pc302gem_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+       .dma_burst_length = 16,
+       .init = macb_init,
+};
 
-       mac = of_get_mac_address(pdev->dev.of_node);
+static struct macb_config sama5d3_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+       .dma_burst_length = 16,
+       .init = macb_init,
+};
+
+static struct macb_config sama5d4_config = {
+       .caps = 0,
+       .dma_burst_length = 4,
+       .init = macb_init,
+};
+
+static struct macb_config emac_config = {
+       .init = at91ether_init,
+};
+
+static const struct of_device_id macb_dt_ids[] = {
+       { .compatible = "cdns,at32ap7000-macb" },
+       { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
+       { .compatible = "cdns,macb" },
+       { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
+       { .compatible = "cdns,gem", .data = &pc302gem_config },
+       { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+       { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
+       { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
+       { .compatible = "cdns,emac", .data = &emac_config },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+#endif /* CONFIG_OF */
+
+static int macb_probe(struct platform_device *pdev)
+{
+       int (*init)(struct platform_device *) = macb_init;
+       struct device_node *np = pdev->dev.of_node;
+       const struct macb_config *macb_config = NULL;
+       unsigned int queue_mask, num_queues;
+       struct macb_platform_data *pdata;
+       struct phy_device *phydev;
+       struct net_device *dev;
+       struct resource *regs;
+       void __iomem *mem;
+       const char *mac;
+       struct macb *bp;
+       int err;
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mem = devm_ioremap_resource(&pdev->dev, regs);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
+
+       macb_probe_queues(mem, &queue_mask, &num_queues);
+       dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
+       if (!dev)
+               return -ENOMEM;
+
+       dev->base_addr = regs->start;
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       bp = netdev_priv(dev);
+       bp->pdev = pdev;
+       bp->dev = dev;
+       bp->regs = mem;
+       bp->num_queues = num_queues;
+       spin_lock_init(&bp->lock);
+
+       platform_set_drvdata(pdev, dev);
+
+       dev->irq = platform_get_irq(pdev, 0);
+       if (dev->irq < 0)
+               return dev->irq;
+
+       mac = of_get_mac_address(np);
        if (mac)
                memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
        else
                macb_get_hwaddr(bp);
 
-       err = of_get_phy_mode(pdev->dev.of_node);
+       err = of_get_phy_mode(np);
        if (err < 0) {
                pdata = dev_get_platdata(&pdev->dev);
                if (pdata && pdata->is_rmii)
@@ -2408,34 +2737,35 @@ static int macb_probe(struct platform_device *pdev)
                bp->phy_interface = err;
        }
 
-       if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
-               macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
-       else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
-#if defined(CONFIG_ARCH_AT91)
-               macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
-                                              MACB_BIT(CLKEN)));
-#else
-               macb_or_gem_writel(bp, USRIO, 0);
-#endif
-       else
-#if defined(CONFIG_ARCH_AT91)
-               macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
-#else
-               macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
-#endif
+       if (np) {
+               const struct of_device_id *match;
+
+               match = of_match_node(macb_dt_ids, np);
+               if (match)
+                       macb_config = match->data;
+       }
+
+       if (macb_config) {
+               bp->caps = macb_config->caps;
+               bp->dma_burst_length = macb_config->dma_burst_length;
+               init = macb_config->init;
+       }
+
+       /* IP specific init */
+       err = init(pdev);
+       if (err)
+               goto err_out_free_netdev;
 
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_free_netdev;
+               goto err_disable_clocks;
        }
 
        err = macb_mii_init(bp);
        if (err)
                goto err_out_unregister_netdev;
 
-       platform_set_drvdata(pdev, dev);
-
        netif_carrier_off(dev);
 
        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -2450,16 +2780,15 @@ static int macb_probe(struct platform_device *pdev)
 
 err_out_unregister_netdev:
        unregister_netdev(dev);
+
+err_disable_clocks:
+       clk_disable_unprepare(bp->tx_clk);
+       clk_disable_unprepare(bp->hclk);
+       clk_disable_unprepare(bp->pclk);
+
 err_out_free_netdev:
        free_netdev(dev);
-err_out_disable_clocks:
-       if (!IS_ERR(tx_clk))
-               clk_disable_unprepare(tx_clk);
-err_out_disable_hclk:
-       clk_disable_unprepare(hclk);
-err_out_disable_pclk:
-       clk_disable_unprepare(pclk);
-err_out:
+
        return err;
 }
 
@@ -2478,8 +2807,7 @@ static int macb_remove(struct platform_device *pdev)
                kfree(bp->mii_bus->irq);
                mdiobus_free(bp->mii_bus);
                unregister_netdev(dev);
-               if (!IS_ERR(bp->tx_clk))
-                       clk_disable_unprepare(bp->tx_clk);
+               clk_disable_unprepare(bp->tx_clk);
                clk_disable_unprepare(bp->hclk);
                clk_disable_unprepare(bp->pclk);
                free_netdev(dev);
@@ -2497,8 +2825,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
        netif_carrier_off(netdev);
        netif_device_detach(netdev);
 
-       if (!IS_ERR(bp->tx_clk))
-               clk_disable_unprepare(bp->tx_clk);
+       clk_disable_unprepare(bp->tx_clk);
        clk_disable_unprepare(bp->hclk);
        clk_disable_unprepare(bp->pclk);
 
@@ -2513,8 +2840,7 @@ static int __maybe_unused macb_resume(struct device *dev)
 
        clk_prepare_enable(bp->pclk);
        clk_prepare_enable(bp->hclk);
-       if (!IS_ERR(bp->tx_clk))
-               clk_prepare_enable(bp->tx_clk);
+       clk_prepare_enable(bp->tx_clk);
 
        netif_device_attach(netdev);
 
index 83241c8ec5dce87419a612ee6831b71960d17f40..21e4147d8b5ca72efbe11fb6f7aa89d17a0fe5f9 100644 (file)
 
 /* Capability mask bits */
 #define MACB_CAPS_ISR_CLEAR_ON_WRITE           0x00000001
+#define MACB_CAPS_USRIO_HAS_CLKEN              0x00000002
+#define MACB_CAPS_USRIO_DEFAULT_IS_MII         0x00000004
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
@@ -752,6 +754,7 @@ struct macb_or_gem_ops {
 struct macb_config {
        u32                     caps;
        unsigned int            dma_burst_length;
+       int     (*init)(struct platform_device *pdev);
 };
 
 struct macb_queue {
@@ -822,15 +825,6 @@ struct macb {
        u64                     ethtool_stats[GEM_STATS_LEN];
 };
 
-extern const struct ethtool_ops macb_ethtool_ops;
-
-int macb_mii_init(struct macb *bp);
-int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-struct net_device_stats *macb_get_stats(struct net_device *dev);
-void macb_set_rx_mode(struct net_device *dev);
-void macb_set_hwaddr(struct macb *bp);
-void macb_get_hwaddr(struct macb *bp);
-
 static inline bool macb_is_gem(struct macb *bp)
 {
        return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
index 47bfea24b9e1b1bce64f07d5eb0424fc2a9b7e0f..63efa0dc45ba61b11f8e1a245513485412e50027 100644 (file)
@@ -47,9 +47,9 @@
 #define XGMAC_REMOTE_WAKE      0x00000700      /* Remote Wake-Up Frm Filter */
 #define XGMAC_PMT              0x00000704      /* PMT Control and Status */
 #define XGMAC_MMC_CTRL         0x00000800      /* XGMAC MMC Control */
-#define XGMAC_MMC_INTR_RX      0x00000804      /* Recieve Interrupt */
+#define XGMAC_MMC_INTR_RX      0x00000804      /* Receive Interrupt */
 #define XGMAC_MMC_INTR_TX      0x00000808      /* Transmit Interrupt */
-#define XGMAC_MMC_INTR_MASK_RX 0x0000080c      /* Recieve Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_RX 0x0000080c      /* Receive Interrupt Mask */
 #define XGMAC_MMC_INTR_MASK_TX 0x00000810      /* Transmit Interrupt Mask */
 
 /* Hardware TX Statistics Counters */
 #define XGMAC_FLOW_CTRL_PT_MASK        0xffff0000      /* Pause Time Mask */
 #define XGMAC_FLOW_CTRL_PT_SHIFT       16
 #define XGMAC_FLOW_CTRL_DZQP   0x00000080      /* Disable Zero-Quanta Phase */
-#define XGMAC_FLOW_CTRL_PLT    0x00000020      /* Pause Low Threshhold */
+#define XGMAC_FLOW_CTRL_PLT    0x00000020      /* Pause Low Threshold */
 #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030    /* PLT MASK */
 #define XGMAC_FLOW_CTRL_UP     0x00000008      /* Unicast Pause Frame Detect */
 #define XGMAC_FLOW_CTRL_RFE    0x00000004      /* Rx Flow Control Enable */
 /* XGMAC Operation Mode Register */
 #define XGMAC_OMR_TSF          0x00200000      /* TX FIFO Store and Forward */
 #define XGMAC_OMR_FTF          0x00100000      /* Flush Transmit FIFO */
-#define XGMAC_OMR_TTC          0x00020000      /* Transmit Threshhold Ctrl */
+#define XGMAC_OMR_TTC          0x00020000      /* Transmit Threshold Ctrl */
 #define XGMAC_OMR_TTC_MASK     0x00030000
-#define XGMAC_OMR_RFD          0x00006000      /* FC Deactivation Threshhold */
-#define XGMAC_OMR_RFD_MASK     0x00007000      /* FC Deact Threshhold MASK */
-#define XGMAC_OMR_RFA          0x00000600      /* FC Activation Threshhold */
-#define XGMAC_OMR_RFA_MASK     0x00000E00      /* FC Act Threshhold MASK */
+#define XGMAC_OMR_RFD          0x00006000      /* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK     0x00007000      /* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA          0x00000600      /* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK     0x00000E00      /* FC Act Threshold MASK */
 #define XGMAC_OMR_EFC          0x00000100      /* Enable Hardware FC */
 #define XGMAC_OMR_FEF          0x00000080      /* Forward Error Frames */
 #define XGMAC_OMR_DT           0x00000040      /* Drop TCP/IP csum Errors */
 #define XGMAC_OMR_RSF          0x00000020      /* RX FIFO Store and Forward */
-#define XGMAC_OMR_RTC_256      0x00000018      /* RX Threshhold Ctrl */
-#define XGMAC_OMR_RTC_MASK     0x00000018      /* RX Threshhold Ctrl MASK */
+#define XGMAC_OMR_RTC_256      0x00000018      /* RX Threshold Ctrl */
+#define XGMAC_OMR_RTC_MASK     0x00000018      /* RX Threshold Ctrl MASK */
 
 /* XGMAC HW Features Register */
 #define DMA_HW_FEAT_TXCOESEL   0x00010000      /* TX Checksum offload */
index 184a8d545ac4230e07788fbb831be2dcdfa57f83..a22768c94200efe915016c85d18da2eaf806b5ff 100644 (file)
@@ -840,7 +840,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
  *     Read the specified number of 32-bit words from the serial flash.
  *     If @byte_oriented is set the read data is stored as a byte array
  *     (i.e., big-endian), otherwise as 32-bit words in the platform's
- *     natural endianess.
+ *     natural endianness.
  */
 static int t3_read_flash(struct adapter *adapter, unsigned int addr,
                         unsigned int nwords, u32 *data, int byte_oriented)
index 9062a843424688beabaa21e46b3e210387658c81..c308429dd9c7fa0aebf2cee3b951f71f3863d939 100644 (file)
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
 }
 
 static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
-                                  int addr_len)
+                                  u8 v6)
 {
-       return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
-                               ipv6_clip_hash(ctbl, addr);
+       return v6 ? ipv6_clip_hash(ctbl, addr) :
+                       ipv4_clip_hash(ctbl, addr);
 }
 
 static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
        struct clip_entry *ce, *cte;
        u32 *addr = (u32 *)lip;
        int hash;
-       int addr_len;
-       int ret = 0;
+       int ret = -1;
 
        if (!ctbl)
                return 0;
 
-       if (v6)
-               addr_len = 16;
-       else
-               addr_len = 4;
-
-       hash = clip_addr_hash(ctbl, addr, addr_len);
+       hash = clip_addr_hash(ctbl, addr, v6);
 
        read_lock_bh(&ctbl->lock);
        list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-               if (addr_len == cte->addr_len &&
-                   memcmp(lip, cte->addr, cte->addr_len) == 0) {
+               if (cte->addr6.sin6_family == AF_INET6 && v6)
+                       ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+                                    sizeof(struct in6_addr));
+               else if (cte->addr.sin_family == AF_INET && !v6)
+                       ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+                                    sizeof(struct in_addr));
+               if (!ret) {
                        ce = cte;
                        read_unlock_bh(&ctbl->lock);
                        goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
                spin_lock_init(&ce->lock);
                atomic_set(&ce->refcnt, 0);
                atomic_dec(&ctbl->nfree);
-               ce->addr_len = addr_len;
-               memcpy(ce->addr, lip, addr_len);
                list_add_tail(&ce->list, &ctbl->hash_list[hash]);
                if (v6) {
+                       ce->addr6.sin6_family = AF_INET6;
+                       memcpy(ce->addr6.sin6_addr.s6_addr,
+                              lip, sizeof(struct in6_addr));
                        ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
                        if (ret) {
                                write_unlock_bh(&ctbl->lock);
                                return ret;
                        }
+               } else {
+                       ce->addr.sin_family = AF_INET;
+                       memcpy((char *)(&ce->addr.sin_addr), lip,
+                              sizeof(struct in_addr));
                }
        } else {
                write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
        struct clip_entry *ce, *cte;
        u32 *addr = (u32 *)lip;
        int hash;
-       int addr_len;
-
-       if (v6)
-               addr_len = 16;
-       else
-               addr_len = 4;
+       int ret = -1;
 
-       hash = clip_addr_hash(ctbl, addr, addr_len);
+       hash = clip_addr_hash(ctbl, addr, v6);
 
        read_lock_bh(&ctbl->lock);
        list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-               if (addr_len == cte->addr_len &&
-                   memcmp(lip, cte->addr, cte->addr_len) == 0) {
+               if (cte->addr6.sin6_family == AF_INET6 && v6)
+                       ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+                                    sizeof(struct in6_addr));
+               else if (cte->addr.sin_family == AF_INET && !v6)
+                       ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+                                    sizeof(struct in_addr));
+               if (!ret) {
                        ce = cte;
                        read_unlock_bh(&ctbl->lock);
                        goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
        for (i = 0 ; i < ctbl->clipt_size;  ++i) {
                list_for_each_entry(ce, &ctbl->hash_list[i], list) {
                        ip[0] = '\0';
-                       if (ce->addr_len == 16)
-                               sprintf(ip, "%pI6c", ce->addr);
-                       else
-                               sprintf(ip, "%pI4c", ce->addr);
+                       sprintf(ip, "%pISc", &ce->addr);
                        seq_printf(seq, "%-25s   %u\n", ip,
                                   atomic_read(&ce->refcnt));
                }
index 2eaba0161cf8104eb8cbf9756112174fd1275b38..35eb43c6bcbbe37e5f934a767154bc4f4fe2f5c7 100644 (file)
@@ -14,8 +14,10 @@ struct clip_entry {
        spinlock_t lock;        /* Hold while modifying clip reference */
        atomic_t refcnt;
        struct list_head list;
-       u32 addr[4];
-       int addr_len;
+       union {
+               struct sockaddr_in addr;
+               struct sockaddr_in6 addr6;
+       };
 };
 
 struct clip_tbl {
index d6cda17efe6ef475a5579d8248e01a3158bbc272..4555634b985d54c6be1b1b16ac9ef7a8af70c05f 100644 (file)
@@ -369,7 +369,7 @@ enum {
        MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
        MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
        MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
-       MAX_RDMA_CIQS = NCHAN,        /* # of  RDMA concentrator IQs */
+       MAX_RDMA_CIQS = 32,        /* # of  RDMA concentrator IQs */
        MAX_ISCSI_QUEUES = NCHAN,     /* # of streaming iSCSI Rx queues */
 };
 
@@ -599,8 +599,8 @@ struct sge {
        u16 rdmaqs;                 /* # of available RDMA Rx queues */
        u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
        u16 ofld_rxq[MAX_OFLD_QSETS];
-       u16 rdma_rxq[NCHAN];
-       u16 rdma_ciq[NCHAN];
+       u16 rdma_rxq[MAX_RDMA_QUEUES];
+       u16 rdma_ciq[MAX_RDMA_CIQS];
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];
        u32 fl_pg_order;            /* large page allocation size */
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
 #define T4_MEMORY_WRITE        0
 #define T4_MEMORY_READ 1
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
-                __be32 *buf, int dir);
+                void *buf, int dir);
 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
                                  u32 len, __be32 *buf)
 {
index 78854ceb0870a29004ab04261fa26b296b958d4e..0918c16bb1548e76f97d450fefaefaa2dfb8fd23 100644 (file)
@@ -1769,6 +1769,8 @@ do { \
                int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
 
                S("QType:", "RDMA-CPL");
+               S("Interface:",
+                 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
                R("RspQ ID:", rspq.abs_id);
                R("RspQ size:", rspq.size);
                R("RspQE size:", rspq.iqe_len);
@@ -1788,6 +1790,8 @@ do { \
                int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
 
                S("QType:", "RDMA-CIQ");
+               S("Interface:",
+                 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
                R("RspQ ID:", rspq.abs_id);
                R("RspQ size:", rspq.size);
                R("RspQE size:", rspq.iqe_len);
index a22cf932ca3536920798ef50241fd9b43c26857a..4af8a9fd75ae8a7b1f2bae7d23effe575d580a7a 100644 (file)
@@ -957,6 +957,28 @@ static void enable_rx(struct adapter *adap)
        }
 }
 
+static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
+                          unsigned int nq, unsigned int per_chan, int msi_idx,
+                          u16 *ids)
+{
+       int i, err;
+
+       for (i = 0; i < nq; i++, q++) {
+               if (msi_idx > 0)
+                       msi_idx++;
+               err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+                                      adap->port[i / per_chan],
+                                      msi_idx, q->fl.size ? &q->fl : NULL,
+                                      uldrx_handler);
+               if (err)
+                       return err;
+               memset(&q->stats, 0, sizeof(q->stats));
+               if (ids)
+                       ids[i] = q->rspq.abs_id;
+       }
+       return 0;
+}
+
 /**
  *     setup_sge_queues - configure SGE Tx/Rx/response queues
  *     @adap: the adapter
@@ -1018,51 +1040,27 @@ freeout:        t4_free_sge_resources(adap);
 
        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
-               struct sge_ofld_rxq *q = &s->ofldrxq[i];
-               struct net_device *dev = adap->port[i / j];
-
-               if (msi_idx > 0)
-                       msi_idx++;
-               err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
-                                      q->fl.size ? &q->fl : NULL,
-                                      uldrx_handler);
-               if (err)
-                       goto freeout;
-               memset(&q->stats, 0, sizeof(q->stats));
-               s->ofld_rxq[i] = q->rspq.abs_id;
-               err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+               err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
+                                           adap->port[i / j],
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }
 
-       for_each_rdmarxq(s, i) {
-               struct sge_ofld_rxq *q = &s->rdmarxq[i];
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
+       err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+       if (err) \
+               goto freeout; \
+       if (msi_idx > 0) \
+               msi_idx += nq; \
+} while (0)
 
-               if (msi_idx > 0)
-                       msi_idx++;
-               err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
-                                      msi_idx, q->fl.size ? &q->fl : NULL,
-                                      uldrx_handler);
-               if (err)
-                       goto freeout;
-               memset(&q->stats, 0, sizeof(q->stats));
-               s->rdma_rxq[i] = q->rspq.abs_id;
-       }
+       ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
+       ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+       j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
+       ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
 
-       for_each_rdmaciq(s, i) {
-               struct sge_ofld_rxq *q = &s->rdmaciq[i];
-
-               if (msi_idx > 0)
-                       msi_idx++;
-               err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
-                                      msi_idx, q->fl.size ? &q->fl : NULL,
-                                      uldrx_handler);
-               if (err)
-                       goto freeout;
-               memset(&q->stats, 0, sizeof(q->stats));
-               s->rdma_ciq[i] = q->rspq.abs_id;
-       }
+#undef ALLOC_OFLD_RXQS
 
        for_each_port(adap, i) {
                /*
@@ -5368,7 +5366,7 @@ static int adap_init0(struct adapter *adap)
                adap->tids.stid_base = val[1];
                adap->tids.nstids = val[2] - val[1] + 1;
                /*
-                * Setup server filter region. Divide the availble filter
+                * Setup server filter region. Divide the available filter
                 * region into two parts. Regular filters get 1/3rd and server
                 * filters get 2/3rd part. This is only enabled if workaround
                 * path is enabled.
@@ -5705,7 +5703,16 @@ static void cfg_queues(struct adapter *adap)
                        s->ofldqsets = adap->params.nports;
                /* For RDMA one Rx queue per channel suffices */
                s->rdmaqs = adap->params.nports;
-               s->rdmaciqs = adap->params.nports;
+               /* Try and allow at least 1 CIQ per cpu rounding down
+                * to the number of ports, with a minimum of 1 per port.
+                * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
+                * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
+                * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
+                */
+               s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
+               s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
+                               adap->params.nports;
+               s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
        }
 
        for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -5791,12 +5798,17 @@ static void reduce_ethqs(struct adapter *adap, int n)
 static int enable_msix(struct adapter *adap)
 {
        int ofld_need = 0;
-       int i, want, need;
+       int i, want, need, allocated;
        struct sge *s = &adap->sge;
        unsigned int nchan = adap->params.nports;
-       struct msix_entry entries[MAX_INGQ + 1];
+       struct msix_entry *entries;
 
-       for (i = 0; i < ARRAY_SIZE(entries); ++i)
+       entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
+                         GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+
+       for (i = 0; i < MAX_INGQ + 1; ++i)
                entries[i].entry = i;
 
        want = s->max_ethqsets + EXTRA_VECS;
@@ -5813,29 +5825,39 @@ static int enable_msix(struct adapter *adap)
 #else
        need = adap->params.nports + EXTRA_VECS + ofld_need;
 #endif
-       want = pci_enable_msix_range(adap->pdev, entries, need, want);
-       if (want < 0)
-               return want;
+       allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
+       if (allocated < 0) {
+               dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
+                        " not using MSI-X\n");
+               kfree(entries);
+               return allocated;
+       }
 
-       /*
-        * Distribute available vectors to the various queue groups.
+       /* Distribute available vectors to the various queue groups.
         * Every group gets its minimum requirement and NIC gets top
         * priority for leftovers.
         */
-       i = want - EXTRA_VECS - ofld_need;
+       i = allocated - EXTRA_VECS - ofld_need;
        if (i < s->max_ethqsets) {
                s->max_ethqsets = i;
                if (i < s->ethqsets)
                        reduce_ethqs(adap, i);
        }
        if (is_offload(adap)) {
-               i = want - EXTRA_VECS - s->max_ethqsets;
-               i -= ofld_need - nchan;
+               if (allocated < want) {
+                       s->rdmaqs = nchan;
+                       s->rdmaciqs = nchan;
+               }
+
+               /* leftovers go to OFLD */
+               i = allocated - EXTRA_VECS - s->max_ethqsets -
+                   s->rdmaqs - s->rdmaciqs;
                s->ofldqsets = (i / nchan) * nchan;  /* round down */
        }
-       for (i = 0; i < want; ++i)
+       for (i = 0; i < allocated; ++i)
                adap->msix_info[i].vec = entries[i].vector;
 
+       kfree(entries);
        return 0;
 }
 
index 4d643b65265e8ee0ad12ed886cbb2b77b9d2557b..1498d078c319c320bb793fa844cad27c3cf99b6c 100644 (file)
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  *     @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  *     @addr: address within indicated memory type
  *     @len: amount of memory to transfer
- *     @buf: host memory buffer
+ *     @hbuf: host memory buffer
  *     @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  *
  *     Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  *     caller's responsibility to perform appropriate byte order conversions.
  */
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
-                u32 len, __be32 *buf, int dir)
+                u32 len, void *hbuf, int dir)
 {
        u32 pos, offset, resid, memoffset;
        u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+       u32 *buf;
 
        /* Argument sanity checks ...
         */
-       if (addr & 0x3)
+       if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
                return -EINVAL;
+       buf = (u32 *)hbuf;
 
        /* It's convenient to be able to handle lengths which aren't a
         * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 
        /* Transfer data to/from the adapter as long as there's an integral
         * number of 32-bit transfers to complete.
+        *
+        * A note on Endianness issues:
+        *
+        * The "register" reads and writes below from/to the PCI-E Memory
+        * Window invoke the standard adapter Big-Endian to PCI-E Link
+        * Little-Endian "swizzel."  As a result, if we have the following
+        * data in adapter memory:
+        *
+        *     Memory:  ... | b0 | b1 | b2 | b3 | ...
+        *     Address:      i+0  i+1  i+2  i+3
+        *
+        * Then a read of the adapter memory via the PCI-E Memory Window
+        * will yield:
+        *
+        *     x = readl(i)
+        *         31                  0
+        *         [ b3 | b2 | b1 | b0 ]
+        *
+        * If this value is stored into local memory on a Little-Endian system
+        * it will show up correctly in local memory as:
+        *
+        *     ( ..., b0, b1, b2, b3, ... )
+        *
+        * But on a Big-Endian system, the store will show up in memory
+        * incorrectly swizzled as:
+        *
+        *     ( ..., b3, b2, b1, b0, ... )
+        *
+        * So we need to account for this in the reads and writes to the
+        * PCI-E Memory Window below by undoing the register read/write
+        * swizzels.
         */
        while (len > 0) {
                if (dir == T4_MEMORY_READ)
-                       *buf++ = (__force __be32) t4_read_reg(adap,
-                                                       mem_base + offset);
+                       *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+                                               mem_base + offset));
                else
                        t4_write_reg(adap, mem_base + offset,
-                                    (__force u32) *buf++);
+                                    (__force u32)cpu_to_le32(*buf++));
                offset += sizeof(__be32);
                len -= sizeof(__be32);
 
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
         */
        if (resid) {
                union {
-                       __be32 word;
+                       u32 word;
                        char byte[4];
                } last;
                unsigned char *bp;
                int i;
 
                if (dir == T4_MEMORY_READ) {
-                       last.word = (__force __be32) t4_read_reg(adap,
-                                                       mem_base + offset);
+                       last.word = le32_to_cpu(
+                                       (__force __le32)t4_read_reg(adap,
+                                               mem_base + offset));
                        for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
                                bp[i] = last.byte[i];
                } else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
                        for (i = resid; i < 4; i++)
                                last.byte[i] = 0;
                        t4_write_reg(adap, mem_base + offset,
-                                    (__force u32) last.word);
+                                    (__force u32)cpu_to_le32(last.word));
                }
        }
 
@@ -833,7 +867,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
  *     Read the specified number of 32-bit words from the serial flash.
  *     If @byte_oriented is set the read data is stored as a byte array
  *     (i.e., big-endian), otherwise as 32-bit words in the platform's
- *     natural endianess.
+ *     natural endianness.
  */
 int t4_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented)
@@ -3524,7 +3558,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
         * For the single-MTU buffers in unpacked mode we need to include
         * space for the SGE Control Packet Shift, 14 byte Ethernet header,
         * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
-        * Padding boundry.  All of these are accommodated in the Factory
+        * Padding boundary.  All of these are accommodated in the Factory
         * Default Firmware Configuration File but we need to adjust it for
         * this host's cache line size.
         */
@@ -4495,7 +4529,7 @@ int t4_init_tp_params(struct adapter *adap)
                                                               PROTOCOL_F);
 
        /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
-        * represents the presense of an Outer VLAN instead of a VNIC ID.
+        * represents the presence of an Outer VLAN instead of a VNIC ID.
         */
        if ((adap->params.tp.ingress_config & VNIC_F) == 0)
                adap->params.tp.vnic_shift = -1;
index 9b353a88cbdab13a7ce85456e393edf197dd1740..d136ca6a0c8a1544dbdf938214bb5cedbdc2113f 100644 (file)
@@ -36,7 +36,7 @@
 #define _T4FW_INTERFACE_H_
 
 enum fw_retval {
-       FW_SUCCESS              = 0,    /* completed sucessfully */
+       FW_SUCCESS              = 0,    /* completed successfully */
        FW_EPERM                = 1,    /* operation not permitted */
        FW_ENOENT               = 2,    /* no such file or directory */
        FW_EIO                  = 5,    /* input/output error; hw bad */
index 0545f0de1c52be282af53d6a5f03916f93ad7013..5ba14b32c3700ff59ce518eca54173ded698205a 100644 (file)
@@ -875,7 +875,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
-        * with an embeded TX Packet Write CPL message.
+        * with an embedded TX Packet Write CPL message.
         */
        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
        if (skb_shinfo(skb)->gso_size)
index 1b5506df35b15ab74eaf1ecea220da4ad6278a3f..c21e2e954ad8b84437e5643e59be58693d9e5f2f 100644 (file)
@@ -339,7 +339,7 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
  *      @adapter: the adapter
  *
  *     Issues a reset command to FW.  For a Physical Function this would
- *     result in the Firmware reseting all of its state.  For a Virtual
+ *     result in the Firmware resetting all of its state.  For a Virtual
  *     Function this just resets the state associated with the VF.
  */
 int t4vf_fw_reset(struct adapter *adapter)
index d1c025fd972607eaedbe51c86f157aef54456410..60383040d6c663ae8293234ca8de4aef5527d84e 100644 (file)
@@ -1578,7 +1578,7 @@ out1:
 
 #ifndef CONFIG_CS89x0_PLATFORM
 /*
- * This function converts the I/O port addres used by the cs89x0_probe() and
+ * This function converts the I/O port address used by the cs89x0_probe() and
  * init_module() functions to the I/O memory address used by the
  * cs89x0_probe1() function.
  */
index 9cbe038a388ea62a6f4552e7f088dc5816dc5b5c..204bd182473bceaaabaa5b1eba5ed618de751808 100644 (file)
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
        }
 
        if (ENIC_TEST_INTR(pba, notify_intr)) {
-               vnic_intr_return_all_credits(&enic->intr[notify_intr]);
                enic_notify_check(enic);
+               vnic_intr_return_all_credits(&enic->intr[notify_intr]);
        }
 
        if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
        struct enic *enic = data;
        unsigned int intr = enic_msix_notify_intr(enic);
 
-       vnic_intr_return_all_credits(&enic->intr[intr]);
        enic_notify_check(enic);
+       vnic_intr_return_all_credits(&enic->intr[intr]);
 
        return IRQ_HANDLED;
 }
@@ -893,7 +893,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
                } else {
                        memset(pp, 0, sizeof(*pp));
                        if (vf == PORT_SELF_VF)
-                               memset(netdev->dev_addr, 0, ETH_ALEN);
+                               eth_zero_addr(netdev->dev_addr);
                }
        } else {
                /* Set flag to indicate that the port assoc/disassoc
@@ -903,14 +903,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
 
                /* If DISASSOCIATE, clean up all assigned/saved macaddresses */
                if (pp->request == PORT_REQUEST_DISASSOCIATE) {
-                       memset(pp->mac_addr, 0, ETH_ALEN);
+                       eth_zero_addr(pp->mac_addr);
                        if (vf == PORT_SELF_VF)
-                               memset(netdev->dev_addr, 0, ETH_ALEN);
+                               eth_zero_addr(netdev->dev_addr);
                }
        }
 
        if (vf == PORT_SELF_VF)
-               memset(pp->vf_mac, 0, ETH_ALEN);
+               eth_zero_addr(pp->vf_mac);
 
        return err;
 }
index 50a00777228e12b91b33f8bb3b7794f4f07de42c..afd8e78e024e3d2cb6015202d3678210334358ff 100644 (file)
@@ -653,7 +653,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
        if ( !(db->media_mode & DMFE_AUTO) )
                db->op_mode = db->media_mode;   /* Force Mode */
 
-       /* Initialize Transmit/Receive decriptor and CR3/4 */
+       /* Initialize Transmit/Receive descriptor and CR3/4 */
        dmfe_descriptor_init(dev);
 
        /* Init CR6 to program DM910x operation */
index 1c5916b13778a96e489ee3ec1bcb2d1acee63cd2..2c30c0c83f984a2d41204c637bb9f2dbe797bc00 100644 (file)
@@ -564,7 +564,7 @@ static void uli526x_init(struct net_device *dev)
        if ( !(db->media_mode & ULI526X_AUTO) )
                db->op_mode = db->media_mode;           /* Force Mode */
 
-       /* Initialize Transmit/Receive decriptor and CR3/4 */
+       /* Initialize Transmit/Receive descriptor and CR3/4 */
        uli526x_descriptor_init(dev, ioaddr);
 
        /* Init CR6 to program M526X operation */
index fac806a15a61bd5b7520f5817ce18963bb6e8694..996bbc6a244ffdb7683dba9d910ccf7b63060202 100644 (file)
@@ -87,6 +87,7 @@
 #define BE3_MAX_EVT_QS         16
 #define BE3_SRIOV_MAX_EVT_QS   8
 
+#define MAX_RSS_IFACES         15
 #define MAX_RX_QS              32
 #define MAX_EVT_QS             32
 #define MAX_TX_QS              32
@@ -411,8 +412,11 @@ struct be_resources {
        u16 max_tx_qs;
        u16 max_rss_qs;
        u16 max_rx_qs;
+       u16 max_cq_count;
        u16 max_uc_mac;         /* Max UC MACs programmable */
        u16 max_vlans;          /* Number of vlans supported */
+       u16 max_iface_count;
+       u16 max_mcc_count;
        u16 max_evt_qs;
        u32 if_cap_flags;
        u32 vf_if_cap_flags;    /* VF if capability flags */
@@ -488,6 +492,8 @@ struct be_adapter {
 
        /* Rx rings */
        u16 num_rx_qs;
+       u16 num_rss_qs;
+       u16 need_def_rxq;
        struct be_rx_obj rx_obj[MAX_RX_QS];
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
@@ -635,9 +641,8 @@ extern const struct ethtool_ops be_ethtool_ops;
        for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;  \
                i++, rxo++)
 
-/* Skip the default non-rss queue (last one)*/
 #define for_all_rss_queues(adapter, rxo, i)                            \
-       for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
+       for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rss_qs; \
                i++, rxo++)
 
 #define for_all_tx_queues(adapter, txo, i)                             \
index f6db7b3e9b709e3a155e741cad3ccd3afaeb5884..dc278391a391e42821342702d72eb7fe70738d44 100644 (file)
@@ -3021,7 +3021,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
                mac_count = resp->true_mac_count + resp->pseudo_mac_count;
                /* Mac list returned could contain one or more active mac_ids
-                * or one or more true or pseudo permanant mac addresses.
+                * or one or more true or pseudo permanent mac addresses.
                 * If an active mac_id is present, return first active mac_id
                 * found.
                 */
@@ -3076,7 +3076,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
        int status;
        bool pmac_valid = false;
 
-       memset(mac, 0, ETH_ALEN);
+       eth_zero_addr(mac);
 
        if (BEx_chip(adapter)) {
                if (be_physfn(adapter))
@@ -3577,12 +3577,12 @@ static void be_copy_nic_desc(struct be_resources *res,
        res->max_rss_qs = le16_to_cpu(desc->rssq_count);
        res->max_rx_qs = le16_to_cpu(desc->rq_count);
        res->max_evt_qs = le16_to_cpu(desc->eq_count);
+       res->max_cq_count = le16_to_cpu(desc->cq_count);
+       res->max_iface_count = le16_to_cpu(desc->iface_count);
+       res->max_mcc_count = le16_to_cpu(desc->mcc_count);
        /* Clear flags that driver is not interested in */
        res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
                                BE_IF_CAP_FLAGS_WANT;
-       /* Need 1 RXQ as the default RXQ */
-       if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
-               res->max_rss_qs -= 1;
 }
 
 /* Uses Mbox */
@@ -3644,7 +3644,7 @@ err:
 
 /* Will use MBOX only if MCCQ has not been created */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
-                             struct be_resources *res, u8 domain)
+                             struct be_resources *res, u8 query, u8 domain)
 {
        struct be_cmd_resp_get_profile_config *resp;
        struct be_cmd_req_get_profile_config *req;
@@ -3654,7 +3654,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        struct be_nic_res_desc *nic;
        struct be_mcc_wrb wrb = {0};
        struct be_dma_mem cmd;
-       u32 desc_count;
+       u16 desc_count;
        int status;
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
@@ -3673,12 +3673,19 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                req->hdr.version = 1;
        req->type = ACTIVE_PROFILE_TYPE;
 
+       /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
+        * descriptors with all bits set to "1" for the fields which can be
+        * modified using SET_PROFILE_CONFIG cmd.
+        */
+       if (query == RESOURCE_MODIFIABLE)
+               req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
+
        status = be_cmd_notify_wait(adapter, &wrb);
        if (status)
                goto err;
 
        resp = cmd.va;
-       desc_count = le32_to_cpu(resp->desc_count);
+       desc_count = le16_to_cpu(resp->desc_count);
 
        pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
                                desc_count);
@@ -3803,23 +3810,80 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
                                         1, version, domain);
 }
 
+static void be_fill_vf_res_template(struct be_adapter *adapter,
+                                   struct be_resources pool_res,
+                                   u16 num_vfs, u16 num_vf_qs,
+                                   struct be_nic_res_desc *nic_vft)
+{
+       u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
+       struct be_resources res_mod = {0};
+
+       /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+        * which are modifiable using SET_PROFILE_CONFIG cmd.
+        */
+       be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
+
+       /* If RSS IFACE capability flags are modifiable for a VF, set the
+        * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+        * more than 1 RSSQ is available for a VF.
+        * Otherwise, provision only 1 queue pair for VF.
+        */
+       if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+               nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+               if (num_vf_qs > 1) {
+                       vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+                       if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+                               vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+               } else {
+                       vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+                                            BE_IF_FLAGS_DEFQ_RSS);
+               }
+
+               nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
+       } else {
+               num_vf_qs = 1;
+       }
+
+       nic_vft->rq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->txq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
+       nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
+                                       (num_vfs + 1));
+
+       /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+        * among the PF and its VFs, if the fields are changeable
+        */
+       if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+               nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
+                                                        (num_vfs + 1));
+
+       if (res_mod.max_vlans == FIELD_MODIFIABLE)
+               nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
+                                                 (num_vfs + 1));
+
+       if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+               nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
+                                                  (num_vfs + 1));
+
+       if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+               nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
+                                                (num_vfs + 1));
+}
+
 int be_cmd_set_sriov_config(struct be_adapter *adapter,
-                           struct be_resources res, u16 num_vfs)
+                           struct be_resources pool_res, u16 num_vfs,
+                           u16 num_vf_qs)
 {
        struct {
                struct be_pcie_res_desc pcie;
                struct be_nic_res_desc nic_vft;
        } __packed desc;
-       u16 vf_q_count;
-
-       if (BEx_chip(adapter) || lancer_chip(adapter))
-               return 0;
 
        /* PF PCIE descriptor */
        be_reset_pcie_desc(&desc.pcie);
        desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
        desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
-       desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+       desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
        desc.pcie.pf_num = adapter->pdev->devfn;
        desc.pcie.sriov_state = num_vfs ? 1 : 0;
        desc.pcie.num_vfs = cpu_to_le16(num_vfs);
@@ -3828,32 +3892,12 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
        be_reset_nic_desc(&desc.nic_vft);
        desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
        desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
-       desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
-                               (1 << NOSV_SHIFT);
+       desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
        desc.nic_vft.pf_num = adapter->pdev->devfn;
        desc.nic_vft.vf_num = 0;
 
-       if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
-               /* If number of VFs requested is 8 less than max supported,
-                * assign 8 queue pairs to the PF and divide the remaining
-                * resources evenly among the VFs
-                */
-               if (num_vfs < (be_max_vfs(adapter) - 8))
-                       vf_q_count = (res.max_rss_qs - 8) / num_vfs;
-               else
-                       vf_q_count = res.max_rss_qs / num_vfs;
-
-               desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
-               desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
-               desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
-               desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
-       } else {
-               desc.nic_vft.txq_count = cpu_to_le16(1);
-               desc.nic_vft.rq_count = cpu_to_le16(1);
-               desc.nic_vft.rssq_count = cpu_to_le16(0);
-               /* One CQ for each TX, RX and MCCQ */
-               desc.nic_vft.cq_count = cpu_to_le16(3);
-       }
+       be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
+                               &desc.nic_vft);
 
        return be_cmd_set_profile_config(adapter, &desc,
                                         2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
index db761e8e42a3224486ced01238f9fb6b61ad3579..53e903f37247cd5ae2290e543e668ce52d0e3676 100644 (file)
@@ -588,14 +588,15 @@ enum be_if_flags {
        BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
        BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
        BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
-       BE_IF_FLAGS_MULTICAST = 0x1000
+       BE_IF_FLAGS_MULTICAST = 0x1000,
+       BE_IF_FLAGS_DEFQ_RSS = 0x1000000
 };
 
 #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
                         BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
                         BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
                         BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
-                        BE_IF_FLAGS_UNTAGGED)
+                        BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_DEFQ_RSS)
 
 #define BE_IF_FLAGS_ALL_PROMISCUOUS    (BE_IF_FLAGS_PROMISCUOUS | \
                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |\
@@ -2021,6 +2022,7 @@ struct be_cmd_req_set_ext_fat_caps {
 #define PORT_RESOURCE_DESC_TYPE_V1             0x55
 #define MAX_RESOURCE_DESC                      264
 
+#define IF_CAPS_FLAGS_VALID_SHIFT              0       /* IF caps valid */
 #define VFT_SHIFT                              3       /* VF template */
 #define IMM_SHIFT                              6       /* Immediate */
 #define NOSV_SHIFT                             7       /* No save */
@@ -2131,20 +2133,28 @@ struct be_cmd_resp_get_func_config {
        u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
 };
 
-#define ACTIVE_PROFILE_TYPE                    0x2
+enum {
+       RESOURCE_LIMITS,
+       RESOURCE_MODIFIABLE
+};
+
 struct be_cmd_req_get_profile_config {
        struct be_cmd_req_hdr hdr;
        u8 rsvd;
+#define ACTIVE_PROFILE_TYPE                    0x2
+#define QUERY_MODIFIABLE_FIELDS_TYPE           BIT(3)
        u8 type;
        u16 rsvd1;
 };
 
 struct be_cmd_resp_get_profile_config {
        struct be_cmd_resp_hdr hdr;
-       u32 desc_count;
+       __le16 desc_count;
+       u16 rsvd;
        u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
 };
 
+#define FIELD_MODIFIABLE                       0xFFFF
 struct be_cmd_req_set_profile_config {
        struct be_cmd_req_hdr hdr;
        u32 rsvd;
@@ -2344,7 +2354,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter);
 int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
-                             struct be_resources *res, u8 domain);
+                             struct be_resources *res, u8 query, u8 domain);
 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
                     int vf_num);
@@ -2355,4 +2365,5 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
 int be_cmd_set_sriov_config(struct be_adapter *adapter,
-                           struct be_resources res, u16 num_vfs);
+                           struct be_resources res, u16 num_vfs,
+                           u16 num_vf_qs);
index 4d2de47007692a85e1477da07b98402b489bc312..b765c24625bf523fd7932be17f6dfa22840a8e46 100644 (file)
@@ -1097,7 +1097,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
                return status;
 
        if (be_multi_rxq(adapter)) {
-               for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+               for (j = 0; j < 128; j += adapter->num_rss_qs) {
                        for_all_rss_queues(adapter, rxo, i) {
                                if ((j + i) >= 128)
                                        break;
index b2277a4c7ddf7a4a9c088f31ca2a45dbd1b58a9e..5652b005947fcf444bf7a8d1926e0100c0fac184 100644 (file)
@@ -30,6 +30,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
 MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
+/* num_vfs module param is obsolete.
+ * Use sysfs method to enable/disable VFs.
+ */
 static unsigned int num_vfs;
 module_param(num_vfs, uint, S_IRUGO);
 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
@@ -2454,13 +2457,19 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
        int rc, i;
 
        /* We can create as many RSS rings as there are EQs. */
-       adapter->num_rx_qs = adapter->num_evt_qs;
+       adapter->num_rss_qs = adapter->num_evt_qs;
+
+       /* We'll use RSS only if at least 2 RSS rings are supported. */
+       if (adapter->num_rss_qs <= 1)
+               adapter->num_rss_qs = 0;
 
-       /* We'll use RSS only if atleast 2 RSS rings are supported.
-        * When RSS is used, we'll need a default RXQ for non-IP traffic.
+       adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
+
+       /* When the interface is not capable of RSS rings (and there is no
+        * need to create a default RXQ) we'll still need one RXQ
         */
-       if (adapter->num_rx_qs > 1)
-               adapter->num_rx_qs++;
+       if (adapter->num_rx_qs == 0)
+               adapter->num_rx_qs = 1;
 
        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
@@ -2479,8 +2488,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
        }
 
        dev_info(&adapter->pdev->dev,
-                "created %d RSS queue(s) and 1 default RX queue\n",
-                adapter->num_rx_qs - 1);
+                "created %d RX queue(s)\n", adapter->num_rx_qs);
        return 0;
 }
 
@@ -3110,12 +3118,14 @@ static int be_rx_qs_create(struct be_adapter *adapter)
                        return rc;
        }
 
-       /* The FW would like the default RXQ to be created first */
-       rxo = default_rxo(adapter);
-       rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
-                              adapter->if_handle, false, &rxo->rss_id);
-       if (rc)
-               return rc;
+       if (adapter->need_def_rxq || !adapter->num_rss_qs) {
+               rxo = default_rxo(adapter);
+               rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
+                                      rx_frag_size, adapter->if_handle,
+                                      false, &rxo->rss_id);
+               if (rc)
+                       return rc;
+       }
 
        for_all_rss_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -3126,8 +3136,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
        }
 
        if (be_multi_rxq(adapter)) {
-               for (j = 0; j < RSS_INDIR_TABLE_LEN;
-                       j += adapter->num_rx_qs - 1) {
+               for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
                        for_all_rss_queues(adapter, rxo, i) {
                                if ((j + i) >= RSS_INDIR_TABLE_LEN)
                                        break;
@@ -3218,7 +3227,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        int status = 0;
        u8 mac[ETH_ALEN];
 
-       memset(mac, 0, ETH_ALEN);
+       eth_zero_addr(mac);
 
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
@@ -3402,8 +3411,39 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
 }
 #endif
 
+static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
+{
+       struct be_resources res = adapter->pool_res;
+       u16 num_vf_qs = 1;
+
+       /* Distribute the queue resources equally among the PF and its VFs
+        * Do not distribute queue resources in multi-channel configuration.
+        */
+       if (num_vfs && !be_is_mc(adapter)) {
+               /* If number of VFs requested is 8 less than max supported,
+                * assign 8 queue pairs to the PF and divide the remaining
+                * resources evenly among the VFs
+                */
+               if (num_vfs < (be_max_vfs(adapter) - 8))
+                       num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
+               else
+                       num_vf_qs = res.max_rss_qs / num_vfs;
+
+               /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
+                * interfaces per port. Provide RSS on VFs, only if number
+                * of VFs requested is less than MAX_RSS_IFACES limit.
+                */
+               if (num_vfs >= MAX_RSS_IFACES)
+                       num_vf_qs = 1;
+       }
+       return num_vf_qs;
+}
+
 static int be_clear(struct be_adapter *adapter)
 {
+       struct pci_dev *pdev = adapter->pdev;
+       u16 num_vf_qs;
+
        be_cancel_worker(adapter);
 
        if (sriov_enabled(adapter))
@@ -3412,9 +3452,14 @@ static int be_clear(struct be_adapter *adapter)
        /* Re-configure FW to distribute resources evenly across max-supported
         * number of VFs, only when VFs are not already enabled.
         */
-       if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
+       if (skyhawk_chip(adapter) && be_physfn(adapter) &&
+           !pci_vfs_assigned(pdev)) {
+               num_vf_qs = be_calculate_vf_qs(adapter,
+                                              pci_sriov_get_totalvfs(pdev));
                be_cmd_set_sriov_config(adapter, adapter->pool_res,
-                                       pci_sriov_get_totalvfs(adapter->pdev));
+                                       pci_sriov_get_totalvfs(pdev),
+                                       num_vf_qs);
+       }
 
 #ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
@@ -3439,7 +3484,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
 
        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
-                  BE_IF_FLAGS_RSS;
+                  BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
 
        en_flags &= cap_flags;
 
@@ -3463,6 +3508,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
                        status = be_cmd_get_profile_config(adapter, &res,
+                                                          RESOURCE_LIMITS,
                                                           vf + 1);
                        if (!status)
                                cap_flags = res.if_cap_flags;
@@ -3629,7 +3675,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
                /* On a SuperNIC profile, the driver needs to use the
                 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
                 */
-               be_cmd_get_profile_config(adapter, &super_nic_res, 0);
+               be_cmd_get_profile_config(adapter, &super_nic_res,
+                                         RESOURCE_LIMITS, 0);
                /* Some old versions of BE3 FW don't report max_tx_qs value */
                res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
        } else {
@@ -3649,6 +3696,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
                res->max_evt_qs = 1;
 
        res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
+       res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
        if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
                res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
 }
@@ -3668,13 +3716,12 @@ static void be_setup_init(struct be_adapter *adapter)
 
 static int be_get_sriov_config(struct be_adapter *adapter)
 {
-       struct device *dev = &adapter->pdev->dev;
        struct be_resources res = {0};
        int max_vfs, old_vfs;
 
-       /* Some old versions of BE3 FW don't report max_vfs value */
-       be_cmd_get_profile_config(adapter, &res, 0);
+       be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
 
+       /* Some old versions of BE3 FW don't report max_vfs value */
        if (BE3_chip(adapter) && !res.max_vfs) {
                max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
                res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
@@ -3682,35 +3729,49 @@ static int be_get_sriov_config(struct be_adapter *adapter)
 
        adapter->pool_res = res;
 
-       if (!be_max_vfs(adapter)) {
-               if (num_vfs)
-                       dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
-               adapter->num_vfs = 0;
-               return 0;
-       }
-
-       pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
-
-       /* validate num_vfs module param */
+       /* If during previous unload of the driver, the VFs were not disabled,
+        * then we cannot rely on the PF POOL limits for the TotalVFs value.
+        * Instead use the TotalVFs value stored in the pci-dev struct.
+        */
        old_vfs = pci_num_vf(adapter->pdev);
        if (old_vfs) {
-               dev_info(dev, "%d VFs are already enabled\n", old_vfs);
-               if (old_vfs != num_vfs)
-                       dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+               dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
+                        old_vfs);
+
+               adapter->pool_res.max_vfs =
+                       pci_sriov_get_totalvfs(adapter->pdev);
                adapter->num_vfs = old_vfs;
-       } else {
-               if (num_vfs > be_max_vfs(adapter)) {
-                       dev_info(dev, "Resources unavailable to init %d VFs\n",
-                                num_vfs);
-                       dev_info(dev, "Limiting to %d VFs\n",
-                                be_max_vfs(adapter));
-               }
-               adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
        }
 
        return 0;
 }
 
+static void be_alloc_sriov_res(struct be_adapter *adapter)
+{
+       int old_vfs = pci_num_vf(adapter->pdev);
+       u16 num_vf_qs;
+       int status;
+
+       be_get_sriov_config(adapter);
+
+       if (!old_vfs)
+               pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
+
+       /* When the HW is in SRIOV capable configuration, the PF-pool
+        * resources are given to PF during driver load, if there are no
+        * old VFs. This facility is not available in BE3 FW.
+        * Also, this is done by FW in Lancer chip.
+        */
+       if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+               num_vf_qs = be_calculate_vf_qs(adapter, 0);
+               status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
+                                                num_vf_qs);
+               if (status)
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to optimize SRIOV resources\n");
+       }
+}
+
 static int be_get_resources(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
@@ -3731,12 +3792,23 @@ static int be_get_resources(struct be_adapter *adapter)
                if (status)
                        return status;
 
+               /* If a default RXQ must be created, we'll use up one RSSQ */
+               if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
+                   !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
+                       res.max_rss_qs -= 1;
+
                /* If RoCE may be enabled stash away half the EQs for RoCE */
                if (be_roce_supported(adapter))
                        res.max_evt_qs /= 2;
                adapter->res = res;
        }
 
+       /* If FW supports RSS default queue, then skip creating non-RSS
+        * queue for non-IP traffic.
+        */
+       adapter->need_def_rxq = (be_if_cap_flags(adapter) &
+                                BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
+
        dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
                 be_max_txqs(adapter), be_max_rxqs(adapter),
                 be_max_rss(adapter), be_max_eqs(adapter),
@@ -3745,38 +3817,12 @@ static int be_get_resources(struct be_adapter *adapter)
                 be_max_uc(adapter), be_max_mc(adapter),
                 be_max_vlans(adapter));
 
+       /* Sanitize cfg_num_qs based on HW and platform limits */
+       adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
+                                   be_max_qs(adapter));
        return 0;
 }
 
-static void be_sriov_config(struct be_adapter *adapter)
-{
-       struct device *dev = &adapter->pdev->dev;
-       int status;
-
-       status = be_get_sriov_config(adapter);
-       if (status) {
-               dev_err(dev, "Failed to query SR-IOV configuration\n");
-               dev_err(dev, "SR-IOV cannot be enabled\n");
-               return;
-       }
-
-       /* When the HW is in SRIOV capable configuration, the PF-pool
-        * resources are equally distributed across the max-number of
-        * VFs. The user may request only a subset of the max-vfs to be
-        * enabled. Based on num_vfs, redistribute the resources across
-        * num_vfs so that each VF will have access to more number of
-        * resources. This facility is not available in BE3 FW.
-        * Also, this is done by FW in Lancer chip.
-        */
-       if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
-               status = be_cmd_set_sriov_config(adapter,
-                                                adapter->pool_res,
-                                                adapter->num_vfs);
-               if (status)
-                       dev_err(dev, "Failed to optimize SR-IOV resources\n");
-       }
-}
-
 static int be_get_config(struct be_adapter *adapter)
 {
        int status, level;
@@ -3807,9 +3853,6 @@ static int be_get_config(struct be_adapter *adapter)
                                 "Using profile 0x%x\n", profile_id);
        }
 
-       if (!BE2_chip(adapter) && be_physfn(adapter))
-               be_sriov_config(adapter);
-
        status = be_get_resources(adapter);
        if (status)
                return status;
@@ -3819,9 +3862,6 @@ static int be_get_config(struct be_adapter *adapter)
        if (!adapter->pmac_id)
                return -ENOMEM;
 
-       /* Sanitize cfg_num_qs based on HW and platform limits */
-       adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
-
        return 0;
 }
 
@@ -3996,6 +4036,9 @@ static int be_setup(struct be_adapter *adapter)
        if (!lancer_chip(adapter))
                be_cmd_req_native_mode(adapter);
 
+       if (!BE2_chip(adapter) && be_physfn(adapter))
+               be_alloc_sriov_res(adapter);
+
        status = be_get_config(adapter);
        if (status)
                goto err;
@@ -5217,7 +5260,6 @@ static int be_drv_init(struct be_adapter *adapter)
 
        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;
-       adapter->cfg_num_qs = netif_get_num_default_rss_queues();
 
        return 0;
 
@@ -5541,6 +5583,60 @@ err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
 }
 
+static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+       struct be_adapter *adapter = pci_get_drvdata(pdev);
+       u16 num_vf_qs;
+       int status;
+
+       if (!num_vfs)
+               be_vf_clear(adapter);
+
+       adapter->num_vfs = num_vfs;
+
+       if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
+               dev_warn(&pdev->dev,
+                        "Cannot disable VFs while they are assigned\n");
+               return -EBUSY;
+       }
+
+       /* When the HW is in SRIOV capable configuration, the PF-pool resources
+        * are equally distributed across the max-number of VFs. The user may
+        * request only a subset of the max-vfs to be enabled.
+        * Based on num_vfs, redistribute the resources across num_vfs so that
+        * each VF will have access to more number of resources.
+        * This facility is not available in BE3 FW.
+        * Also, this is done by FW in Lancer chip.
+        */
+       if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
+               num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
+               status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
+                                                adapter->num_vfs, num_vf_qs);
+               if (status)
+                       dev_err(&pdev->dev,
+                               "Failed to optimize SR-IOV resources\n");
+       }
+
+       status = be_get_resources(adapter);
+       if (status)
+               return be_cmd_status(status);
+
+       /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+       rtnl_lock();
+       status = be_update_queues(adapter);
+       rtnl_unlock();
+       if (status)
+               return be_cmd_status(status);
+
+       if (adapter->num_vfs)
+               status = be_vf_setup(adapter);
+
+       if (!status)
+               return adapter->num_vfs;
+
+       return 0;
+}
+
 static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
@@ -5555,6 +5651,7 @@ static struct pci_driver be_driver = {
        .suspend = be_suspend,
        .resume = be_pci_resume,
        .shutdown = be_shutdown,
+       .sriov_configure = be_pci_sriov_configure,
        .err_handler = &be_eeh_handlers
 };
 
@@ -5568,6 +5665,11 @@ static int __init be_init_module(void)
                rx_frag_size = 2048;
        }
 
+       if (num_vfs > 0) {
+               pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
+               pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
+       }
+
        return pci_register_driver(&be_driver);
 }
 module_init(be_init_module);
index 1f9cf2345266b2f24a247768518773548d774f95..4585895ddc9a82a2912e01597b533b2b607dc83f 100644 (file)
@@ -136,7 +136,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
                 */
                writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
 
-               /* It is recommended to doulbe check the TMODE field in the
+               /* It is recommended to double check the TMODE field in the
                 * TCSR register to be cleared before the first compare counter
                 * is written into TCCR register. Just add a double check.
                 */
index 43df78882e484e065706bd04c322fa8276d4c424..178e54028d1047eab23af38ba970303bc0cad357 100644 (file)
@@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev)
        struct phy_device *phydev = priv->phydev;
 
        if (unlikely(phydev->link != priv->oldlink ||
-                    phydev->duplex != priv->oldduplex ||
-                    phydev->speed != priv->oldspeed))
+                    (phydev->link && (phydev->duplex != priv->oldduplex ||
+                                      phydev->speed != priv->oldspeed))))
                gfar_update_link_state(priv);
 }
 
index e8a1adb7a96255bf8da1baa87b29514527c2764d..c05e50759621137fa3f9749a55c347381d54ed55 100644 (file)
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
        device_remove_file(&dev->dev, &dev_attr_remove_port);
 }
 
+static int ehea_reboot_notifier(struct notifier_block *nb,
+                               unsigned long action, void *unused)
+{
+       if (action == SYS_RESTART) {
+               pr_info("Reboot: freeing all eHEA resources\n");
+               ibmebus_unregister_driver(&ehea_driver);
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+       .notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+                            unsigned long action, void *data)
+{
+       int ret = NOTIFY_BAD;
+       struct memory_notify *arg = data;
+
+       mutex_lock(&dlpar_mem_lock);
+
+       switch (action) {
+       case MEM_CANCEL_OFFLINE:
+               pr_info("memory offlining canceled");
+               /* Fall through: re-add canceled memory block */
+
+       case MEM_ONLINE:
+               pr_info("memory is going online");
+               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+               if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+                       goto out_unlock;
+               ehea_rereg_mrs();
+               break;
+
+       case MEM_GOING_OFFLINE:
+               pr_info("memory is going offline");
+               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+               if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+                       goto out_unlock;
+               ehea_rereg_mrs();
+               break;
+
+       default:
+               break;
+       }
+
+       ehea_update_firmware_handles();
+       ret = NOTIFY_OK;
+
+out_unlock:
+       mutex_unlock(&dlpar_mem_lock);
+       return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+       .notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+       int i;
+
+       if (ehea_fw_handles.arr)
+               for (i = 0; i < ehea_fw_handles.num_entries; i++)
+                       ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+                                            ehea_fw_handles.arr[i].fwh,
+                                            FORCE_FREE);
+
+       if (ehea_bcmc_regs.arr)
+               for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+                       ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+                                             ehea_bcmc_regs.arr[i].port_id,
+                                             ehea_bcmc_regs.arr[i].reg_type,
+                                             ehea_bcmc_regs.arr[i].macaddr,
+                                             0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+       int ret = 0;
+
+       if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+               return 0;
+
+       ret = ehea_create_busmap();
+       if (ret) {
+               pr_info("ehea_create_busmap failed\n");
+               goto out;
+       }
+
+       ret = register_reboot_notifier(&ehea_reboot_nb);
+       if (ret) {
+               pr_info("register_reboot_notifier failed\n");
+               goto out;
+       }
+
+       ret = register_memory_notifier(&ehea_mem_nb);
+       if (ret) {
+               pr_info("register_memory_notifier failed\n");
+               goto out2;
+       }
+
+       ret = crash_shutdown_register(ehea_crash_handler);
+       if (ret) {
+               pr_info("crash_shutdown_register failed\n");
+               goto out3;
+       }
+
+       return 0;
+
+out3:
+       unregister_memory_notifier(&ehea_mem_nb);
+out2:
+       unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+       return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+       if (atomic_read(&ehea_memory_hooks_registered))
+               return;
+
+       unregister_reboot_notifier(&ehea_reboot_nb);
+       if (crash_shutdown_unregister(ehea_crash_handler))
+               pr_info("failed unregistering crash handler\n");
+       unregister_memory_notifier(&ehea_mem_nb);
+}
+
 static int ehea_probe_adapter(struct platform_device *dev)
 {
        struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
        int ret;
        int i;
 
+       ret = ehea_register_memory_hooks();
+       if (ret)
+               return ret;
+
        if (!dev || !dev->dev.of_node) {
                pr_err("Invalid ibmebus device probed\n");
                return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
        return 0;
 }
 
-static void ehea_crash_handler(void)
-{
-       int i;
-
-       if (ehea_fw_handles.arr)
-               for (i = 0; i < ehea_fw_handles.num_entries; i++)
-                       ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
-                                            ehea_fw_handles.arr[i].fwh,
-                                            FORCE_FREE);
-
-       if (ehea_bcmc_regs.arr)
-               for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
-                       ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
-                                             ehea_bcmc_regs.arr[i].port_id,
-                                             ehea_bcmc_regs.arr[i].reg_type,
-                                             ehea_bcmc_regs.arr[i].macaddr,
-                                             0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
-                             unsigned long action, void *data)
-{
-       int ret = NOTIFY_BAD;
-       struct memory_notify *arg = data;
-
-       mutex_lock(&dlpar_mem_lock);
-
-       switch (action) {
-       case MEM_CANCEL_OFFLINE:
-               pr_info("memory offlining canceled");
-               /* Readd canceled memory block */
-       case MEM_ONLINE:
-               pr_info("memory is going online");
-               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-               if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-                       goto out_unlock;
-               ehea_rereg_mrs();
-               break;
-       case MEM_GOING_OFFLINE:
-               pr_info("memory is going offline");
-               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-               if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-                       goto out_unlock;
-               ehea_rereg_mrs();
-               break;
-       default:
-               break;
-       }
-
-       ehea_update_firmware_handles();
-       ret = NOTIFY_OK;
-
-out_unlock:
-       mutex_unlock(&dlpar_mem_lock);
-       return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
-       .notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
-                               unsigned long action, void *unused)
-{
-       if (action == SYS_RESTART) {
-               pr_info("Reboot: freeing all eHEA resources\n");
-               ibmebus_unregister_driver(&ehea_driver);
-       }
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
-       .notifier_call = ehea_reboot_notifier,
-};
-
 static int check_module_parm(void)
 {
        int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
        if (ret)
                goto out;
 
-       ret = ehea_create_busmap();
-       if (ret)
-               goto out;
-
-       ret = register_reboot_notifier(&ehea_reboot_nb);
-       if (ret)
-               pr_info("failed registering reboot notifier\n");
-
-       ret = register_memory_notifier(&ehea_mem_nb);
-       if (ret)
-               pr_info("failed registering memory remove notifier\n");
-
-       ret = crash_shutdown_register(ehea_crash_handler);
-       if (ret)
-               pr_info("failed registering crash handler\n");
-
        ret = ibmebus_register_driver(&ehea_driver);
        if (ret) {
                pr_err("failed registering eHEA device driver on ebus\n");
-               goto out2;
+               goto out;
        }
 
        ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
        if (ret) {
                pr_err("failed to register capabilities attribute, ret=%d\n",
                       ret);
-               goto out3;
+               goto out2;
        }
 
        return ret;
 
-out3:
-       ibmebus_unregister_driver(&ehea_driver);
 out2:
-       unregister_memory_notifier(&ehea_mem_nb);
-       unregister_reboot_notifier(&ehea_reboot_nb);
-       crash_shutdown_unregister(ehea_crash_handler);
+       ibmebus_unregister_driver(&ehea_driver);
 out:
        return ret;
 }
 
 static void __exit ehea_module_exit(void)
 {
-       int ret;
-
        driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
        ibmebus_unregister_driver(&ehea_driver);
-       unregister_reboot_notifier(&ehea_reboot_nb);
-       ret = crash_shutdown_unregister(ehea_crash_handler);
-       if (ret)
-               pr_info("failed unregistering crash handler\n");
-       unregister_memory_notifier(&ehea_mem_nb);
+       ehea_unregister_memory_hooks();
        kfree(ehea_fw_handles.arr);
        kfree(ehea_bcmc_regs.arr);
        ehea_destroy_busmap();
index 21978cc019e7c86dab83968ba994c0e9051c8e33..072426a72745a8fd984fa26dac7922a89ba189bb 100644 (file)
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
        return ret;
 }
 
+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+       struct ibmveth_adapter *adapter = netdev_priv(dev);
+       struct sockaddr *addr = p;
+       u64 mac_address;
+       int rc;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+       rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+       if (rc) {
+               netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+               return rc;
+       }
+
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+       return 0;
+}
+
 static const struct net_device_ops ibmveth_netdev_ops = {
        .ndo_open               = ibmveth_open,
        .ndo_stop               = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
        .ndo_fix_features       = ibmveth_fix_features,
        .ndo_set_features       = ibmveth_set_features,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = ibmveth_set_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ibmveth_poll_controller,
 #endif
index e9c3a87e5b115dc690ef2b81bbe16a5480dae5b1..05f88394f9a5599dcc1a4076a05694e596e6fdbe 100644 (file)
@@ -414,7 +414,7 @@ enum cb_status {
 
 /**
  * cb_command - Command Block flags
- * @cb_tx_nc:  0: controler does CRC (normal),  1: CRC from skb memory
+ * @cb_tx_nc:  0: controller does CRC (normal),  1: CRC from skb memory
  */
 enum cb_command {
        cb_nop    = 0x0000,
index 7f997d36948f3e59621b3698b1a43b36166ecf93..b548ef0cf56be1278daa2d92edcde4fed51f9541 100644 (file)
@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rx_ring,
                                     int *work_done, int work_to_do);
+static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
+                                        struct e1000_rx_ring *rx_ring,
+                                        int cleaned_count)
+{
+}
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                   struct e1000_rx_ring *rx_ring,
                                   int cleaned_count);
@@ -516,6 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        u32 rctl, tctl;
 
+       netif_carrier_off(netdev);
 
        /* disable receives in the hardware */
        rctl = er32(RCTL);
@@ -544,7 +550,6 @@ void e1000_down(struct e1000_adapter *adapter)
 
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
-       netif_carrier_off(netdev);
 
        e1000_reset(adapter);
        e1000_clean_all_tx_rings(adapter);
@@ -1111,7 +1116,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                if (e1000_read_mac_addr(hw))
                        e_err(probe, "EEPROM Read Error\n");
        }
-       /* don't block initalization here due to bad MAC address */
+       /* don't block initialization here due to bad MAC address */
        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
 
        if (!is_valid_ether_addr(netdev->dev_addr))
@@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                msleep(1);
        /* e1000_down has a dependency on max_frame_size */
        hw->max_frame_size = max_frame;
-       if (netif_running(netdev))
+       if (netif_running(netdev)) {
+               /* prevent buffers from being reallocated */
+               adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
                e1000_down(adapter);
+       }
 
        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
index 7523f510c7e415e194cd3b1f620bf09908522644..9d81c03174334be84a3d605ff03417effc4c7ccd 100644 (file)
@@ -603,12 +603,15 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
        u16 i;
        u32 nvm_size;
 
-       /* Can't read flash registers if the register set isn't mapped. */
        nvm->type = e1000_nvm_flash_sw;
-       /* in SPT, gfpreg doesn't exist. NVM size is taken from the
-        * STRAP register
-        */
+
        if (hw->mac.type == e1000_pch_spt) {
+               /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+                * STRAP register. This is because in SPT the GbE Flash region
+                * is no longer accessed through the flash registers. Instead,
+                * the mechanism has changed, and the Flash region access
+                * registers are now implemented in GbE memory space.
+                */
                nvm->flash_base_addr = 0;
                nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
                    * NVM_SIZE_MULTIPLIER;
@@ -618,6 +621,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
                /* Set the base address for flash register access */
                hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
        } else {
+               /* Can't read flash registers if register set isn't mapped. */
                if (!hw->flash_address) {
                        e_dbg("ERROR: Flash registers not mapped\n");
                        return -E1000_ERR_CONFIG;
index 6fa4fc05709ef3f1b346b9bd13594ab6e38bcdc5..4be4576d71aaa47fa0955257ab4e6d6cacdc1607 100644 (file)
@@ -6833,7 +6833,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_ioremap;
 
        if ((adapter->flags & FLAG_HAS_FLASH) &&
-           (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+           (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
+           (hw->mac.type < e1000_pch_spt)) {
                flash_start = pci_resource_start(pdev, 1);
                flash_len = pci_resource_len(pdev, 1);
                adapter->hw.flash_address = ioremap(flash_start, flash_len);
@@ -7069,7 +7070,7 @@ err_hw_init:
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
 err_sw_init:
-       if (adapter->hw.flash_address)
+       if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
                iounmap(adapter->hw.flash_address);
        e1000e_reset_interrupt_capability(adapter);
 err_flashmap:
@@ -7142,7 +7143,8 @@ static void e1000_remove(struct pci_dev *pdev)
        kfree(adapter->rx_ring);
 
        iounmap(adapter->hw.hw_addr);
-       if (adapter->hw.flash_address)
+       if ((adapter->hw.flash_address) &&
+           (adapter->hw.mac.type < e1000_pch_spt))
                iounmap(adapter->hw.flash_address);
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
index 42eb4344a9dc077c52bfa97723d99af2bbcbe06b..59edfd4446cdaf4daf95659ec0d7bec6bf6c0916 100644 (file)
@@ -439,6 +439,7 @@ extern char fm10k_driver_name[];
 extern const char fm10k_driver_version[];
 int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
 void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
+__be16 fm10k_tx_encap_offload(struct sk_buff *skb);
 netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
                                  struct fm10k_ring *tx_ring);
 void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
index bf19dccd4288d8693197520d65b2f01251d891c0..6cfae6ac04eac0984037e1620394f14bc85d3fce 100644 (file)
@@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
        /* Retrieve RX Owner Data */
        id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));
 
-       /* Process RX Ring*/
+       /* Process RX Ring */
        do {
                rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
                                                   &q->rx_drops);
@@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
  *  Function invalidates the index values for the queues so any updates that
  *  may have happened are ignored and the base for the queue stats is reset.
  **/
-
 void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
 {
        u32 i;
index 651f53bc737686118e1f2fc457f452de59260998..33b6106c764becbd48e1dcdd4cda4e74b66c5497 100644 (file)
@@ -1019,7 +1019,7 @@ static int fm10k_set_channels(struct net_device *dev,
 }
 
 static int fm10k_get_ts_info(struct net_device *dev,
-                          struct ethtool_ts_info *info)
+                            struct ethtool_ts_info *info)
 {
        struct fm10k_intfc *interface = netdev_priv(dev);
 
index 0601908642389077b1df46546af52e4a734c8db0..a02308f5048fbf1911740c379f423a0336528b19 100644 (file)
@@ -275,7 +275,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
        if (vf_idx >= iov_data->num_vfs)
                return FM10K_ERR_PARAM;
 
-       /* determine if an update has occured and if so notify the VF */
+       /* determine if an update has occurred and if so notify the VF */
        vf_info = &iov_data->vf_info[vf_idx];
        if (vf_info->sw_vid != pvid) {
                vf_info->sw_vid = pvid;
index 84ab9eea2768406e2b3f9a22f946e7417160f795..c325bc0c83382c9f4254cbdb1fb1c4758969625b 100644 (file)
@@ -711,10 +711,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
        if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
                return NULL;
 
-       /* verify protocol is transparent Ethernet bridging */
-       if (nvgre_hdr->proto != htons(ETH_P_TEB))
-               return NULL;
-
        /* report start of ethernet header */
        if (nvgre_hdr->flags & NVGRE_TNI)
                return (struct ethhdr *)(nvgre_hdr + 1);
@@ -722,15 +718,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
        return (struct ethhdr *)(&nvgre_hdr->tni);
 }
 
-static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
+__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
 {
+       u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
        struct ethhdr *eth_hdr;
-       u8 l4_hdr = 0;
 
-/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
-#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET       164
-       if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
-           FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
+       if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+           skb->inner_protocol != htons(ETH_P_TEB))
                return 0;
 
        switch (vlan_get_protocol(skb)) {
@@ -760,12 +754,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
 
        switch (eth_hdr->h_proto) {
        case htons(ETH_P_IP):
+               inner_l4_hdr = inner_ip_hdr(skb)->protocol;
+               break;
        case htons(ETH_P_IPV6):
+               inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
                break;
        default:
                return 0;
        }
 
+       switch (inner_l4_hdr) {
+       case IPPROTO_TCP:
+               inner_l4_hlen = inner_tcp_hdrlen(skb);
+               break;
+       case IPPROTO_UDP:
+               inner_l4_hlen = 8;
+               break;
+       default:
+               return 0;
+       }
+
+       /* The hardware allows tunnel offloads only if the combined inner and
+        * outer header is 184 bytes or less
+        */
+       if (skb_inner_transport_header(skb) + inner_l4_hlen -
+           skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
+               return 0;
+
        return eth_hdr->h_proto;
 }
 
@@ -934,10 +949,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
 {
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
+       /* Memory barrier before checking head and tail */
        smp_mb();
 
-       /* We need to check again in a case another CPU has just
-        * made room available. */
+       /* Check again in a case another CPU has just made room available */
        if (likely(fm10k_desc_unused(tx_ring) < size))
                return -EBUSY;
 
index 9f5457c9e627620dfe421bc173c85b24fddb67a4..14ee696e98308222460fe680b2c128d413eb6839 100644 (file)
@@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
  *  @fifo: pointer to FIFO
  *  @offset: offset to add to head
  *
- *  This function returns the indicies into the fifo based on head + offset
+ *  This function returns the indices into the fifo based on head + offset
  **/
 static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
 {
@@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
  *  @fifo: pointer to FIFO
  *  @offset: offset to add to tail
  *
- *  This function returns the indicies into the fifo based on tail + offset
+ *  This function returns the indices into the fifo based on tail + offset
  **/
 static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
 {
@@ -326,7 +326,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
  *  fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
  *  @mbx: pointer to mailbox
  *
- *  This function will take a seciton of the Rx FIFO and copy it into the
+ *  This function will take a section of the Rx FIFO and copy it into the
  *  mailbox memory.  The offset in mbmem is based on the lower bits of the
  *  tail and len determines the length to copy.
@@ -418,7 +418,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw,
  *  @hw: pointer to hardware structure
  *  @mbx: pointer to mailbox
  *
- *  This function will take a seciton of the mailbox memory and copy it
+ *  This function will take a section of the mailbox memory and copy it
  *  into the Rx FIFO.  The offset is based on the lower bits of the
  *  head and len determines the length to copy.
  **/
@@ -464,7 +464,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw,
  *  @tail: tail index of message
  *
  *  This function will first validate the tail index and size for the
- *  incoming message.  It then updates the acknowlegment number and
+ *  incoming message.  It then updates the acknowledgment number and
  *  copies the data into the FIFO.  It will return the number of messages
  *  dequeued on success and a negative value on error.
  **/
@@ -761,7 +761,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw,
                err = fm10k_fifo_enqueue(&mbx->tx, msg);
        }
 
-       /* if we failed trhead the error */
+       /* if we failed treat the error */
        if (err) {
                mbx->timeout = 0;
                mbx->tx_busy++;
@@ -815,7 +815,7 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
 {
        u32 mbmem = mbx->mbmem_reg;
 
-       /* write new msg header to notify recepient of change */
+       /* write new msg header to notify recipient of change */
        fm10k_write_reg(hw, mbmem, mbx->mbx_hdr);
 
        /* write mailbox to sent interrupt */
@@ -1251,7 +1251,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
        /* we will need to pull all of the fields for verification */
        head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
 
-       /* we only have lower 10 bits of error number os add upper bits */
+       /* we only have lower 10 bits of error number so add upper bits */
        err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
        err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);
 
@@ -1548,7 +1548,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
        mbx->timeout = 0;
        mbx->udelay = FM10K_MBX_INIT_DELAY;
 
-       /* initalize tail and head */
+       /* initialize tail and head */
        mbx->tail = 1;
        mbx->head = 1;
 
@@ -1627,7 +1627,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx)
        mbx->local = FM10K_SM_MBX_VERSION;
        mbx->remote = 0;
 
-       /* initalize tail and head */
+       /* initialize tail and head */
        mbx->tail = 1;
        mbx->head = 1;
 
index cfde8bac1aeb2a23e4c447f786e06394f3ce285e..d5b303dad95e439258ab50f95ca3c9b73a4aa874 100644 (file)
@@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
  * fm10k_request_glort_range - Request GLORTs for use in configuring rules
  * @interface: board private structure
  *
- * This function allocates a range of glorts for this inteface to use.
+ * This function allocates a range of glorts for this interface to use.
  **/
 static void fm10k_request_glort_range(struct fm10k_intfc *interface)
 {
@@ -781,7 +781,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
 
        fm10k_mbx_lock(interface);
 
-       /* only need to update the VLAN if not in promiscous mode */
+       /* only need to update the VLAN if not in promiscuous mode */
        if (!(netdev->flags & IFF_PROMISC)) {
                err = hw->mac.ops.update_vlan(hw, vid, 0, set);
                if (err)
@@ -970,7 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev)
 
        fm10k_mbx_lock(interface);
 
-       /* syncronize all of the addresses */
+       /* synchronize all of the addresses */
        if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
                __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
                if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
@@ -1051,7 +1051,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
                                           vid, true, 0);
        }
 
-       /* syncronize all of the addresses */
+       /* synchronize all of the addresses */
        if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
                __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
                if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
@@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
        }
 }
 
+static netdev_features_t fm10k_features_check(struct sk_buff *skb,
+                                             struct net_device *dev,
+                                             netdev_features_t features)
+{
+       if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
+               return features;
+
+       return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+}
+
 static const struct net_device_ops fm10k_netdev_ops = {
        .ndo_open               = fm10k_open,
        .ndo_stop               = fm10k_close,
@@ -1372,6 +1382,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
        .ndo_do_ioctl           = fm10k_ioctl,
        .ndo_dfwd_add_station   = fm10k_dfwd_add_station,
        .ndo_dfwd_del_station   = fm10k_dfwd_del_station,
+       .ndo_features_check     = fm10k_features_check,
 };
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
index 4f5892cc32d70c15b7911e0975eb6f39a5f5b6fb..8978d55a1c514d18bdd22de6988a00b7f8ca6b22 100644 (file)
@@ -648,7 +648,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
        /* Configure the Rx buffer size for one buff without split */
        srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;
 
-       /* Configure the Rx ring to supress loopback packets */
+       /* Configure the Rx ring to suppress loopback packets */
        srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
        fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);
 
index 7e4711958e463a959c69365fdb3d5c9ff956f3e4..159cd8463800bebfd584363013b77526bdb3161f 100644 (file)
@@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
        vid = (vid << 17) >> 17;
 
        /* verify the reserved 0 fields are 0 */
-       if (len >= FM10K_VLAN_TABLE_VID_MAX ||
-           vid >= FM10K_VLAN_TABLE_VID_MAX)
+       if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
                return FM10K_ERR_PARAM;
 
        /* Loop through the table updating all required VLANs */
@@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
 }
 
 /**
- *  fm10k_update_uc_addr_pf - Update device unicast addresss
+ *  fm10k_update_xc_addr_pf - Update device addresses
  *  @hw: pointer to the HW structure
  *  @glort: base resource tag for this request
  *  @mac: MAC address to add/remove from table
@@ -356,7 +355,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
 }
 
 /**
- *  fm10k_update_uc_addr_pf - Update device unicast addresss
+ *  fm10k_update_uc_addr_pf - Update device unicast addresses
  *  @hw: pointer to the HW structure
  *  @glort: base resource tag for this request
  *  @mac: MAC address to add/remove from table
@@ -454,7 +453,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
                        break;
        }
 
-       /* always reset VFITR2[0] to point to last enabled PF vector*/
+       /* always reset VFITR2[0] to point to last enabled PF vector */
        fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
 
        /* reset ITR2[0] to point to last enabled PF vector */
@@ -812,7 +811,7 @@ static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
        if (vf_idx >= hw->iov.num_vfs)
                return FM10K_ERR_PARAM;
 
-       /* determine vector offset and count*/
+       /* determine vector offset and count */
        vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
        vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
 
@@ -951,7 +950,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
        if (vf_info->mbx.ops.disconnect)
                vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
 
-       /* determine vector offset and count*/
+       /* determine vector offset and count */
        vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
        vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
 
@@ -1035,7 +1034,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
                        ((u32)vf_info->mac[2]);
        }
 
-       /* map queue pairs back to VF from last to first*/
+       /* map queue pairs back to VF from last to first */
        for (i = queues_per_pool; i--;) {
                fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
                fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
@@ -1141,7 +1140,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
  *
  *  This function is a default handler for MSI-X requests from the VF.  The
  *  assumption is that in this case it is acceptable to just directly
- *  hand off the message form the VF to the underlying shared code.
+ *  hand off the message from the VF to the underlying shared code.
  **/
 s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
                          struct fm10k_mbx_info *mbx)
@@ -1160,7 +1159,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
  *
  *  This function is a default handler for MAC/VLAN requests from the VF.
  *  The assumption is that in this case it is acceptable to just directly
- *  hand off the message form the VF to the underlying shared code.
+ *  hand off the message from the VF to the underlying shared code.
  **/
 s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                              struct fm10k_mbx_info *mbx)
@@ -1404,7 +1403,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
                                                    &stats->vlan_drop);
                loopback_drop = fm10k_read_hw_stats_32b(hw,
                                                        FM10K_STATS_LOOPBACK_DROP,
-                                                    &stats->loopback_drop);
+                                                       &stats->loopback_drop);
                nodesc_drop = fm10k_read_hw_stats_32b(hw,
                                                      FM10K_STATS_NODESC_DROP,
                                                      &stats->nodesc_drop);
@@ -1573,7 +1572,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
        s32 ret_val = 0;
        u32 dma_ctrl2;
 
-       /* verify the switch is ready for interraction */
+       /* verify the switch is ready for interaction */
        dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
        if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
                goto out;
index fd0a05f011a863e48f47994577c9d9077d8148e3..9b29d7b0377a4302aea542a2afd99460707241f1 100644 (file)
@@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
 /**
  *  fm10k_tlv_msg_test - Validate all results on test message receive
  *  @hw: Pointer to hardware structure
- *  @results: Pointer array to attributes in the mesage
+ *  @results: Pointer array to attributes in the message
  *  @mbx: Pointer to mailbox information structure
  *
  *  This function does a check to verify all attributes match what the test
index 7c6d9d5a8ae5c5042f8e68843a26ef770b45bbf5..4af96686c58407b8385113731685dbf1f7933749 100644 (file)
@@ -356,6 +356,9 @@ struct fm10k_hw;
 #define FM10K_QUEUE_DISABLE_TIMEOUT            100
 #define FM10K_RESET_TIMEOUT                    150
 
+/* Maximum supported combined inner and outer header length for encapsulation */
+#define FM10K_TUNNEL_HEADER_LENGTH     184
+
 /* VF registers */
 #define FM10K_VFCTRL           0x00000
 #define FM10K_VFCTRL_RST                       0x00000008
@@ -593,7 +596,7 @@ struct fm10k_vf_info {
        u16                     sw_vid;         /* Switch API assigned VLAN */
        u16                     pf_vid;         /* PF assigned Default VLAN */
        u8                      mac[ETH_ALEN];  /* PF Default MAC address */
-       u8                      vsi;            /* VSI idenfifier */
+       u8                      vsi;            /* VSI identifier */
        u8                      vf_idx;         /* which VF this is */
        u8                      vf_flags;       /* flags indicating what modes
                                                 * are supported for the port
index f0aa0f97b4a91d7f7cc4c07d990e2e04f06aafa1..17219678439abbfa2a3f2acd3dddd09c6ffee94e 100644 (file)
@@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
        if (err)
                return err;
 
-       /* If permenant address is set then we need to restore it */
+       /* If permanent address is set then we need to restore it */
        if (is_valid_ether_addr(perm_addr)) {
                bal = (((u32)perm_addr[3]) << 24) |
                      (((u32)perm_addr[4]) << 16) |
@@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
  *  fm10k_reset_hw_vf - VF hardware reset
  *  @hw: pointer to hardware structure
  *
- *  This function should return the hardare to a state similar to the
+ *  This function should return the hardware to a state similar to the
  *  one it is in after just being initialized.
  **/
 static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
@@ -252,7 +252,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
 }
 
 /**
- *  fm10k_update_uc_addr_vf - Update device unicast address
+ *  fm10k_update_uc_addr_vf - Update device unicast addresses
  *  @hw: pointer to the HW structure
  *  @glort: unused
  *  @mac: MAC address to add/remove from table
@@ -282,7 +282,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
            memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
                return FM10K_ERR_PARAM;
 
-       /* add bit to notify us if this is a set of clear operation */
+       /* add bit to notify us if this is a set or clear operation */
        if (!add)
                vid |= FM10K_VLAN_CLEAR;
 
@@ -295,7 +295,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
 }
 
 /**
- *  fm10k_update_mc_addr_vf - Update device multicast address
+ *  fm10k_update_mc_addr_vf - Update device multicast addresses
  *  @hw: pointer to the HW structure
  *  @glort: unused
  *  @mac: MAC address to add/remove from table
@@ -319,7 +319,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
        if (!is_multicast_ether_addr(mac))
                return FM10K_ERR_PARAM;
 
-       /* add bit to notify us if this is a set of clear operation */
+       /* add bit to notify us if this is a set or clear operation */
        if (!add)
                vid |= FM10K_VLAN_CLEAR;
 
@@ -515,7 +515,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
  *  @hw: pointer to the hardware structure
  *
  *  Function reads the content of 2 registers, combined to represent a 64 bit
- *  value measured in nanosecods.  In order to guarantee the value is accurate
+ *  value measured in nanoseconds.  In order to guarantee the value is accurate
  *  we check the 32 most significant bits both before and after reading the
  *  32 least significant bits to verify they didn't change as we were reading
  *  the registers.
index 023e452aff8ccdb88d13735b852c2272ae85f466..b4729ba57c9c1e88f10fa2ceaecd32821e7d18e7 100644 (file)
@@ -37,7 +37,6 @@ i40e-objs := i40e_main.o \
        i40e_hmc.o      \
        i40e_lan_hmc.o  \
        i40e_nvm.o      \
-       i40e_configfs.o \
        i40e_debugfs.o  \
        i40e_diag.o     \
        i40e_txrx.o     \
index 1e9576bb911eafa33426581b3e49b8b9c9e64e6d..7ce8e600c13ccff9ca31c3815375657ffbee90c9 100644 (file)
@@ -72,6 +72,7 @@
 
 #define I40E_MAX_NUM_DESCRIPTORS      4096
 #define I40E_MAX_REGISTER     0x800000
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
 #define I40E_DEFAULT_NUM_DESCRIPTORS  512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE  32
 #define I40E_MIN_NUM_DESCRIPTORS      64
@@ -174,6 +175,7 @@ struct i40e_lump_tracking {
 #define I40E_FDIR_MAX_RAW_PACKET_SIZE  512
 #define I40E_FDIR_BUFFER_FULL_MARGIN   10
 #define I40E_FDIR_BUFFER_HEAD_ROOM     32
+#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
 
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
@@ -275,7 +277,7 @@ struct i40e_pf {
        enum i40e_interrupt_policy int_policy;
        u16 rx_itr_default;
        u16 tx_itr_default;
-       u16 msg_enable;
+       u32 msg_enable;
        char int_name[I40E_INT_NAME_STR_LEN];
        u16 adminq_work_limit; /* num of admin receive queue desc to process */
        unsigned long service_timer_period;
@@ -471,6 +473,9 @@ struct i40e_vsi {
        u16 rx_itr_setting;
        u16 tx_itr_setting;
 
+       u16 rss_table_size;
+       u16 rss_size;
+
        u16 max_frame;
        u16 rx_hdr_len;
        u16 rx_buf_len;
@@ -488,6 +493,7 @@ struct i40e_vsi {
 
        u16 base_queue;      /* vsi's first queue in hw array */
        u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+       u16 req_queue_pairs; /* User requested queue pairs */
        u16 num_queue_pairs; /* Used tx and rx pairs */
        u16 num_desc;
        enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
@@ -557,14 +563,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
        static char buf[32];
 
        snprintf(buf, sizeof(buf),
-                "f%d.%d a%d.%d n%02x.%02x e%08x",
-                hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+                "f%d.%d.%05d a%d.%d n%x.%02x e%x",
+                hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
                 hw->aq.api_maj_ver, hw->aq.api_min_ver,
                 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
                        I40E_NVM_VERSION_HI_SHIFT,
                 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
                        I40E_NVM_VERSION_LO_SHIFT,
-                hw->nvm.eetrack);
+                (hw->nvm.eetrack & 0xffffff));
 
        return buf;
 }
@@ -631,9 +637,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
 int i40e_add_del_fdir(struct i40e_vsi *vsi,
                      struct i40e_fdir_filter *input, bool add);
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
-int i40e_get_current_fd_count(struct i40e_pf *pf);
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
-int i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_current_fd_count(struct i40e_pf *pf);
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_global_fd_count(struct i40e_pf *pf);
 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
 void i40e_set_ethtool_ops(struct net_device *netdev);
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
@@ -725,6 +732,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
 #ifdef CONFIG_I40E_DCB
 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+                          struct i40e_dcbx_config *old_cfg,
                           struct i40e_dcbx_config *new_cfg);
 void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
 void i40e_dcbnl_setup(struct i40e_vsi *vsi);
@@ -741,10 +749,6 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
 void i40e_ptp_init(struct i40e_pf *pf);
 void i40e_ptp_stop(struct i40e_pf *pf);
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-#if IS_ENABLED(CONFIG_CONFIGFS_FS)
-int i40e_configfs_init(void);
-void i40e_configfs_exit(void);
-#endif /* CONFIG_CONFIGFS_FS */
 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
index 77f6254a89ac6078136e7cd92af1f910981c3f6f..3e0d20037675e84164ae9da793ddc080737e1d81 100644 (file)
@@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
+                                                       &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
@@ -605,7 +606,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
                goto init_adminq_free_arq;
 
        /* get the NVM version info */
-       i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+       i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+                          &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
index de17b6fbcc4e2a2f42054a875f961f9344a27d9a..28e519a50de4063edcae851c6e94fce88687203b 100644 (file)
@@ -93,6 +93,7 @@ struct i40e_adminq_info {
        u16 asq_buf_size;               /* send queue buffer size */
        u16 fw_maj_ver;                 /* firmware major version */
        u16 fw_min_ver;                 /* firmware minor version */
+       u32 fw_build;                   /* firmware build number */
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
        bool nvm_release_on_done;
index 88b2d45578dd5cc1413e511741cf331da411b982..fb78bdd2eb956c7d77b18e209aad8b33a24a9063 100644 (file)
@@ -85,9 +85,8 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 {
        struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
        u16 len = le16_to_cpu(aq_desc->datalen);
-       u8 *aq_buffer = (u8 *)buffer;
-       u32 data[4];
-       u32 i = 0;
+       u8 *buf = (u8 *)buffer;
+       u16 i = 0;
 
        if ((!(mask & hw->debug_mask)) || (desc == NULL))
                return;
@@ -109,29 +108,30 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
                   le32_to_cpu(aq_desc->params.external.addr_low));
 
        if ((buffer != NULL) && (aq_desc->datalen != 0)) {
-               memset(data, 0, sizeof(data));
                i40e_debug(hw, mask, "AQ CMD Buffer:\n");
                if (buf_len < len)
                        len = buf_len;
-               for (i = 0; i < len; i++) {
-                       data[((i % 16) / 4)] |=
-                               ((u32)aq_buffer[i]) << (8 * (i % 4));
-                       if ((i % 16) == 15) {
-                               i40e_debug(hw, mask,
-                                          "\t0x%04X  %08X %08X %08X %08X\n",
-                                          i - 15, le32_to_cpu(data[0]),
-                                          le32_to_cpu(data[1]),
-                                          le32_to_cpu(data[2]),
-                                          le32_to_cpu(data[3]));
-                               memset(data, 0, sizeof(data));
-                       }
+               /* write the full 16-byte chunks */
+               for (i = 0; i < (len - 16); i += 16)
+                       i40e_debug(hw, mask,
+                                  "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+                                  i, buf[i], buf[i + 1], buf[i + 2],
+                                  buf[i + 3], buf[i + 4], buf[i + 5],
+                                  buf[i + 6], buf[i + 7], buf[i + 8],
+                                  buf[i + 9], buf[i + 10], buf[i + 11],
+                                  buf[i + 12], buf[i + 13], buf[i + 14],
+                                  buf[i + 15]);
+               /* write whatever's left over without overrunning the buffer */
+               if (i < len) {
+                       char d_buf[80];
+                       int j = 0;
+
+                       memset(d_buf, 0, sizeof(d_buf));
+                       j += sprintf(d_buf, "\t0x%04X ", i);
+                       while (i < len)
+                               j += sprintf(&d_buf[j], " %02X", buf[i++]);
+                       i40e_debug(hw, mask, "%s\n", d_buf);
                }
-               if ((i % 16) != 0)
-                       i40e_debug(hw, mask, "\t0x%04X  %08X %08X %08X %08X\n",
-                                  i - (i % 16), le32_to_cpu(data[0]),
-                                  le32_to_cpu(data[1]),
-                                  le32_to_cpu(data[2]),
-                                  le32_to_cpu(data[3]));
        }
 }
 
@@ -541,7 +541,6 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
        I40E_PTT_UNUSED_ENTRY(255)
 };
 
-
 /**
  * i40e_init_shared_code - Initialize the shared code
  * @hw: pointer to hardware structure
@@ -834,6 +833,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
        case I40E_PHY_TYPE_10GBASE_CR1:
        case I40E_PHY_TYPE_40GBASE_CR4:
        case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+       case I40E_PHY_TYPE_40GBASE_AOC:
+       case I40E_PHY_TYPE_10GBASE_AOC:
                media = I40E_MEDIA_TYPE_DA;
                break;
        case I40E_PHY_TYPE_1000BASE_KX:
@@ -875,8 +876,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
         * The grst delay value is in 100ms units, and we'll wait a
         * couple counts longer to be sure we don't just miss the end.
         */
-       grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
-                       >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+       grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+                   I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+                   I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
        for (cnt = 0; cnt < grst_del + 2; cnt++) {
                reg = rd32(hw, I40E_GLGEN_RSTAT);
                if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -1082,8 +1084,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
        return gpio_val;
 }
 
-#define I40E_LED0 22
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
 #define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
 
 /**
  * i40e_led_get - return current on/off mode
@@ -1096,6 +1101,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
  **/
 u32 i40e_led_get(struct i40e_hw *hw)
 {
+       u32 current_mode = 0;
        u32 mode = 0;
        int i;
 
@@ -1108,6 +1114,20 @@ u32 i40e_led_get(struct i40e_hw *hw)
                if (!gpio_val)
                        continue;
 
+               /* ignore gpio LED src mode entries related to the activity
+                * LEDs
+                */
+               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+               switch (current_mode) {
+               case I40E_COMBINED_ACTIVITY:
+               case I40E_FILTER_ACTIVITY:
+               case I40E_MAC_ACTIVITY:
+                       continue;
+               default:
+                       break;
+               }
+
                mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
                        I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
                break;
@@ -1127,6 +1147,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
  **/
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 {
+       u32 current_mode = 0;
        int i;
 
        if (mode & 0xfffffff0)
@@ -1141,6 +1162,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
                if (!gpio_val)
                        continue;
 
+               /* ignore gpio LED src mode entries related to the activity
+                * LEDs
+                */
+               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+               switch (current_mode) {
+               case I40E_COMBINED_ACTIVITY:
+               case I40E_FILTER_ACTIVITY:
+               case I40E_MAC_ACTIVITY:
+                       continue;
+               default:
+                       break;
+               }
+
                gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
                /* this & is a bit of paranoia, but serves as a range check */
                gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -1447,6 +1482,10 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
        else
                hw_link_info->lse_enable = false;
 
+       if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+            hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+               hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
        /* save link status information */
        if (link)
                *link = *hw_link_info;
@@ -1737,6 +1776,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
  * @hw: pointer to the hw struct
  * @fw_major_version: firmware major version
  * @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
  * @api_major_version: major queue version
  * @api_minor_version: minor queue version
  * @cmd_details: pointer to command details structure or NULL
@@ -1745,6 +1785,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
  **/
 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
                                u16 *fw_major_version, u16 *fw_minor_version,
+                               u32 *fw_build,
                                u16 *api_major_version, u16 *api_minor_version,
                                struct i40e_asq_cmd_details *cmd_details)
 {
@@ -1758,13 +1799,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
        if (!status) {
-               if (fw_major_version != NULL)
+               if (fw_major_version)
                        *fw_major_version = le16_to_cpu(resp->fw_major);
-               if (fw_minor_version != NULL)
+               if (fw_minor_version)
                        *fw_minor_version = le16_to_cpu(resp->fw_minor);
-               if (api_major_version != NULL)
+               if (fw_build)
+                       *fw_build = le32_to_cpu(resp->fw_build);
+               if (api_major_version)
                        *api_major_version = le16_to_cpu(resp->api_major);
-               if (api_minor_version != NULL)
+               if (api_minor_version)
                        *api_minor_version = le16_to_cpu(resp->api_minor);
        }
 
@@ -2083,7 +2126,7 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
  * Read the register using the admin queue commands
  **/
 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
-                               u32  reg_addr, u64 *reg_val,
+                               u32 reg_addr, u64 *reg_val,
                                struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
@@ -2094,17 +2137,15 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
        if (reg_val == NULL)
                return I40E_ERR_PARAM;
 
-       i40e_fill_default_direct_cmd_desc(&desc,
-                                         i40e_aqc_opc_debug_read_reg);
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
 
        cmd_resp->address = cpu_to_le32(reg_addr);
 
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
        if (!status) {
-               *reg_val = ((u64)cmd_resp->value_high << 32) |
-                           (u64)cmd_resp->value_low;
-               *reg_val = le64_to_cpu(*reg_val);
+               *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
+                          (u64)le32_to_cpu(cmd_resp->value_low);
        }
 
        return status;
@@ -2824,7 +2865,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
 
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
-       if (!status)
+       if (!status && filter_index)
                *filter_index = resp->index;
 
        return status;
@@ -3366,9 +3407,9 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
  * is not passed then only register at 'reg_addr0' is read.
  *
  **/
-i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
-                                  u32 reg_addr0, u32 *reg_val0,
-                                  u32 reg_addr1, u32 *reg_val1)
+static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+                                         u32 reg_addr0, u32 *reg_val0,
+                                         u32 reg_addr1, u32 *reg_val1)
 {
        struct i40e_aq_desc desc;
        struct i40e_aqc_alternate_write *cmd_resp =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_configfs.c b/drivers/net/ethernet/intel/i40e/i40e_configfs.c
deleted file mode 100644 (file)
index 3af4f14..0000000
+++ /dev/null
@@ -1,354 +0,0 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
-
-#include <linux/configfs.h>
-#include "i40e.h"
-
-#if IS_ENABLED(CONFIG_CONFIGFS_FS)
-
-/**
- * configfs structure for i40e
- *
- * This file adds code for configfs support for the i40e driver.  This sets
- * up a filesystem under /sys/kernel/config in which configuration changes
- * can be made for the driver's netdevs.
- *
- * The initialization in this code creates the "i40e" entry in the configfs
- * system.  After that, the user needs to use mkdir to create configurations
- * for specific netdev ports; for example "mkdir eth3".  This code will verify
- * that such a netdev exists and that it is owned by i40e.
- *
- **/
-
-struct i40e_cfgfs_vsi {
-       struct config_item item;
-       struct i40e_vsi *vsi;
-};
-
-static inline struct i40e_cfgfs_vsi *to_i40e_cfgfs_vsi(struct config_item *item)
-{
-       return item ? container_of(item, struct i40e_cfgfs_vsi, item) : NULL;
-}
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_min_bw = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "min_bw",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_max_bw = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "max_bw",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_commit = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "commit",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_port_count = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "ports",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_part_count = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "partitions",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute *i40e_cfgfs_vsi_attrs[] = {
-       &i40e_cfgfs_vsi_attr_min_bw,
-       &i40e_cfgfs_vsi_attr_max_bw,
-       &i40e_cfgfs_vsi_attr_commit,
-       &i40e_cfgfs_vsi_attr_port_count,
-       &i40e_cfgfs_vsi_attr_part_count,
-       NULL,
-};
-
-/**
- * i40e_cfgfs_vsi_attr_show - Show a VSI's NPAR BW partition info
- * @item: A pointer back to the configfs item created on driver load
- * @attr: A pointer to this item's configuration attribute
- * @page: A pointer to the output buffer
- **/
-static ssize_t i40e_cfgfs_vsi_attr_show(struct config_item *item,
-                                       struct configfs_attribute *attr,
-                                       char *page)
-{
-       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item);
-       struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back;
-       ssize_t count;
-
-       if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi])
-               return 0;
-
-       if (strncmp(attr->ca_name, "min_bw", 6) == 0)
-               count = sprintf(page, "%s %s %d%%\n",
-                               i40e_cfgfs_vsi->vsi->netdev->name,
-                               (pf->npar_min_bw & I40E_ALT_BW_RELATIVE_MASK) ?
-                               "Relative Min BW" : "Absolute Min BW",
-                               pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK);
-       else if (strncmp(attr->ca_name, "max_bw", 6) == 0)
-               count = sprintf(page, "%s %s %d%%\n",
-                               i40e_cfgfs_vsi->vsi->netdev->name,
-                               (pf->npar_max_bw & I40E_ALT_BW_RELATIVE_MASK) ?
-                               "Relative Max BW" : "Absolute Max BW",
-                               pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK);
-       else if (strncmp(attr->ca_name, "ports", 5) == 0)
-               count = sprintf(page, "%d\n",
-                               pf->hw.num_ports);
-       else if (strncmp(attr->ca_name, "partitions", 10) == 0)
-               count = sprintf(page, "%d\n",
-                               pf->hw.num_partitions);
-       else
-               return 0;
-
-       return count;
-}
-
-/**
- * i40e_cfgfs_vsi_attr_store - Show a VSI's NPAR BW partition info
- * @item: A pointer back to the configfs item created on driver load
- * @attr: A pointer to this item's configuration attribute
- * @page: A pointer to the user input buffer holding the user input values
- **/
-static ssize_t i40e_cfgfs_vsi_attr_store(struct config_item *item,
-                                        struct configfs_attribute *attr,
-                                        const char *page, size_t count)
-{
-       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item);
-       struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back;
-       char *p = (char *)page;
-       int rc;
-       unsigned long tmp;
-
-       if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi])
-               return 0;
-
-       if (!p || (*p && (*p == '\n')))
-               return -EINVAL;
-
-       rc = kstrtoul(p, 10, &tmp);
-       if (rc)
-               return rc;
-       if (tmp > 100)
-               return -ERANGE;
-
-       if (strncmp(attr->ca_name, "min_bw", 6) == 0) {
-               if (tmp > (pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK))
-                       return -ERANGE;
-               /* Preserve the valid and relative BW bits - the rest is
-                * don't care.
-                */
-               pf->npar_min_bw &= (I40E_ALT_BW_RELATIVE_MASK |
-                                   I40E_ALT_BW_VALID_MASK);
-               pf->npar_min_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
-               i40e_set_npar_bw_setting(pf);
-       } else if (strncmp(attr->ca_name, "max_bw", 6) == 0) {
-               if (tmp < 1 ||
-                   tmp < (pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK))
-                       return -ERANGE;
-               /* Preserve the valid and relative BW bits - the rest is
-                * don't care.
-                */
-               pf->npar_max_bw &= (I40E_ALT_BW_RELATIVE_MASK |
-                                   I40E_ALT_BW_VALID_MASK);
-               pf->npar_max_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
-               i40e_set_npar_bw_setting(pf);
-       } else if (strncmp(attr->ca_name, "commit", 6) == 0 && tmp == 1) {
-               if (i40e_commit_npar_bw_setting(pf))
-                       return -EIO;
-       }
-
-       return count;
-}
-
-/**
- * i40e_cfgfs_vsi_release - Free up the configuration item memory
- * @item: A pointer back to the configfs item created on driver load
- **/
-static void i40e_cfgfs_vsi_release(struct config_item *item)
-{
-       kfree(to_i40e_cfgfs_vsi(item));
-}
-
-static struct configfs_item_operations i40e_cfgfs_vsi_item_ops = {
-       .release                = i40e_cfgfs_vsi_release,
-       .show_attribute         = i40e_cfgfs_vsi_attr_show,
-       .store_attribute        = i40e_cfgfs_vsi_attr_store,
-};
-
-static struct config_item_type i40e_cfgfs_vsi_type = {
-       .ct_item_ops    = &i40e_cfgfs_vsi_item_ops,
-       .ct_attrs       = i40e_cfgfs_vsi_attrs,
-       .ct_owner       = THIS_MODULE,
-};
-
-struct i40e_cfgfs_group {
-       struct config_group group;
-};
-
-/**
- * to_i40e_cfgfs_group - Get the group pointer from the config item
- * @item: A pointer back to the configfs item created on driver load
- **/
-static inline struct i40e_cfgfs_group *
-to_i40e_cfgfs_group(struct config_item *item)
-{
-       return item ? container_of(to_config_group(item),
-                                  struct i40e_cfgfs_group, group) : NULL;
-}
-
-/**
- * i40e_cfgfs_group_make_item - Create the configfs item with group container
- * @group: A pointer to our configfs group
- * @name: A pointer to the nume of the device we're looking for
- **/
-static struct config_item *
-i40e_cfgfs_group_make_item(struct config_group *group, const char *name)
-{
-       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi;
-       struct net_device *netdev;
-       struct i40e_netdev_priv *np;
-
-       read_lock(&dev_base_lock);
-       netdev = first_net_device(&init_net);
-       while (netdev) {
-               if (strncmp(netdev->name, name, sizeof(netdev->name)) == 0)
-                       break;
-               netdev = next_net_device(netdev);
-       }
-       read_unlock(&dev_base_lock);
-
-       if (!netdev)
-               return ERR_PTR(-ENODEV);
-
-       /* is this netdev owned by i40e? */
-       if (netdev->netdev_ops->ndo_open != i40e_open)
-               return ERR_PTR(-EACCES);
-
-       i40e_cfgfs_vsi = kzalloc(sizeof(*i40e_cfgfs_vsi), GFP_KERNEL);
-       if (!i40e_cfgfs_vsi)
-               return ERR_PTR(-ENOMEM);
-
-       np = netdev_priv(netdev);
-       i40e_cfgfs_vsi->vsi = np->vsi;
-       config_item_init_type_name(&i40e_cfgfs_vsi->item, name,
-                                  &i40e_cfgfs_vsi_type);
-
-       return &i40e_cfgfs_vsi->item;
-}
-
-static struct configfs_attribute i40e_cfgfs_group_attr_description = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "description",
-       .ca_mode = S_IRUGO,
-};
-
-static struct configfs_attribute *i40e_cfgfs_group_attrs[] = {
-       &i40e_cfgfs_group_attr_description,
-       NULL,
-};
-
-static ssize_t i40e_cfgfs_group_attr_show(struct config_item *item,
-                                         struct configfs_attribute *attr,
-                                         char *page)
-{
-       return sprintf(page,
-"i40e\n"
-"\n"
-"This subsystem allows the modification of network port configurations.\n"
-"To start, use the name of the network port to be configured in a 'mkdir'\n"
-"command, e.g. 'mkdir eth3'.\n");
-}
-
-static void i40e_cfgfs_group_release(struct config_item *item)
-{
-       kfree(to_i40e_cfgfs_group(item));
-}
-
-static struct configfs_item_operations i40e_cfgfs_group_item_ops = {
-       .release        = i40e_cfgfs_group_release,
-       .show_attribute = i40e_cfgfs_group_attr_show,
-};
-
-/* Note that, since no extra work is required on ->drop_item(),
- * no ->drop_item() is provided.
- */
-static struct configfs_group_operations i40e_cfgfs_group_ops = {
-       .make_item      = i40e_cfgfs_group_make_item,
-};
-
-static struct config_item_type i40e_cfgfs_group_type = {
-       .ct_item_ops    = &i40e_cfgfs_group_item_ops,
-       .ct_group_ops   = &i40e_cfgfs_group_ops,
-       .ct_attrs       = i40e_cfgfs_group_attrs,
-       .ct_owner       = THIS_MODULE,
-};
-
-static struct configfs_subsystem i40e_cfgfs_group_subsys = {
-       .su_group = {
-               .cg_item = {
-                       .ci_namebuf = "i40e",
-                       .ci_type = &i40e_cfgfs_group_type,
-               },
-       },
-};
-
-/**
- * i40e_configfs_init - Initialize configfs support for our driver
- **/
-int i40e_configfs_init(void)
-{
-       int ret;
-       struct configfs_subsystem *subsys;
-
-       subsys = &i40e_cfgfs_group_subsys;
-
-       config_group_init(&subsys->su_group);
-       mutex_init(&subsys->su_mutex);
-       ret = configfs_register_subsystem(subsys);
-       if (ret) {
-               pr_err("Error %d while registering configfs subsystem %s\n",
-                      ret, subsys->su_group.cg_item.ci_namebuf);
-               return ret;
-       }
-
-       return 0;
-}
-
-/**
- * i40e_configfs_init - Bail out - unregister configfs subsystem and release
- **/
-void i40e_configfs_exit(void)
-{
-       configfs_unregister_subsystem(&i40e_cfgfs_group_subsys);
-}
-#endif /* IS_ENABLED(CONFIG_CONFIGFS_FS) */
index 3ce43588592d99c7c5449b6cd0fb84a31177d84f..6e146675676097562985ea3cc7924562499938a3 100644 (file)
@@ -459,7 +459,7 @@ static void i40e_cee_to_dcb_v1_config(
        sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
        oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
        /* Add APPs if Error is False and Oper/Sync is True */
-       if (!err && sync && oper) {
+       if (!err) {
                /* CEE operating configuration supports FCoE/iSCSI/FIP only */
                dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
 
index 183dcb63ce98e14e5b504bed9874911b35ae18d3..2f583554a260274db87b11bbda811b9e2f5956d1 100644 (file)
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
        u32 val;
 
        val = rd32(hw, I40E_PRTDCB_GENC);
-       *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+       *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
                       I40E_PRTDCB_GENC_PFCLDA_SHIFT);
 }
 
@@ -269,22 +269,21 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
 /**
  * i40e_dcbnl_flush_apps - Delete all removed APPs
  * @pf: the corresponding pf
+ * @old_cfg: old DCBX configuration data
  * @new_cfg: new DCBX configuration data
  *
  * Find and delete all APPs that are not present in the passed
  * DCB configuration
  **/
 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+                          struct i40e_dcbx_config *old_cfg,
                           struct i40e_dcbx_config *new_cfg)
 {
        struct i40e_dcb_app_priority_table app;
-       struct i40e_dcbx_config *dcbxcfg;
-       struct i40e_hw *hw = &pf->hw;
        int i;
 
-       dcbxcfg = &hw->local_dcbx_config;
-       for (i = 0; i < dcbxcfg->numapps; i++) {
-               app = dcbxcfg->app[i];
+       for (i = 0; i < old_cfg->numapps; i++) {
+               app = old_cfg->app[i];
                /* The APP is not available anymore delete it */
                if (!i40e_dcbnl_find_app(new_cfg, &app))
                        i40e_dcbnl_del_app(pf, &app);
@@ -306,9 +305,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
        if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
                return;
 
-       /* Do not setup DCB NL ops for MFP mode */
-       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-               dev->dcbnl_ops = &dcbnl_ops;
+       dev->dcbnl_ops = &dcbnl_ops;
 
        /* Set initial IEEE DCB settings */
        i40e_dcbnl_set_all(vsi);
index 30cf0be7d1b2e0cf4d44b1fed2aeef58d721574d..e802b6bc067d4d71d81573d106be01f329d7867f 100644 (file)
@@ -990,8 +990,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        if (!cmd_buf)
                return count;
        bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
-       if (bytes_not_copied < 0)
+       if (bytes_not_copied < 0) {
+               kfree(cmd_buf);
                return bytes_not_copied;
+       }
        if (bytes_not_copied > 0)
                count -= bytes_not_copied;
        cmd_buf[count] = '\0';
index 309bd1cf13e2bef75df1e27d22e3e88a20120170..01c811c99ff752fc6df4a733934176941259e414 100644 (file)
@@ -259,6 +259,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                break;
        case I40E_PHY_TYPE_XLAUI:
        case I40E_PHY_TYPE_XLPPI:
+       case I40E_PHY_TYPE_40GBASE_AOC:
                ecmd->supported = SUPPORTED_40000baseCR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_KR4:
@@ -328,6 +329,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        case I40E_PHY_TYPE_XFI:
        case I40E_PHY_TYPE_SFI:
        case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+       case I40E_PHY_TYPE_10GBASE_AOC:
                ecmd->supported = SUPPORTED_10000baseT_Full;
                break;
        case I40E_PHY_TYPE_SGMII:
@@ -1222,7 +1224,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
        case ETH_SS_TEST:
                return I40E_TEST_LEN;
        case ETH_SS_STATS:
-               if (vsi == pf->vsi[pf->lan_vsi]) {
+               if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
                        int len = I40E_PF_STATS_LEN(netdev);
 
                        if (pf->lan_veb != I40E_NO_VEB)
@@ -1295,7 +1297,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                i += 2;
        }
        rcu_read_unlock();
-       if (vsi != pf->vsi[pf->lan_vsi])
+       if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
                return;
 
        if (pf->lan_veb != I40E_NO_VEB) {
@@ -1368,7 +1370,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
-               if (vsi != pf->vsi[pf->lan_vsi])
+               if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
                        return;
 
                if (pf->lan_veb != I40E_NO_VEB) {
@@ -1413,6 +1415,8 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                        data += ETH_GSTRING_LEN;
                }
                break;
+       default:
+               break;
        }
 }
 
@@ -1528,6 +1532,7 @@ static void i40e_diag_test(struct net_device *netdev,
                           struct ethtool_test *eth_test, u64 *data)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
+       bool if_running = netif_running(netdev);
        struct i40e_pf *pf = np->vsi->back;
 
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
@@ -1535,6 +1540,12 @@ static void i40e_diag_test(struct net_device *netdev,
                netif_info(pf, drv, netdev, "offline testing starting\n");
 
                set_bit(__I40E_TESTING, &pf->state);
+               /* If the device is online then take it offline */
+               if (if_running)
+                       /* indicate we're in test mode */
+                       dev_close(netdev);
+               else
+                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
 
                /* Link test performed before hardware reset
                 * so autoneg doesn't interfere with test result
@@ -1557,6 +1568,9 @@ static void i40e_diag_test(struct net_device *netdev,
 
                clear_bit(__I40E_TESTING, &pf->state);
                i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+               if (if_running)
+                       dev_open(netdev);
        } else {
                /* Online tests */
                netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1654,6 +1668,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
        case ETHTOOL_ID_INACTIVE:
                i40e_led_set(hw, pf->led_status, false);
                break;
+       default:
+               break;
        }
 
        return 0;
@@ -2344,10 +2360,6 @@ static int i40e_set_channels(struct net_device *dev,
        /* update feature limits from largest to smallest supported values */
        /* TODO: Flow director limit, DCB etc */
 
-       /* cap RSS limit */
-       if (count > pf->rss_size_max)
-               count = pf->rss_size_max;
-
        /* use rss_reconfig to rebuild with new queue count and update traffic
         * class queue mapping
         */
@@ -2368,7 +2380,7 @@ static int i40e_set_channels(struct net_device *dev,
  *
  * Returns a u32 bitmap of flags.
  **/
-u32 i40e_get_priv_flags(struct net_device *dev)
+static u32 i40e_get_priv_flags(struct net_device *dev)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
index 05d883e4d4acc9310742e25885bd4e165155efbc..0357b31e4a5ce969dd19cec374139489be36b42b 100644 (file)
@@ -24,7 +24,6 @@
  *
  ******************************************************************************/
 
-
 #include <linux/if_ether.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -1447,7 +1446,6 @@ static int i40e_fcoe_set_features(struct net_device *netdev,
        return 0;
 }
 
-
 static const struct net_device_ops i40e_fcoe_netdev_ops = {
        .ndo_open               = i40e_open,
        .ndo_stop               = i40e_close,
index 21e0f582031c5fffb1b2634ac86fdb06c04b6ce2..0d49e2d15d408c671c3acf581b10df5763fee7c3 100644 (file)
@@ -37,7 +37,6 @@
 #define I40E_FILTER_CONTEXT_DESC(R, i)  \
        (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
 
-
 /* receive queue descriptor filter status for FCoE */
 #define I40E_RX_DESC_FLTSTAT_FCMASK    0x3
 #define I40E_RX_DESC_FLTSTAT_NOMTCH    0x0     /* no ddp context match */
index 4627588f461346f292e8e03aca4967f622dbdc6c..0079ad7bcd0e1ff9c5fb985322a72d635418df14 100644 (file)
@@ -856,7 +856,7 @@ static void i40e_write_dword(u8 *hmc_bits,
        if (ce_info->width < 32)
                mask = ((u32)1 << ce_info->width) - 1;
        else
-               mask = 0xFFFFFFFF;
+               mask = ~(u32)0;
 
        /* don't swizzle the bits until after the mask because the mask bits
         * will be in a different bit position on big endian machines
@@ -908,7 +908,7 @@ static void i40e_write_qword(u8 *hmc_bits,
        if (ce_info->width < 64)
                mask = ((u64)1 << ce_info->width) - 1;
        else
-               mask = 0xFFFFFFFFFFFFFFFF;
+               mask = ~(u64)0;
 
        /* don't swizzle the bits until after the mask because the mask bits
         * will be in a different bit position on big endian machines
index 2757926f7805bb77b43fb692cc51e67556fe33e3..4bed881e3cb6af0f1a3ef2d240b84de182f1d93a 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 12
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -1507,7 +1507,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        vsi->tc_config.numtc = numtc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
        /* Number of queues per enabled TC */
-       num_tc_qps = vsi->alloc_queue_pairs/numtc;
+       /* In MFP case we can have a much lower count of MSIx
+        * vectors available and so we need to lower the used
+        * q count.
+        */
+       qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+       num_tc_qps = qcount / numtc;
        num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
 
        /* Setup queue offset/count for all TCs for given VSI */
@@ -1536,7 +1541,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                        vsi->tc_config.tc_info[i].qoffset = offset;
                        vsi->tc_config.tc_info[i].qcount = qcount;
 
-                       /* find the power-of-2 of the number of queue pairs */
+                       /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
                        while (num_qps && ((1 << pow) < qcount)) {
@@ -1566,6 +1571,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 
        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset;
+       if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
+               if (vsi->req_queue_pairs > 0)
+                       vsi->num_queue_pairs = vsi->req_queue_pairs;
+               else
+                       vsi->num_queue_pairs = pf->num_lan_msix;
+       }
 
        /* Scheduler section valid can only be set for ADD VSI */
        if (is_add) {
@@ -2388,20 +2399,20 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
        struct i40e_vsi *vsi = ring->vsi;
        cpumask_var_t mask;
 
-       if (ring->q_vector && ring->netdev) {
-               /* Single TC mode enable XPS */
-               if (vsi->tc_config.numtc <= 1 &&
-                   !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+       if (!ring->q_vector || !ring->netdev)
+               return;
+
+       /* Single TC mode enable XPS */
+       if (vsi->tc_config.numtc <= 1) {
+               if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
                        netif_set_xps_queue(ring->netdev,
                                            &ring->q_vector->affinity_mask,
                                            ring->queue_index);
-               } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
-                       /* Disable XPS to allow selection based on TC */
-                       bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
-                       netif_set_xps_queue(ring->netdev, mask,
-                                           ring->queue_index);
-                       free_cpumask_var(mask);
-               }
+       } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               /* Disable XPS to allow selection based on TC */
+               bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
+               netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
+               free_cpumask_var(mask);
        }
 }
 
@@ -2684,8 +2695,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
        u16 qoffset, qcount;
        int i, n;
 
-       if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
-               return;
+       if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+               /* Reset the TC information */
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       rx_ring = vsi->rx_rings[i];
+                       tx_ring = vsi->tx_rings[i];
+                       rx_ring->dcb_tc = 0;
+                       tx_ring->dcb_tc = 0;
+               }
+       }
 
        for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
                if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3813,6 +3831,8 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
                pci_disable_msix(pf->pdev);
                kfree(pf->msix_entries);
                pf->msix_entries = NULL;
+               kfree(pf->irq_pile);
+               pf->irq_pile = NULL;
        } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
                pci_disable_msi(pf->pdev);
        }
@@ -3830,6 +3850,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 {
        int i;
 
+       i40e_stop_misc_vector(pf);
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               synchronize_irq(pf->msix_entries[0].vector);
+               free_irq(pf->msix_entries[0].vector, pf);
+       }
+
        i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
        for (i = 0; i < pf->num_alloc_vsi; i++)
                if (pf->vsi[i])
@@ -4101,7 +4127,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
        if (pf->hw.func_caps.iscsi)
                enabled_tc =  i40e_get_iscsi_tc_map(pf);
        else
-               enabled_tc = pf->hw.func_caps.enabled_tcmap;
+               return 1; /* Only TC0 */
 
        /* At least have TC0 */
        enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -4151,11 +4177,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
                return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
 
-       /* MPF enabled and iSCSI PF type */
+       /* MFP enabled and iSCSI PF type */
        if (pf->hw.func_caps.iscsi)
                return i40e_get_iscsi_tc_map(pf);
        else
-               return pf->hw.func_caps.enabled_tcmap;
+               return i40e_pf_get_default_tc(pf);
 }
 
 /**
@@ -4545,6 +4571,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
        struct i40e_hw *hw = &pf->hw;
        int err = 0;
 
+       /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4))
+               goto out;
+
        /* Get the initial DCB configuration */
        err = i40e_init_dcb(hw);
        if (!err) {
@@ -5155,7 +5186,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        struct i40e_aqc_lldp_get_mib *mib =
                (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
        struct i40e_hw *hw = &pf->hw;
-       struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
        struct i40e_dcbx_config tmp_dcbx_cfg;
        bool need_reconfig = false;
        int ret = 0;
@@ -5188,8 +5218,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 
        memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
        /* Store the old configuration */
-       tmp_dcbx_cfg = *dcbx_cfg;
+       memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg));
 
+       /* Reset the old DCBx configuration data */
+       memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
        /* Get updated DCBX data from firmware */
        ret = i40e_get_dcb_config(&pf->hw);
        if (ret) {
@@ -5198,20 +5230,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        }
 
        /* No change detected in DCBX configs */
-       if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+       if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
+                   sizeof(tmp_dcbx_cfg))) {
                dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
                goto exit;
        }
 
-       need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
+       need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
+                                              &hw->local_dcbx_config);
 
-       i40e_dcbnl_flush_apps(pf, dcbx_cfg);
+       i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
 
        if (!need_reconfig)
                goto exit;
 
        /* Enable DCB tagging only when more than one TC */
-       if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+       if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                pf->flags |= I40E_FLAG_DCB_ENABLED;
        else
                pf->flags &= ~I40E_FLAG_DCB_ENABLED;
@@ -5232,8 +5266,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 
        /* Wait for the PF's Tx queues to be disabled */
        ret = i40e_pf_wait_txq_disabled(pf);
-       if (!ret)
+       if (ret) {
+               /* Schedule PF reset to recover */
+               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+               i40e_service_event_schedule(pf);
+       } else {
                i40e_pf_unquiesce_all_vsi(pf);
+       }
+
 exit:
        return ret;
 }
@@ -5305,9 +5345,9 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
  * @pf: board private structure
  **/
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
 {
-       int val, fcnt_prog;
+       u32 val, fcnt_prog;
 
        val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
        fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
@@ -5315,12 +5355,13 @@ int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
 }
 
 /**
- * i40e_get_current_fd_count - Get the count of total FD filters programmed
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
  * @pf: board private structure
  **/
-int i40e_get_current_fd_count(struct i40e_pf *pf)
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
 {
-       int val, fcnt_prog;
+       u32 val, fcnt_prog;
+
        val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
        fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
                    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
@@ -5328,6 +5369,21 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
        return fcnt_prog;
 }
 
+/**
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
+ * @pf: board private structure
+ **/
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+{
+       u32 val, fcnt_prog;
+
+       val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+       fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+                   ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+                    I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+       return fcnt_prog;
+}
+
 /**
  * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
  * @pf: board private structure
@@ -5342,7 +5398,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
        /* Check if, FD SB or ATR was auto disabled and if there is enough room
         * to re-enable
         */
-       fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+       fcnt_prog = i40e_get_global_fd_count(pf);
        fcnt_avail = pf->fdir_pf_filter_count;
        if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
            (pf->fd_add_err == 0) ||
@@ -5364,13 +5420,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 }
 
 #define I40E_MIN_FD_FLUSH_INTERVAL 10
+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
 /**
  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
  * @pf: board private structure
  **/
 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 {
+       unsigned long min_flush_time;
        int flush_wait_retry = 50;
+       bool disable_atr = false;
+       int fd_room;
        int reg;
 
        if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
@@ -5378,9 +5438,20 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 
        if (time_after(jiffies, pf->fd_flush_timestamp +
                                (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
-               set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+               /* If the flush is happening too quickly and we have mostly
+                * SB rules, we should not re-enable ATR for some time.
+                */
+               min_flush_time = pf->fd_flush_timestamp
+                               + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+               fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+
+               if (!(time_after(jiffies, min_flush_time)) &&
+                   (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+                       dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+                       disable_atr = true;
+               }
+
                pf->fd_flush_timestamp = jiffies;
-               pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
                pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                /* flush all filters */
                wr32(&pf->hw, I40E_PFQF_CTL_1,
@@ -5400,10 +5471,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
                } else {
                        /* replay sideband filters */
                        i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-
-                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       if (!disable_atr)
+                               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                        clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
                        dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
                }
@@ -5414,7 +5483,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
  * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
  * @pf: board private structure
  **/
-int i40e_get_current_atr_cnt(struct i40e_pf *pf)
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
 {
        return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
 }
@@ -5440,9 +5509,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
        if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
                return;
 
-       if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
-           (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
-           (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
+       if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                i40e_fdir_flush_and_replay(pf);
 
        i40e_fdir_check_and_reenable(pf);
@@ -5565,7 +5632,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
        int i, v;
 
        /* If we're down or resetting, just bail */
-       if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+       if (test_bit(__I40E_DOWN, &pf->state) ||
+           test_bit(__I40E_CONFIG_BUSY, &pf->state))
                return;
 
        /* for each VSI/netdev
@@ -5871,6 +5939,74 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
        }
 }
 
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * Enable switch loopback or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_vsi_context ctxt;
+       int aq_ret;
+
+       ctxt.seid = pf->main_vsi_seid;
+       ctxt.pf_num = pf->hw.pf_id;
+       ctxt.vf_num = 0;
+       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+               return;
+       }
+       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+       ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "%s: update vsi switch failed, aq_err=%d\n",
+                        __func__, vsi->back->hw.aq.asq_last_status);
+       }
+}
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * Disable switch loopback or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_vsi_context ctxt;
+       int aq_ret;
+
+       ctxt.seid = pf->main_vsi_seid;
+       ctxt.pf_num = pf->hw.pf_id;
+       ctxt.vf_num = 0;
+       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+               return;
+       }
+       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+       ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "%s: update vsi switch failed, aq_err=%d\n",
+                        __func__, vsi->back->hw.aq.asq_last_status);
+       }
+}
+
 /**
  * i40e_config_bridge_mode - Configure the HW bridge mode
  * @veb: pointer to the bridge instance
@@ -6305,13 +6441,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                }
        }
 
-       msleep(75);
-       ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
-       if (ret) {
-               dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4)) {
+               msleep(75);
+               ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+               if (ret)
+                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+                                pf->hw.aq.asq_last_status);
        }
-
        /* reinit the misc interrupt */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                ret = i40e_setup_misc_vector(pf);
@@ -6698,6 +6835,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        vsi->idx = vsi_idx;
        vsi->rx_itr_setting = pf->rx_itr_default;
        vsi->tx_itr_setting = pf->tx_itr_default;
+       vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
+                               pf->rss_table_size : 64;
        vsi->netdev_registered = false;
        vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
        INIT_LIST_HEAD(&vsi->mac_filter_list);
@@ -6891,15 +7030,14 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
  *
  * Work with the OS to set up the MSIX vectors needed.
  *
- * Returns 0 on success, negative on failure
+ * Returns the number of vectors reserved or negative on failure
  **/
 static int i40e_init_msix(struct i40e_pf *pf)
 {
-       i40e_status err = 0;
        struct i40e_hw *hw = &pf->hw;
-       int other_vecs = 0;
+       int vectors_left;
        int v_budget, i;
-       int vec;
+       int v_actual;
 
        if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
                return -ENODEV;
@@ -6921,24 +7059,62 @@ static int i40e_init_msix(struct i40e_pf *pf)
         * If we can't get what we want, we'll simplify to nearly nothing
         * and try again.  If that still fails, we punt.
         */
-       pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
-       pf->num_vmdq_msix = pf->num_vmdq_qps;
-       other_vecs = 1;
-       other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
-       if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
-               other_vecs++;
-
-       /* Scale down if necessary, and the rings will share vectors */
-       pf->num_lan_msix = min_t(int, pf->num_lan_msix,
-                       (hw->func_caps.num_msix_vectors - other_vecs));
-       v_budget = pf->num_lan_msix + other_vecs;
+       vectors_left = hw->func_caps.num_msix_vectors;
+       v_budget = 0;
+
+       /* reserve one vector for miscellaneous handler */
+       if (vectors_left) {
+               v_budget++;
+               vectors_left--;
+       }
+
+       /* reserve vectors for the main PF traffic queues */
+       pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+       vectors_left -= pf->num_lan_msix;
+       v_budget += pf->num_lan_msix;
+
+       /* reserve one vector for sideband flow director */
+       if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+               if (vectors_left) {
+                       v_budget++;
+                       vectors_left--;
+               } else {
+                       pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               }
+       }
 
 #ifdef I40E_FCOE
+       /* can we reserve enough for FCoE? */
        if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-               pf->num_fcoe_msix = pf->num_fcoe_qps;
+               if (!vectors_left)
+                       pf->num_fcoe_msix = 0;
+               else if (vectors_left >= pf->num_fcoe_qps)
+                       pf->num_fcoe_msix = pf->num_fcoe_qps;
+               else
+                       pf->num_fcoe_msix = 1;
                v_budget += pf->num_fcoe_msix;
+               vectors_left -= pf->num_fcoe_msix;
        }
+
 #endif
+       /* any vectors left over go for VMDq support */
+       if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+               int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
+               int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
+
+               /* if we're short on vectors for what's desired, we limit
+                * the queues per vmdq.  If this is still more than are
+                * available, the user will need to change the number of
+                * queues/vectors used by the PF later with the ethtool
+                * channels command
+                */
+               if (vmdq_vecs < vmdq_vecs_wanted)
+                       pf->num_vmdq_qps = 1;
+               pf->num_vmdq_msix = pf->num_vmdq_qps;
+
+               v_budget += vmdq_vecs;
+               vectors_left -= vmdq_vecs;
+       }
 
        pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
                                   GFP_KERNEL);
@@ -6947,9 +7123,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
 
        for (i = 0; i < v_budget; i++)
                pf->msix_entries[i].entry = i;
-       vec = i40e_reserve_msix_vectors(pf, v_budget);
+       v_actual = i40e_reserve_msix_vectors(pf, v_budget);
 
-       if (vec != v_budget) {
+       if (v_actual != v_budget) {
                /* If we have limited resources, we will start with no vectors
                 * for the special features and then allocate vectors to some
                 * of these features based on the policy and at the end disable
@@ -6962,26 +7138,30 @@ static int i40e_init_msix(struct i40e_pf *pf)
                pf->num_vmdq_msix = 0;
        }
 
-       if (vec < I40E_MIN_MSIX) {
+       if (v_actual < I40E_MIN_MSIX) {
                pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
                kfree(pf->msix_entries);
                pf->msix_entries = NULL;
                return -ENODEV;
 
-       } else if (vec == I40E_MIN_MSIX) {
+       } else if (v_actual == I40E_MIN_MSIX) {
                /* Adjust for minimal MSIX use */
                pf->num_vmdq_vsis = 0;
                pf->num_vmdq_qps = 0;
                pf->num_lan_qps = 1;
                pf->num_lan_msix = 1;
 
-       } else if (vec != v_budget) {
+       } else if (v_actual != v_budget) {
+               int vec;
+
                /* reserve the misc vector */
-               vec--;
+               vec = v_actual - 1;
 
                /* Scale vector usage down */
                pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
                pf->num_vmdq_vsis = 1;
+               pf->num_vmdq_qps = 1;
+               pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
 
                /* partition out the remaining vectors */
                switch (vec) {
@@ -7007,10 +7187,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
                                vec--;
                        }
 #endif
-                       pf->num_lan_msix = min_t(int, (vec / 2),
-                                                pf->num_lan_qps);
-                       pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
-                                                 I40E_DEFAULT_NUM_VMDQ_VSI);
+                       /* give the rest to the PF */
+                       pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
                        break;
                }
        }
@@ -7027,7 +7205,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
                pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
        }
 #endif
-       return err;
+       return v_actual;
 }
 
 /**
@@ -7104,11 +7282,12 @@ err_out:
  **/
 static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 {
-       int err = 0;
+       int vectors = 0;
+       ssize_t size;
 
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-               err = i40e_init_msix(pf);
-               if (err) {
+               vectors = i40e_init_msix(pf);
+               if (vectors < 0) {
                        pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
 #ifdef I40E_FCOE
                                       I40E_FLAG_FCOE_ENABLED   |
@@ -7128,18 +7307,26 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
        if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
            (pf->flags & I40E_FLAG_MSI_ENABLED)) {
                dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
-               err = pci_enable_msi(pf->pdev);
-               if (err) {
-                       dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
+               vectors = pci_enable_msi(pf->pdev);
+               if (vectors < 0) {
+                       dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
+                                vectors);
                        pf->flags &= ~I40E_FLAG_MSI_ENABLED;
                }
+               vectors = 1;  /* one MSI or Legacy vector */
        }
 
        if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
                dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
 
+       /* set up vector assignment tracking */
+       size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
+       pf->irq_pile = kzalloc(size, GFP_KERNEL);
+       pf->irq_pile->num_entries = vectors;
+       pf->irq_pile->search_hint = 0;
+
        /* track first vector for misc interrupts */
-       err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+       (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
 }
 
 /**
@@ -7189,6 +7376,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 static int i40e_config_rss(struct i40e_pf *pf)
 {
        u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_hw *hw = &pf->hw;
        u32 lut = 0;
        int i, j;
@@ -7206,15 +7394,14 @@ static int i40e_config_rss(struct i40e_pf *pf)
        wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
+       vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
        /* Check capability and Set table size and register per hw expectation*/
        reg_val = rd32(hw, I40E_PFQF_CTL_0);
-       if (hw->func_caps.rss_table_size == 512) {
+       if (pf->rss_table_size == 512)
                reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-               pf->rss_table_size = 512;
-       } else {
-               pf->rss_table_size = 128;
+       else
                reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-       }
        wr32(hw, I40E_PFQF_CTL_0, reg_val);
 
        /* Populate the LUT with max no. of queues in round robin fashion */
@@ -7227,7 +7414,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
                 * If LAN VSI is the only consumer for RSS then this requirement
                 * is not necessary.
                 */
-               if (j == pf->rss_size)
+               if (j == vsi->rss_size)
                        j = 0;
                /* lut = 4-byte sliding window of 4 lut entries */
                lut = (lut << 8) | (j &
@@ -7251,15 +7438,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
  **/
 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 {
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       int new_rss_size;
+
        if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
                return 0;
 
-       queue_count = min_t(int, queue_count, pf->rss_size_max);
+       new_rss_size = min_t(int, queue_count, pf->rss_size_max);
 
-       if (queue_count != pf->rss_size) {
+       if (queue_count != vsi->num_queue_pairs) {
+               vsi->req_queue_pairs = queue_count;
                i40e_prep_for_reset(pf);
 
-               pf->rss_size = queue_count;
+               pf->rss_size = new_rss_size;
 
                i40e_reset_and_rebuild(pf, true);
                i40e_config_rss(pf);
@@ -7432,6 +7623,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
         */
        pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
        pf->rss_size = 1;
+       pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
        if (pf->hw.func_caps.rss) {
@@ -7509,22 +7701,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
        pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
        pf->qp_pile->search_hint = 0;
 
-       /* set up vector assignment tracking */
-       size = sizeof(struct i40e_lump_tracking)
-               + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
-       pf->irq_pile = kzalloc(size, GFP_KERNEL);
-       if (!pf->irq_pile) {
-               kfree(pf->qp_pile);
-               err = -ENOMEM;
-               goto sw_init_done;
-       }
-       pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
-       pf->irq_pile->search_hint = 0;
-
        pf->tx_timeout_recovery_level = 1;
 
        mutex_init(&pf->switch_mutex);
 
+       /* If NPAR is enabled nudge the Tx scheduler */
+       if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
+               i40e_set_npar_bw_setting(pf);
+
 sw_init_done:
        return err;
 }
@@ -7868,7 +8052,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 }
 #endif /* HAVE_BRIDGE_ATTRIBS */
 
-const struct net_device_ops i40e_netdev_ops = {
+static const struct net_device_ops i40e_netdev_ops = {
        .ndo_open               = i40e_open,
        .ndo_stop               = i40e_close,
        .ndo_start_xmit         = i40e_lan_xmit_frame,
@@ -9183,14 +9367,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
        i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
        i40e_link_event(pf);
 
-       /* Initialize user-specific link properties */
-       pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
-                                 I40E_AQ_AN_COMPLETED) ? true : false);
-
-       /* fill in link information and enable LSE reporting */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
-       i40e_link_event(pf);
-
        /* Initialize user-specific link properties */
        pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
                                  I40E_AQ_AN_COMPLETED) ? true : false);
@@ -9258,7 +9434,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                        pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
                        dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
                }
-               pf->num_lan_qps = pf->rss_size_max;
+               pf->num_lan_qps = max_t(int, pf->rss_size_max,
+                                       num_online_cpus());
+               pf->num_lan_qps = min_t(int, pf->num_lan_qps,
+                                       pf->hw.func_caps.num_tx_qp);
+
                queues_left -= pf->num_lan_qps;
        }
 
@@ -9397,6 +9577,7 @@ static void i40e_print_features(struct i40e_pf *pf)
 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct i40e_aq_get_phy_abilities_resp abilities;
+       unsigned long ioremap_len;
        struct i40e_pf *pf;
        struct i40e_hw *hw;
        static u16 pfs_found;
@@ -9448,8 +9629,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        hw = &pf->hw;
        hw->back = pf;
-       hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
-                             pci_resource_len(pdev, 0));
+
+       ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
+                           I40E_MAX_CSR_SPACE);
+
+       hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
        if (!hw->hw_addr) {
                err = -EIO;
                dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
@@ -9527,7 +9711,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev,
                         "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 
-
        i40e_verify_eeprom(pf);
 
        /* Rev 0 hardware was never productized */
@@ -9662,13 +9845,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
 
-       msleep(75);
-       err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
-       if (err) {
-               dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4)) {
+               msleep(75);
+               err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+               if (err)
+                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+                                pf->hw.aq.asq_last_status);
        }
-
        /* The main driver is (mostly) up and happy. We need to set this state
         * before setting up the misc vector or we get a race and the vector
         * ends up disabled forever.
@@ -9777,7 +9961,6 @@ err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
 err_init_lan_hmc:
        kfree(pf->qp_pile);
-       kfree(pf->irq_pile);
 err_sw_init:
 err_adminq_setup:
        (void)i40e_shutdown_adminq(hw);
@@ -9818,6 +10001,7 @@ static void i40e_remove(struct pci_dev *pdev)
        set_bit(__I40E_DOWN, &pf->state);
        del_timer_sync(&pf->service_timer);
        cancel_work_sync(&pf->service_task);
+       i40e_fdir_teardown(pf);
 
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
                i40e_free_vfs(pf);
@@ -9844,12 +10028,6 @@ static void i40e_remove(struct pci_dev *pdev)
        if (pf->vsi[pf->lan_vsi])
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
-       i40e_stop_misc_vector(pf);
-       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-               synchronize_irq(pf->msix_entries[0].vector);
-               free_irq(pf->msix_entries[0].vector, pf);
-       }
-
        /* shutdown and destroy the HMC */
        if (pf->hw.hmc.hmc_obj) {
                ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9882,7 +10060,6 @@ static void i40e_remove(struct pci_dev *pdev)
        }
 
        kfree(pf->qp_pile);
-       kfree(pf->irq_pile);
        kfree(pf->vsi);
 
        iounmap(pf->hw.hw_addr);
@@ -10003,6 +10180,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
        wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
 
+       i40e_clear_interrupt_scheme(pf);
+
        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, pf->wol_en);
                pci_set_power_state(pdev, PCI_D3hot);
@@ -10108,9 +10287,6 @@ static int __init i40e_init_module(void)
                i40e_driver_string, i40e_driver_version_str);
        pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
 
-#if IS_ENABLED(CONFIG_CONFIGFS_FS)
-       i40e_configfs_init();
-#endif /* CONFIG_CONFIGFS_FS */
        i40e_dbg_init();
        return pci_register_driver(&i40e_driver);
 }
@@ -10126,8 +10302,5 @@ static void __exit i40e_exit_module(void)
 {
        pci_unregister_driver(&i40e_driver);
        i40e_dbg_exit();
-#if IS_ENABLED(CONFIG_CONFIGFS_FS)
-       i40e_configfs_exit();
-#endif /* CONFIG_CONFIGFS_FS */
 }
 module_exit(i40e_exit_module);
index 28429c8fbc9898a65514139bf6cf777fa3a2c4bd..e49acd2accd30917aff74dcb681fcede09949112 100644 (file)
@@ -171,8 +171,8 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
  *
  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  **/
-i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
-                                    u16 *data)
+static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+                                           u16 *data)
 {
        i40e_status ret_code = I40E_ERR_TIMEOUT;
        u32 sr_reg;
@@ -200,7 +200,6 @@ i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
                        *data = (u16)((sr_reg &
                                       I40E_GLNVM_SRDATA_RDDATA_MASK)
                                    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
-                       *data = le16_to_cpu(*data);
                }
        }
        if (ret_code)
@@ -237,8 +236,8 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
  * method. The buffer read is preceded by the NVM ownership take
  * and followed by the release.
  **/
-i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
-                                      u16 *words, u16 *data)
+static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+                                             u16 *words, u16 *data)
 {
        i40e_status ret_code = 0;
        u16 index, word;
@@ -725,9 +724,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
 {
        i40e_status status;
        enum i40e_nvmupd_cmd upd_cmd;
+       bool retry_attempt = false;
 
        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
 
+retry:
        switch (upd_cmd) {
        case I40E_NVMUPD_WRITE_CON:
                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -771,6 +772,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
                *errno = -ESRCH;
                break;
        }
+
+       /* In some circumstances, a multi-write transaction takes longer
+        * than the default 3 minute timeout on the write semaphore.  If
+        * the write failed with an EBUSY status, this is likely the problem,
+        * so here we try to reacquire the semaphore then retry the write.
+        * We only do one retry, then give up.
+        */
+       if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+           !retry_attempt) {
+               i40e_status old_status = status;
+               u32 old_asq_status = hw->aq.asq_last_status;
+               u32 gtime;
+
+               gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+               if (gtime >= hw->nvm.hw_semaphore_timeout) {
+                       i40e_debug(hw, I40E_DEBUG_ALL,
+                                  "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+                                  gtime, hw->nvm.hw_semaphore_timeout);
+                       i40e_release_nvm(hw);
+                       status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+                       if (status) {
+                               i40e_debug(hw, I40E_DEBUG_ALL,
+                                          "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+                                          hw->aq.asq_last_status);
+                               status = old_status;
+                               hw->aq.asq_last_status = old_asq_status;
+                       } else {
+                               retry_attempt = true;
+                               goto retry;
+                       }
+               }
+       }
+
        return status;
 }
 
index 8cab460865f5bb27cc5b7d78c405bf78d14d7a79..fea0d37ecc722af58d052476829c5f8ec53ef916 100644 (file)
@@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
 
 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
                                u16 *fw_major_version, u16 *fw_minor_version,
+                               u32 *fw_build,
                                u16 *api_major_version, u16 *api_minor_version,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
index f8c863bfa6f7c3af16519b2b0ccbf5a393bd41fd..f5a50b9366cbe6fe7b368e6f3255ff08f4099147 100644 (file)
@@ -228,7 +228,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
                         "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                         fd_data->pctype, fd_data->fd_id, ret);
                err = true;
-       } else {
+       } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                if (add)
                        dev_info(&pf->pdev->dev,
                                 "Filter OK for PCTYPE %d loc = %d\n",
@@ -303,7 +303,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                         "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                         fd_data->pctype, fd_data->fd_id, ret);
                err = true;
-       } else {
+       } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                if (add)
                        dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
                                 fd_data->pctype, fd_data->fd_id);
@@ -376,7 +376,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
                                 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                                 fd_data->pctype, fd_data->fd_id, ret);
                        err = true;
-               } else {
+               } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                        if (add)
                                dev_info(&pf->pdev->dev,
                                         "Filter OK for PCTYPE %d loc = %d\n",
@@ -471,12 +471,27 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                        dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
                                 rx_desc->wb.qword0.hi_dword.fd_id);
 
+               /* Check if the programming error is for ATR.
+                * If so, auto disable ATR and set a state for
+                * flush in progress. Next time we come here if flush is in
+                * progress do nothing, once flush is complete the state will
+                * be cleared.
+                */
+               if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+                       return;
+
                pf->fd_add_err++;
                /* store the current atr filter count */
                pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
 
+               if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                       pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+                       set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+               }
+
                /* filter programming failed most likely due to table full */
-               fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+               fcnt_prog = i40e_get_global_fd_count(pf);
                fcnt_avail = pf->fdir_pf_filter_count;
                /* If ATR is running fcnt_prog can quickly change,
                 * if we are very close to full, it makes sense to disable
@@ -586,6 +601,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
        }
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -595,10 +624,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-       u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-                       ? ring->next_to_use
-                       : ring->next_to_use + ring->count);
-       return ntu - ring->next_to_clean;
+       u32 head, tail;
+
+       head = i40e_get_head(ring);
+       tail = readl(ring->tail);
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
 }
 
 /**
@@ -607,6 +642,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+       u32 tx_done = tx_ring->stats.packets;
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = i40e_get_tx_pending(tx_ring);
        struct i40e_pf *pf = tx_ring->vsi->back;
        bool ret = false;
@@ -624,41 +661,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-           (tx_pending >= I40E_MIN_DESC_PENDING)) {
+       if ((tx_done_old == tx_done) && tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
-       } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-                  (tx_pending < I40E_MIN_DESC_PENDING) &&
-                  (tx_pending > 0)) {
+       } else if (tx_done_old == tx_done &&
+                  (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
                if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
                        dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
                                 tx_pending, tx_ring->queue_index);
                pf->tx_sluggish_count++;
        } else {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_done;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
        return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-       return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -749,6 +770,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                }
 
+               prefetch(tx_desc);
+
                /* update budget accounting */
                budget--;
        } while (likely(budget));
@@ -1038,7 +1061,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
                        for (i = 0; i < rx_ring->count; i++) {
                                rx_bi = &rx_ring->rx_bi[i];
                                rx_bi->dma = 0;
-                               rx_bi->hdr_buf = 0;
+                               rx_bi->hdr_buf = NULL;
                        }
                }
        }
@@ -1354,10 +1377,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        struct iphdr *iph;
        __sum16 csum;
 
-       ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-                     (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-       ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-                     (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+       ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+                    (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+       ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+                    (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
 
        skb->ip_summed = CHECKSUM_NONE;
 
@@ -1920,6 +1943,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
                return;
 
+       if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+               return;
+
        /* if sampling is disabled do nothing */
        if (!tx_ring->atr_sample_rate)
                return;
@@ -2043,6 +2069,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                tx_flags |= I40E_TX_FLAGS_SW_VLAN;
        }
 
+       if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+               goto out;
+
        /* Insert 802.1p priority into VLAN header */
        if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
            (skb->priority != TC_PRIO_CONTROL)) {
@@ -2063,6 +2092,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                        tx_flags |= I40E_TX_FLAGS_HW_VLAN;
                }
        }
+
+out:
        *flags = tx_flags;
        return 0;
 }
@@ -2187,8 +2218,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
+       u32 l4_tunnel = 0;
 
        if (skb->encapsulation) {
+               switch (ip_hdr(skb)->protocol) {
+               case IPPROTO_UDP:
+                       l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       break;
+               default:
+                       return;
+               }
                network_hdr_len = skb_inner_network_header_len(skb);
                this_ip_hdr = inner_ip_hdr(skb);
                this_ipv6_hdr = inner_ipv6_hdr(skb);
@@ -2211,8 +2250,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 
                /* Now set the ctx descriptor fields */
                *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
-                                       I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-                                  I40E_TXD_CTX_UDP_TUNNELING            |
+                                  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
+                                  l4_tunnel                             |
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
@@ -2350,6 +2389,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
        return __i40e_maybe_stop_tx(tx_ring, size);
 }
 
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                              const u8 hdr_len)
+{
+       struct skb_frag_struct *frag;
+       bool linearize = false;
+       unsigned int size = 0;
+       u16 num_frags;
+       u16 gso_segs;
+
+       num_frags = skb_shinfo(skb)->nr_frags;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+               u16 j = 1;
+
+               if (num_frags < (I40E_MAX_BUFFER_TXD))
+                       goto linearize_chk_done;
+               /* try the simple math, if we have too many frags per segment */
+               if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+                   I40E_MAX_BUFFER_TXD) {
+                       linearize = true;
+                       goto linearize_chk_done;
+               }
+               frag = &skb_shinfo(skb)->frags[0];
+               size = hdr_len;
+               /* we might still have more fragments per segment */
+               do {
+                       size += skb_frag_size(frag);
+                       frag++; j++;
+                       if (j == I40E_MAX_BUFFER_TXD) {
+                               if (size < skb_shinfo(skb)->gso_size) {
+                                       linearize = true;
+                                       break;
+                               }
+                               j = 1;
+                               size -= skb_shinfo(skb)->gso_size;
+                               if (size)
+                                       j++;
+                               size += hdr_len;
+                       }
+                       num_frags--;
+               } while (num_frags);
+       } else {
+               if (num_frags >= I40E_MAX_BUFFER_TXD)
+                       linearize = true;
+       }
+
+linearize_chk_done:
+       return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -2607,6 +2707,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
+       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+               if (skb_linearize(skb))
+                       goto out_drop;
+
        skb_tx_timestamp(skb);
 
        /* always enable CRC insertion offload */
index 38449b230d60979fa8ccce82844a6771c574daae..4b0b8102cdc39c2529f49c18d6b1cbc61c48c341 100644 (file)
@@ -120,6 +120,7 @@ enum i40e_dyn_idx_t {
 
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 #define I40E_MAX_DATA_PER_TXD  8192
 
index 90069396bb28fd0033d8e1de853c2a64bc5109fd..83032d2c2275c254319ea0478c88fb6c8fd9c430 100644 (file)
@@ -1143,7 +1143,7 @@ struct i40e_hw_port_stats {
 #define I40E_SR_EMP_MODULE_PTR                 0x0F
 #define I40E_SR_PBA_FLAGS                      0x15
 #define I40E_SR_PBA_BLOCK_PTR                  0x16
-#define I40E_SR_NVM_IMAGE_VERSION              0x18
+#define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
 #define I40E_SR_NVM_EETRACK_LO                 0x2D
index 493335caa27600b806c8d02a5ce52b52e66661f6..7cc635e4c2e480a315534cb6cc49191fa27595c0 100644 (file)
@@ -403,9 +403,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
                vf->lan_vsi_index = vsi->idx;
                vf->lan_vsi_id = vsi->id;
-               dev_info(&pf->pdev->dev,
-                        "VF %d assigned LAN VSI index %d, VSI id %d\n",
-                        vf->vf_id, vsi->idx, vsi->id);
                /* If the port VLAN has been configured and then the
                 * VF driver was removed then the VSI port VLAN
                 * configuration was destroyed.  Check if there is
@@ -712,74 +709,6 @@ complete_reset:
        clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
 
-/**
- * i40e_enable_pf_switch_lb
- * @pf: pointer to the pf structure
- *
- * enable switch loop back or die - no point in a return value
- **/
-void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
-{
-       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-       struct i40e_vsi_context ctxt;
-       int aq_ret;
-
-       ctxt.seid = pf->main_vsi_seid;
-       ctxt.pf_num = pf->hw.pf_id;
-       ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
-               dev_info(&pf->pdev->dev,
-                        "%s couldn't get pf vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
-               return;
-       }
-       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-       ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
-
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
-               dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
-       }
-}
-
-/**
- * i40e_disable_pf_switch_lb
- * @pf: pointer to the pf structure
- *
- * disable switch loop back or die - no point in a return value
- **/
-void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
-{
-       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-       struct i40e_vsi_context ctxt;
-       int aq_ret;
-
-       ctxt.seid = pf->main_vsi_seid;
-       ctxt.pf_num = pf->hw.pf_id;
-       ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
-               dev_info(&pf->pdev->dev,
-                        "%s couldn't get pf vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
-               return;
-       }
-       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-       ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
-
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
-               dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
-       }
-}
-
 /**
  * i40e_free_vfs
  * @pf: pointer to the pf structure
@@ -832,7 +761,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
                }
-               i40e_disable_pf_switch_lb(pf);
        } else {
                dev_warn(&pf->pdev->dev,
                         "unable to disable SR-IOV because VFs are assigned.\n");
index ef777a62e393cdf23b68298f5198b152c7383dc9..21db113a64fa3710d87f0111a37c1d31228efe2d 100644 (file)
@@ -126,7 +126,5 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
 
 void i40e_vc_notify_link_state(struct i40e_pf *pf);
 void i40e_vc_notify_reset(struct i40e_pf *pf);
-void i40e_enable_pf_switch_lb(struct i40e_pf *pf);
-void i40e_disable_pf_switch_lb(struct i40e_pf *pf);
 
 #endif /* _I40E_VIRTCHNL_PF_H_ */
index 60f04e96a80e0a440faacf2c620777992259426b..ef43d68f67b30632b504962f1fd627799f16dd7f 100644 (file)
@@ -93,6 +93,7 @@ struct i40e_adminq_info {
        u16 asq_buf_size;               /* send queue buffer size */
        u16 fw_maj_ver;                 /* firmware major version */
        u16 fw_min_ver;                 /* firmware minor version */
+       u32 fw_build;                   /* firmware build number */
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
        bool nvm_release_on_done;
index 50b0ee54fc0611f99139b742324db2bdb4fa4f14..f07b9ff2b823167f52055de53a2f9db98ac30586 100644 (file)
@@ -85,9 +85,8 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 {
        struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
        u16 len = le16_to_cpu(aq_desc->datalen);
-       u8 *aq_buffer = (u8 *)buffer;
-       u32 data[4];
-       u32 i = 0;
+       u8 *buf = (u8 *)buffer;
+       u16 i = 0;
 
        if ((!(mask & hw->debug_mask)) || (desc == NULL))
                return;
@@ -109,29 +108,30 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
                   le32_to_cpu(aq_desc->params.external.addr_low));
 
        if ((buffer != NULL) && (aq_desc->datalen != 0)) {
-               memset(data, 0, sizeof(data));
                i40e_debug(hw, mask, "AQ CMD Buffer:\n");
                if (buf_len < len)
                        len = buf_len;
-               for (i = 0; i < len; i++) {
-                       data[((i % 16) / 4)] |=
-                               ((u32)aq_buffer[i]) << (8 * (i % 4));
-                       if ((i % 16) == 15) {
-                               i40e_debug(hw, mask,
-                                          "\t0x%04X  %08X %08X %08X %08X\n",
-                                          i - 15, le32_to_cpu(data[0]),
-                                          le32_to_cpu(data[1]),
-                                          le32_to_cpu(data[2]),
-                                          le32_to_cpu(data[3]));
-                               memset(data, 0, sizeof(data));
-                       }
+               /* write the full 16-byte chunks */
+               for (i = 0; i < (len - 16); i += 16)
+                       i40e_debug(hw, mask,
+                                  "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+                                  i, buf[i], buf[i + 1], buf[i + 2],
+                                  buf[i + 3], buf[i + 4], buf[i + 5],
+                                  buf[i + 6], buf[i + 7], buf[i + 8],
+                                  buf[i + 9], buf[i + 10], buf[i + 11],
+                                  buf[i + 12], buf[i + 13], buf[i + 14],
+                                  buf[i + 15]);
+               /* write whatever's left over without overrunning the buffer */
+               if (i < len) {
+                       char d_buf[80];
+                       int j = 0;
+
+                       memset(d_buf, 0, sizeof(d_buf));
+                       j += sprintf(d_buf, "\t0x%04X ", i);
+                       while (i < len)
+                               j += sprintf(&d_buf[j], " %02X", buf[i++]);
+                       i40e_debug(hw, mask, "%s\n", d_buf);
                }
-               if ((i % 16) != 0)
-                       i40e_debug(hw, mask, "\t0x%04X  %08X %08X %08X %08X\n",
-                                  i - (i % 16), le32_to_cpu(data[0]),
-                                  le32_to_cpu(data[1]),
-                                  le32_to_cpu(data[2]),
-                                  le32_to_cpu(data[3]));
        }
 }
 
@@ -542,7 +542,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
        I40E_PTT_UNUSED_ENTRY(255)
 };
 
-
 /**
  * i40e_aq_send_msg_to_pf
  * @hw: pointer to the hardware structure
index 9173834825ac4cc1bbe5c3ecbc85e77db693c76f..58e37a44b80a10233f00d004ee8fb9f5d496c12e 100644 (file)
@@ -59,8 +59,7 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
 void i40e_idle_aq(struct i40e_hw *hw);
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
-                                            bool unloading);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
index fc7e2d0b755cd1c506417b52ed1fa776d961b961..d9f3db542c5f272290ebd10df36ab8fe7d92ab12 100644 (file)
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
        }
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -135,10 +149,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-       u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-                       ? ring->next_to_use
-                       : ring->next_to_use + ring->count);
-       return ntu - ring->next_to_clean;
+       u32 head, tail;
+
+       head = i40e_get_head(ring);
+       tail = readl(ring->tail);
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
 }
 
 /**
@@ -147,6 +167,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+       u32 tx_done = tx_ring->stats.packets;
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = i40e_get_tx_pending(tx_ring);
        bool ret = false;
 
@@ -163,36 +185,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-           (tx_pending >= I40E_MIN_DESC_PENDING)) {
+       if ((tx_done_old == tx_done) && tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
-       } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
-                  !(tx_pending < I40E_MIN_DESC_PENDING) ||
-                  !(tx_pending > 0)) {
+       } else if (tx_done_old == tx_done &&
+                  (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_done;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
        return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-       return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -283,6 +289,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                }
 
+               prefetch(tx_desc);
+
                /* update budget accounting */
                budget--;
        } while (likely(budget));
@@ -536,7 +544,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
                        for (i = 0; i < rx_ring->count; i++) {
                                rx_bi = &rx_ring->rx_bi[i];
                                rx_bi->dma = 0;
-                               rx_bi->hdr_buf = 0;
+                               rx_bi->hdr_buf = NULL;
                        }
                }
        }
@@ -852,10 +860,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        struct iphdr *iph;
        __sum16 csum;
 
-       ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-                     (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-       ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-                     (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+       ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+                    (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+       ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+                    (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
 
        skb->ip_summed = CHECKSUM_NONE;
 
@@ -1405,17 +1413,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       if (protocol == htons(ETH_P_IP)) {
-               iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       if (iph->version == 4) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
-       } else if (skb_is_gso_v6(skb)) {
-
-               ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-                                          : ipv6_hdr(skb);
+       } else if (ipv6h->version == 6) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                ipv6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1456,8 +1463,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
+       u32 l4_tunnel = 0;
 
        if (skb->encapsulation) {
+               switch (ip_hdr(skb)->protocol) {
+               case IPPROTO_UDP:
+                       l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       break;
+               default:
+                       return;
+               }
                network_hdr_len = skb_inner_network_header_len(skb);
                this_ip_hdr = inner_ip_hdr(skb);
                this_ipv6_hdr = inner_ipv6_hdr(skb);
@@ -1473,22 +1488,23 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
-                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       if (tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
-                       } else {
-                               *cd_tunneling |=
-                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-                       }
                }
 
                /* Now set the ctx descriptor fields */
                *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
-                                       I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-                                  I40E_TXD_CTX_UDP_TUNNELING            |
+                                  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
+                                  l4_tunnel                             |
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+               if (this_ip_hdr->version == 6) {
+                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       tx_flags |= I40E_TX_FLAGS_IPV6;
+               }
+
 
        } else {
                network_hdr_len = skb_network_header_len(skb);
@@ -1579,6 +1595,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                              const u8 hdr_len)
+{
+       struct skb_frag_struct *frag;
+       bool linearize = false;
+       unsigned int size = 0;
+       u16 num_frags;
+       u16 gso_segs;
+
+       num_frags = skb_shinfo(skb)->nr_frags;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+               u16 j = 1;
+
+               if (num_frags < (I40E_MAX_BUFFER_TXD))
+                       goto linearize_chk_done;
+               /* try the simple math, if we have too many frags per segment */
+               if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+                   I40E_MAX_BUFFER_TXD) {
+                       linearize = true;
+                       goto linearize_chk_done;
+               }
+               frag = &skb_shinfo(skb)->frags[0];
+               size = hdr_len;
+               /* we might still have more fragments per segment */
+               do {
+                       size += skb_frag_size(frag);
+                       frag++; j++;
+                       if (j == I40E_MAX_BUFFER_TXD) {
+                               if (size < skb_shinfo(skb)->gso_size) {
+                                       linearize = true;
+                                       break;
+                               }
+                               j = 1;
+                               size -= skb_shinfo(skb)->gso_size;
+                               if (size)
+                                       j++;
+                               size += hdr_len;
+                       }
+                       num_frags--;
+               } while (num_frags);
+       } else {
+               if (num_frags >= I40E_MAX_BUFFER_TXD)
+                       linearize = true;
+       }
+
+linearize_chk_done:
+       return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -1853,6 +1930,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
+       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+               if (skb_linearize(skb))
+                       goto out_drop;
+
        skb_tx_timestamp(skb);
 
        /* always enable CRC insertion offload */
index ffdda716813e137d0f3671f91c2ac0cc219e71c6..1e49bb1fbac1f0de59444626cc9645b72aeac0da 100644 (file)
@@ -120,6 +120,7 @@ enum i40e_dyn_idx_t {
 
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 #define I40E_MAX_DATA_PER_TXD  8192
 
index a2693865594a88091b80f57ce86320d0a8afd554..eba6e4b34f70c2f5b577f957f1a6eaf1548f9545 100644 (file)
@@ -1116,7 +1116,7 @@ struct i40e_hw_port_stats {
 /* Checksum and Shadow RAM pointers */
 #define I40E_SR_NVM_CONTROL_WORD               0x00
 #define I40E_SR_EMP_MODULE_PTR                 0x0F
-#define I40E_SR_NVM_IMAGE_VERSION              0x18
+#define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
 #define I40E_SR_NVM_EETRACK_LO                 0x2D
index c5ffaccb59d3c825494c998dd687b48d5f389bdd..b68b73163311d924efa98eee37bf282804e3a043 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <linux/uaccess.h>
 
-
 struct i40evf_stats {
        char stat_string[ETH_GSTRING_LEN];
        int stat_offset;
@@ -642,12 +641,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
        if (!indir)
                return 0;
 
-       for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-               hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
-               indir[j++] = hlut_val & 0xff;
-               indir[j++] = (hlut_val >> 8) & 0xff;
-               indir[j++] = (hlut_val >> 16) & 0xff;
-               indir[j++] = (hlut_val >> 24) & 0xff;
+       if (indir) {
+               for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+                       hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+                       indir[j++] = hlut_val & 0xff;
+                       indir[j++] = (hlut_val >> 8) & 0xff;
+                       indir[j++] = (hlut_val >> 16) & 0xff;
+                       indir[j++] = (hlut_val >> 24) & 0xff;
+               }
        }
        return 0;
 }
index 31d35e200f04f2b641bc42492ce9a891350737db..f44911df286acac2c6beb797b5e02a72e1895547 100644 (file)
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.2.3"
+#define DRV_VERSION "1.2.6"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -2009,7 +2009,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
  *
  * This task completes the work that was begun in probe. Due to the nature
  * of VF-PF communications, we may need to wait tens of milliseconds to get
- * reponses back from the PF. Rather than busy-wait in probe and bog down the
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
  * whole system, we'll do it in a task so we can sleep.
  * This task only runs during driver init. Once we've established
  * communications with the PF driver and set up our netdev, the watchdog
@@ -2400,7 +2400,7 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
 }
 
 /**
- * i40evf_resume - Power managment resume routine
+ * i40evf_resume - Power management resume routine
  * @pdev: PCI device information struct
  *
  * Called when the system (VM) is resumed from sleep/suspend.
index d20fc8ed11f1574a2ae0fa4649be23fe204e1308..52d01b8b01edb64eb148f0a861b1d59ff3262084 100644 (file)
@@ -30,7 +30,7 @@
  *
  * Neither the 82576 nor the 82580 offer registers wide enough to hold
  * nanoseconds time values for very long. For the 82580, SYSTIM always
- * counts nanoseconds, but the upper 24 bits are not availible. The
+ * counts nanoseconds, but the upper 24 bits are not available. The
  * frequency is adjusted by changing the 32 bit fractional nanoseconds
  * register, TIMINCA.
  *
@@ -358,7 +358,7 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
 static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
 {
        u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
-       u32 mask[IGB_N_SDP] = {
+       static const u32 mask[IGB_N_SDP] = {
                E1000_CTRL_SDP0_DIR,
                E1000_CTRL_SDP1_DIR,
                E1000_CTRL_EXT_SDP2_DIR,
@@ -373,16 +373,16 @@ static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
 
 static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
 {
-       struct e1000_hw *hw = &igb->hw;
-       u32 aux0_sel_sdp[IGB_N_SDP] = {
+       static const u32 aux0_sel_sdp[IGB_N_SDP] = {
                AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
        };
-       u32 aux1_sel_sdp[IGB_N_SDP] = {
+       static const u32 aux1_sel_sdp[IGB_N_SDP] = {
                AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
        };
-       u32 ts_sdp_en[IGB_N_SDP] = {
+       static const u32 ts_sdp_en[IGB_N_SDP] = {
                TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
        };
+       struct e1000_hw *hw = &igb->hw;
        u32 ctrl, ctrl_ext, tssdp = 0;
 
        ctrl = rd32(E1000_CTRL);
@@ -409,28 +409,28 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
 
 static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
 {
-       struct e1000_hw *hw = &igb->hw;
-       u32 aux0_sel_sdp[IGB_N_SDP] = {
+       static const u32 aux0_sel_sdp[IGB_N_SDP] = {
                AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
        };
-       u32 aux1_sel_sdp[IGB_N_SDP] = {
+       static const u32 aux1_sel_sdp[IGB_N_SDP] = {
                AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
        };
-       u32 ts_sdp_en[IGB_N_SDP] = {
+       static const u32 ts_sdp_en[IGB_N_SDP] = {
                TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
        };
-       u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
+       static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
                TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
                TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
        };
-       u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
+       static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
                TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
                TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
        };
-       u32 ts_sdp_sel_clr[IGB_N_SDP] = {
+       static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
                TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
                TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
        };
+       struct e1000_hw *hw = &igb->hw;
        u32 ctrl, ctrl_ext, tssdp = 0;
 
        ctrl = rd32(E1000_CTRL);
@@ -468,7 +468,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
        u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
        unsigned long flags;
        struct timespec ts;
-       int pin;
+       int pin = -1;
        s64 ns;
 
        switch (rq->type) {
index 70cc4c5c0a0130e82b44bd2aa597aca401e670de..21aea7e7f03fffb3b3223d08511002d13aa5fb89 100644 (file)
@@ -2609,7 +2609,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
        eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
 
        /* The lower 16bits of the EICR register are for the queue interrupts
-        * which should be masked here in order to not accidently clear them if
+        * which should be masked here in order to not accidentally clear them if
         * the bits are high when ixgbe_msix_other is called. There is a race
         * condition otherwise which results in possible performance loss
         * especially if the ixgbe_msix_other interrupt is triggering
@@ -3924,7 +3924,7 @@ static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
        for (i = 0; i < hw->mac.num_rar_entries; i++) {
                adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
                adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
-               memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+               eth_zero_addr(adapter->mac_table[i].addr);
                adapter->mac_table[i].queue = 0;
        }
        ixgbe_sync_mac_table(adapter);
@@ -3992,7 +3992,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
                    adapter->mac_table[i].queue == queue) {
                        adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
                        adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
-                       memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+                       eth_zero_addr(adapter->mac_table[i].addr);
                        adapter->mac_table[i].queue = 0;
                        ixgbe_sync_mac_table(adapter);
                        return 0;
index 79c00f57d3e7de72a1f33039702b6b188ab04f58..bd46f5d1c943b0aa674bd619b5df67dc9aa58d33 100644 (file)
@@ -488,7 +488,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
  * @work: pointer to the work struct
  *
  * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware
- * timestamp has been taken for the current skb. It is necesary, because the
+ * timestamp has been taken for the current skb. It is necessary, because the
  * descriptor's "done" bit does not correlate with the timestamp event.
  */
 static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
index 7f37fe7269a7360c0edbccfab6f8e7e03ff5a3b0..09a291bb7c343c6b202e8bff6f7594329ec2a564 100644 (file)
@@ -141,7 +141,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
                 * The 82599 supports up to 64 VFs per physical function
                 * but this implementation limits allocation to 63 so that
                 * basic networking resources are still available to the
-                * physical function.  If the user requests greater thn
+                * physical function.  If the user requests greater than
                 * 63 VFs then it is an error - reset to default of zero.
                 */
                adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
index fc5ecee56ca8f18dd3fbd77bf68101087da0aeec..8451f9a7cbd886179afde9ca205ecb6914aab58e 100644 (file)
@@ -1690,7 +1690,7 @@ enum {
 #define IXGBE_MACC_FS        0x00040000
 #define IXGBE_MAC_RX2TX_LPBK 0x00000002
 
-/* Veto Bit definiton */
+/* Veto Bit definition */
 #define IXGBE_MMNGC_MNG_VETO  0x00000001
 
 /* LINKS Bit Masks */
index cdb53be7d9958e4cb0f92456f459eacea122e038..f510a5822f90f53ba06c0ea770071dd462028891 100644 (file)
@@ -65,7 +65,7 @@ static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  *  ixgbevf_reset_hw_vf - Performs hardware reset
  *  @hw: pointer to hardware structure
  *
- *  Resets the hardware by reseting the transmit and receive units, masks and
+ *  Resets the hardware by resetting the transmit and receive units, masks and
  *  clears all interrupts.
  **/
 static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
index fdf3e382e4649313b677fa8c164e1d2430130f3b..3e8b1bfb1f2e316212bd9b60fa06522ca4dc68db 100644 (file)
@@ -1423,7 +1423,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
 {
        struct mvpp2_prs_entry pe;
 
-       /* Promiscous mode - Accept unknown packets */
+       /* Promiscuous mode - Accept unknown packets */
 
        if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
                /* Entry exist - update port only */
@@ -3402,7 +3402,7 @@ static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool
        for (i = 0; i < bm_pool->buf_num; i++) {
                u32 vaddr;
 
-               /* Get buffer virtual adress (indirect access) */
+               /* Get buffer virtual address (indirect access) */
                mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
                vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
                if (!vaddr)
index a681d7c0bb9f066f8d48ed068f68f0001dad8d0f..20b3c7b21e632bb05dc38f6ba617c5abbac36c98 100644 (file)
@@ -1499,6 +1499,15 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = mlx4_ACCESS_REG_wrapper,
        },
+       {
+               .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper,
+       },
        /* Native multicast commands are not available for guests */
        {
                .opcode = MLX4_CMD_QP_ATTACH,
index c95ca252187c333719fe4141225cea020909c723..cde14fa2f74229facaccce3e83fc808f4a0be004 100644 (file)
 
 #include "mlx4_en.h"
 
+/* Definitions for QCN
+ */
+
+struct mlx4_congestion_control_mb_prio_802_1_qau_params {
+       __be32 modify_enable_high;
+       __be32 modify_enable_low;
+       __be32 reserved1;
+       __be32 extended_enable;
+       __be32 rppp_max_rps;
+       __be32 rpg_time_reset;
+       __be32 rpg_byte_reset;
+       __be32 rpg_threshold;
+       __be32 rpg_max_rate;
+       __be32 rpg_ai_rate;
+       __be32 rpg_hai_rate;
+       __be32 rpg_gd;
+       __be32 rpg_min_dec_fac;
+       __be32 rpg_min_rate;
+       __be32 max_time_rise;
+       __be32 max_byte_rise;
+       __be32 max_qdelta;
+       __be32 min_qoffset;
+       __be32 gd_coefficient;
+       __be32 reserved2[5];
+       __be32 cp_sample_base;
+       __be32 reserved3[39];
+};
+
+struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
+       __be64 rppp_rp_centiseconds;
+       __be32 reserved1;
+       __be32 ignored_cnm;
+       __be32 rppp_created_rps;
+       __be32 estimated_total_rate;
+       __be32 max_active_rate_limiter_index;
+       __be32 dropped_cnms_busy_fw;
+       __be32 reserved2;
+       __be32 cnms_handled_successfully;
+       __be32 min_total_limiters_rate;
+       __be32 max_total_limiters_rate;
+       __be32 reserved3[4];
+};
+
 static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
                                   struct ieee_ets *ets)
 {
@@ -242,6 +285,178 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
        return 0;
 }
 
+#define RPG_ENABLE_BIT 31
+#define CN_TAG_BIT     30
+
+static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
+                                    struct ieee_qcn *qcn)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
+       struct mlx4_cmd_mailbox *mailbox_out = NULL;
+       u64 mailbox_in_dma = 0;
+       u32 inmod = 0;
+       int i, err;
+
+       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+               return -EOPNOTSUPP;
+
+       mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+       if (IS_ERR(mailbox_out))
+               return -ENOMEM;
+       hw_qcn =
+       (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
+       mailbox_out->buf;
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               inmod = priv->port | ((1 << i) << 8) |
+                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+               err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
+                                  mailbox_out->dma,
+                                  inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
+                                  MLX4_CMD_CONGESTION_CTRL_OPCODE,
+                                  MLX4_CMD_TIME_CLASS_C,
+                                  MLX4_CMD_NATIVE);
+               if (err) {
+                       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+                       return err;
+               }
+
+               qcn->rpg_enable[i] =
+                       be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
+               qcn->rppp_max_rps[i] =
+                       be32_to_cpu(hw_qcn->rppp_max_rps);
+               qcn->rpg_time_reset[i] =
+                       be32_to_cpu(hw_qcn->rpg_time_reset);
+               qcn->rpg_byte_reset[i] =
+                       be32_to_cpu(hw_qcn->rpg_byte_reset);
+               qcn->rpg_threshold[i] =
+                       be32_to_cpu(hw_qcn->rpg_threshold);
+               qcn->rpg_max_rate[i] =
+                       be32_to_cpu(hw_qcn->rpg_max_rate);
+               qcn->rpg_ai_rate[i] =
+                       be32_to_cpu(hw_qcn->rpg_ai_rate);
+               qcn->rpg_hai_rate[i] =
+                       be32_to_cpu(hw_qcn->rpg_hai_rate);
+               qcn->rpg_gd[i] =
+                       be32_to_cpu(hw_qcn->rpg_gd);
+               qcn->rpg_min_dec_fac[i] =
+                       be32_to_cpu(hw_qcn->rpg_min_dec_fac);
+               qcn->rpg_min_rate[i] =
+                       be32_to_cpu(hw_qcn->rpg_min_rate);
+               qcn->cndd_state_machine[i] =
+                       priv->cndd_state[i];
+       }
+       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+       return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
+                                    struct ieee_qcn *qcn)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
+       struct mlx4_cmd_mailbox *mailbox_in = NULL;
+       u64 mailbox_in_dma = 0;
+       u32 inmod = 0;
+       int i, err;
+#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
+#define MODIFY_ENABLE_LOW_MASK 0xffc00000
+
+       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+               return -EOPNOTSUPP;
+
+       mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+       if (IS_ERR(mailbox_in))
+               return -ENOMEM;
+
+       mailbox_in_dma = mailbox_in->dma;
+       hw_qcn =
+       (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               inmod = priv->port | ((1 << i) << 8) |
+                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+
+               /* Before updating QCN parameter,
+                * need to set it's modify enable bit to 1
+                */
+
+               hw_qcn->modify_enable_high = cpu_to_be32(
+                                               MODIFY_ENABLE_HIGH_MASK);
+               hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
+
+               hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
+               hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
+               hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
+               hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
+               hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
+               hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
+               hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
+               hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
+               hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
+               hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
+               hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
+               priv->cndd_state[i] = qcn->cndd_state_machine[i];
+               if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
+                       hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
+
+               err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
+                              MLX4_CONGESTION_CONTROL_SET_PARAMS,
+                              MLX4_CMD_CONGESTION_CTRL_OPCODE,
+                              MLX4_CMD_TIME_CLASS_C,
+                              MLX4_CMD_NATIVE);
+               if (err) {
+                       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
+                       return err;
+               }
+       }
+       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
+       return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
+                                         struct ieee_qcn_stats *qcn_stats)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
+       struct mlx4_cmd_mailbox *mailbox_out = NULL;
+       u64 mailbox_in_dma = 0;
+       u32 inmod = 0;
+       int i, err;
+
+       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+               return -EOPNOTSUPP;
+
+       mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+       if (IS_ERR(mailbox_out))
+               return -ENOMEM;
+
+       hw_qcn_stats =
+       (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
+       mailbox_out->buf;
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               inmod = priv->port | ((1 << i) << 8) |
+                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+               err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
+                                  mailbox_out->dma, inmod,
+                                  MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+                                  MLX4_CMD_CONGESTION_CTRL_OPCODE,
+                                  MLX4_CMD_TIME_CLASS_C,
+                                  MLX4_CMD_NATIVE);
+               if (err) {
+                       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+                       return err;
+               }
+               qcn_stats->rppp_rp_centiseconds[i] =
+                       be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
+               qcn_stats->rppp_created_rps[i] =
+                       be32_to_cpu(hw_qcn_stats->rppp_created_rps);
+       }
+       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+       return 0;
+}
+
 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
        .ieee_getets    = mlx4_en_dcbnl_ieee_getets,
        .ieee_setets    = mlx4_en_dcbnl_ieee_setets,
@@ -252,6 +467,9 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
 
        .getdcbx        = mlx4_en_dcbnl_getdcbx,
        .setdcbx        = mlx4_en_dcbnl_setdcbx,
+       .ieee_getqcn    = mlx4_en_dcbnl_ieee_getqcn,
+       .ieee_setqcn    = mlx4_en_dcbnl_ieee_setqcn,
+       .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
 };
 
 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
index 2a210c4efb895728ec6ad12eaef9ec8f9ff7fd08..c59ed925adaf322744c320d4274264ed7a9eebfb 100644 (file)
@@ -1685,7 +1685,7 @@ int mlx4_en_start_port(struct net_device *dev)
        }
 
        /* Attach rx QP to bradcast address */
-       memset(&mc_list[10], 0xff, ETH_ALEN);
+       eth_broadcast_addr(&mc_list[10]);
        mc_list[5] = priv->port; /* needed for B0 steering support */
        if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                                  priv->port, 0, MLX4_PROT_ETH,
@@ -1788,7 +1788,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
        }
 
        /* Detach All multicasts */
-       memset(&mc_list[10], 0xff, ETH_ALEN);
+       eth_broadcast_addr(&mc_list[10]);
        mc_list[5] = priv->port; /* needed for B0 steering support */
        mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                              MLX4_PROT_ETH, priv->broadcast_id);
index 2d8ee66138e8ad48cb72daa773a67c1f421e4cac..b66e03d9711f945fe06827ed480258a78ce26833 100644 (file)
@@ -66,7 +66,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
        ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
        packet  = (unsigned char *)skb_put(skb, packet_size);
        memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
-       memset(ethh->h_source, 0, ETH_ALEN);
+       eth_zero_addr(ethh->h_source);
        ethh->h_proto = htons(ETH_P_ARP);
        skb_set_mac_header(skb, 0);
        for (i = 0; i < packet_size; ++i)       /* fill our packet */
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 {
        u32 loopback_ok = 0;
        int i;
-
+       bool gro_enabled;
 
         priv->loopback_ok = 0;
        priv->validate_loopback = 1;
+       gro_enabled = priv->dev->features & NETIF_F_GRO;
 
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+       priv->dev->features &= ~NETIF_F_GRO;
 
        /* xmit */
        if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 mlx4_en_test_loopback_exit:
 
        priv->validate_loopback = 0;
+
+       if (gro_enabled)
+               priv->dev->features |= NETIF_F_GRO;
+
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
        return !loopback_ok;
 }
index 5a21e5dc94cbae7f8c35d989aba039afcb5c4f77..242bcee5d774359a6a32950effa3fa5100901b4a 100644 (file)
@@ -143,7 +143,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [18] = "More than 80 VFs support",
                [19] = "Performance optimized for limited rule configuration flow steering support",
                [20] = "Recoverable error events support",
-               [21] = "Port Remap support"
+               [21] = "Port Remap support",
+               [22] = "QCN support"
        };
        int i;
 
@@ -675,7 +676,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET    0x76
 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET      0x77
 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE  0x7a
-#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET     0x7a
+#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET       0x7b
 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET   0x80
 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET      0x82
 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET      0x84
@@ -777,6 +778,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
        dev_cap->fs_max_num_qp_per_entry = field;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+       if (field & 0x1)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
        MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
        dev_cap->stat_rate_support = stat_rate;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
@@ -1149,6 +1153,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
                     DEV_CAP_EXT_2_FLAG_FSM);
        MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
 
+       /* turn off QCN for guests */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+       field &= 0xfe;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+
        return 0;
 }
 
index 1409d0cd6143e8554524c8377a018ccb1c3edba1..0b16db015745b33885ff24198bd933d22d547eae 100644 (file)
@@ -175,7 +175,7 @@ enum mlx4_res_tracker_free_type {
 
 /*
  *Virtual HCR structures.
- * mlx4_vhcr is the sw representation, in machine endianess
+ * mlx4_vhcr is the sw representation, in machine endianness
  *
  * mlx4_vhcr_cmd is the formalized structure, the one that is passed
  * to FW to go through communication channel.
index 2a8268e6be15d0b8682b8ad47bb4bb4ac071b243..94553b501c76e7a907eff59b44f24821e2cbe54a 100644 (file)
@@ -608,6 +608,7 @@ struct mlx4_en_priv {
 #ifdef CONFIG_MLX4_EN_DCB
        struct ieee_ets ets;
        u16 maxrate[IEEE_8021QAZ_MAX_TCS];
+       enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
 #endif
 #ifdef CONFIG_RFS_ACCEL
        spinlock_t filters_lock;
index 2bb8553bd9054b25456ec694ee25696e93ebde25..eda29dbbfcd259824f0a0fbec3876975f215d2e2 100644 (file)
@@ -412,7 +412,6 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params)
index 486e3d26cd4a9ef4bb6a23995b85ac50cd413776..d43e25914d19260ae6ad3b36915e431366cd12d9 100644 (file)
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
-       int port;
+       int port, err = 0;
 
        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};
 
-                               mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                               err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                               if (err)
+                                       goto out;
                        }
                }
 
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
-       return 0;
+out:
+       return err;
 }
 
 static int mpt_mask(struct mlx4_dev *dev)
@@ -3024,7 +3027,7 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
 
        /* Call the SW implementation of write_mtt:
         * - Prepare a dummy mtt struct
-        * - Translate inbox contents to simple addresses in host endianess */
+        * - Translate inbox contents to simple addresses in host endianness */
        mtt.offset = 0;  /* TBD this is broken but I don't handle it since
                            we don't really use it */
        mtt.order = 0;
index 10988fbf47ebbeffd17cd8d1fb4d6a1c77c29405..6f332ebdf3b5a812dd34be59a072e497af380975 100644 (file)
@@ -4144,7 +4144,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
 
        for (i = 0; i < hw->addr_list_size; i++) {
                if (ether_addr_equal(hw->address[i], mac_addr)) {
-                       memset(hw->address[i], 0, ETH_ALEN);
+                       eth_zero_addr(hw->address[i]);
                        writel(0, hw->io + ADD_ADDR_INCR * i +
                                KS_ADD_ADDR_0_HI);
                        return 0;
index 6c72e74fef3e01a7189a81e2e1fa2355a4f2bdbd..81d0f1c86d6dee1243d5d65a7e499767698844eb 100644 (file)
@@ -150,7 +150,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
 
        priv->rx_head = 0;
 
-       /* reset the MAC controler TX/RX desciptor base address */
+       /* reset the MAC controller TX/RX desciptor base address */
        writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
        writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
 }
index a4cdf2f8041a735de3a199d5e0ba6c5040a02b4e..092dcae0d4a969523e7cd99dfd3380398c3e7c8c 100644 (file)
@@ -1343,7 +1343,7 @@ static int init_nic(struct s2io_nic *nic)
                TX_PA_CFG_IGNORE_L2_ERR;
        writeq(val64, &bar0->tx_pa_cfg);
 
-       /* Rx DMA intialization. */
+       /* Rx DMA initialization. */
        val64 = 0;
        for (i = 0; i < config->rx_ring_num; i++) {
                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
index 4fe8ea96bd25d24f1f2296d22230ceebda1e8f64..f6fcf7450352631ad34f7c052fbe5ac960238297 100644 (file)
@@ -394,7 +394,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
 }
 
 /**
- * pch_gbe_set_pauseparam - Set pause paramters
+ * pch_gbe_set_pauseparam - Set pause parameters
  * @netdev:  Network interface device structure
  * @pause:   Pause parameters structure
  * Returns:
index 319d9d40f922e4616945de4ad46927d707668607..13d88a6025c82a89fb290bc6930cd4055bd1535b 100644 (file)
@@ -350,7 +350,7 @@ V.  Recent Changes
     incorrectly defined and corrected (as per Michel Mueller).
 
 02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots
-    were available before reseting the tbusy and tx_full flags
+    were available before resetting the tbusy and tx_full flags
     (as per Michel Mueller).
 
 03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
index 44e8d7d255474d30bf48577723caf9032af43854..57a6e6cd74fc3c9c99708530b431cd0e5b768f8c 100644 (file)
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
        if (mac->phydev)
                phy_start(mac->phydev);
 
-       init_timer(&mac->tx->clean_timer);
-       mac->tx->clean_timer.function = pasemi_mac_tx_timer;
-       mac->tx->clean_timer.data = (unsigned long)mac->tx;
-       mac->tx->clean_timer.expires = jiffies+HZ;
-       add_timer(&mac->tx->clean_timer);
+       setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+                   (unsigned long)mac->tx);
+       mod_timer(&mac->tx->clean_timer, jiffies + HZ);
 
        return 0;
 
index 6e426ae9469228ed55586bca15a8eef1dcb5e5c4..0a5e204a0179a35c15f52a3dea58729c30c2c31f 100644 (file)
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
 
 } __attribute__ ((aligned(64)));
 
-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
 struct rcv_desc {
        __le16 reference_handle;
        __le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
 #define NETXEN_IMAGE_START     0x43000 /* compressed image */
 #define NETXEN_SECONDARY_START 0x200000        /* backup images */
 #define NETXEN_PXE_START       0x3E0000        /* PXE boot rom */
-#define NETXEN_USER_START      0x3E8000        /* Firmare info */
+#define NETXEN_USER_START      0x3E8000        /* Firmware info */
 #define NETXEN_FIXED_START     0x3F0000        /* backup of crbinit */
 #define NETXEN_USER_START_OLD  NETXEN_PXE_START /* very old flash */
 
index 716fc37ada5a961677b577b0c693427ff8b32ee6..db80eb1c6d4fc5ebccea52aa86e87a7578ead04f 100644 (file)
@@ -537,7 +537,7 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
        u8 null_addr[ETH_ALEN];
        int i;
 
-       memset(null_addr, 0, ETH_ALEN);
+       eth_zero_addr(null_addr);
 
        if (netdev->flags & IFF_PROMISC) {
 
index fa4317611fd63fe81df2e23e47fa307b8c5c5348..f221126a5c4e6789cb2630a07dc58b02f0676239 100644 (file)
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
 #define QLCNIC_BRDCFG_START    0x4000          /* board config */
 #define QLCNIC_BOOTLD_START    0x10000         /* bootld */
 #define QLCNIC_IMAGE_START     0x43000         /* compressed image */
-#define QLCNIC_USER_START      0x3E8000        /* Firmare info */
+#define QLCNIC_USER_START      0x3E8000        /* Firmware info */
 
 #define QLCNIC_FW_VERSION_OFFSET       (QLCNIC_USER_START+0x408)
 #define QLCNIC_FW_SIZE_OFFSET          (QLCNIC_USER_START+0x40c)
index f3346a3779d3c36c2f91d4703feb75dc500e39df..69f828eb42cf3762f525ee492d0abe9d5d33d1e7 100644 (file)
@@ -205,7 +205,7 @@ struct qlcnic_add_rings_mbx_out {
  * @phys_addr_{low|high}: DMA address of the transmit buffer
  * @cnsmr_index_{low|high}: host consumer index
  * @size: legth of transmit buffer ring
- * @intr_id: interrput id
+ * @intr_id: interrupt id
  * @src: src of interrupt
  */
 struct qlcnic_tx_mbx {
index 2bb48d57e7a51856225b7cbb7fa7bb0ebca7e510..33669c29b341cb42bb106ec2c634663d0adb2415 100644 (file)
@@ -269,7 +269,7 @@ static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
        }
 
        QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
-       /* Clear gracefull reset bit */
+       /* Clear graceful reset bit */
        val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
        val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
        QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
@@ -889,7 +889,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
  * @adapter: adapter structure
  *
  * Device will remain in this state until:
- *     Reset request ACK's are recieved from all the functions
+ *     Reset request ACK's are received from all the functions
  *     Wait time exceeds max time limit
  *
  * Returns: Error code or Success(0)
index 8011ef3e7707f783f4caf9f1c16f3d7be3410c4f..25800a1dedcb9fbe0635e80386521dc789575fba 100644 (file)
@@ -460,7 +460,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Set Mac addr %pM\n", addr);
        } else {
-               memset(zero_mac_addr, 0, ETH_ALEN);
+               eth_zero_addr(zero_mac_addr);
                addr = &zero_mac_addr[0];
                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Clearing MAC address\n");
index 2c811f66d5acc47da86407222c4eea21d14954d0..4a42e960d331e66ea525a3d7c5755ce29b9e6f6f 100644 (file)
@@ -571,7 +571,7 @@ qcaspi_spi_thread(void *data)
                        }
 
                        /* can only handle other interrupts
-                        * if sync has occured
+                        * if sync has occurred
                         */
                        if (qca->sync == QCASPI_SYNC_READY) {
                                if (intr_cause & SPI_INT_PKT_AVLBL)
index ad0020af2193da8749534c25242047212cccc1a4..c70ab40d86989974d54c9161bf7acd8558d93c74 100644 (file)
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
        int rc = -EINVAL;
 
        if (!rtl_fw_format_ok(tp, rtl_fw)) {
-               netif_err(tp, ifup, dev, "invalid firwmare\n");
+               netif_err(tp, ifup, dev, "invalid firmware\n");
                goto out;
        }
 
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
        RTL_W8(ChipCmd, CmdReset);
 
        rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
-       netdev_reset_queue(tp->dev);
 }
 
 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        u32 status, len;
        u32 opts[2];
        int frags;
-       bool stop_queue;
 
        if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
                netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        txd->opts2 = cpu_to_le32(opts[1]);
 
-       netdev_sent_queue(dev, skb->len);
-
        skb_tx_timestamp(skb);
 
        /* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        tp->cur_tx += frags + 1;
 
-       stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+       RTL_W8(TxPoll, NPQ);
 
-       if (!skb->xmit_more || stop_queue ||
-           netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
-               RTL_W8(TxPoll, NPQ);
-
-               mmiowb();
-       }
+       mmiowb();
 
-       if (stop_queue) {
+       if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
                /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
                 * not miss a ring update when it notices a stopped queue.
                 */
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 {
        unsigned int dirty_tx, tx_left;
-       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        dirty_tx = tp->dirty_tx;
        smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
                rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
                                     tp->TxDescArray + entry);
                if (status & LastFrag) {
-                       pkts_compl++;
-                       bytes_compl += tx_skb->skb->len;
+                       u64_stats_update_begin(&tp->tx_stats.syncp);
+                       tp->tx_stats.packets++;
+                       tp->tx_stats.bytes += tx_skb->skb->len;
+                       u64_stats_update_end(&tp->tx_stats.syncp);
                        dev_kfree_skb_any(tx_skb->skb);
                        tx_skb->skb = NULL;
                }
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
        }
 
        if (tp->dirty_tx != dirty_tx) {
-               netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
-               u64_stats_update_begin(&tp->tx_stats.syncp);
-               tp->tx_stats.packets += pkts_compl;
-               tp->tx_stats.bytes += bytes_compl;
-               u64_stats_update_end(&tp->tx_stats.syncp);
-
                tp->dirty_tx = dirty_tx;
                /* Sync with rtl8169_start_xmit:
                 * - publish dirty_tx ring index (write barrier)
index 4da8bd263997a17baf89b5fe7a3d2198f186827b..7fb244f565b283b0c130caa7476a8e4339564283 100644 (file)
                NETIF_MSG_RX_ERR| \
                NETIF_MSG_TX_ERR)
 
+#define SH_ETH_OFFSET_DEFAULTS                 \
+       [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
+
 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
@@ -132,9 +137,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
-       [TSU_ADRL0]     = 0x0104,
-       [TSU_ADRH31]    = 0x01f8,
-       [TSU_ADRL31]    = 0x01fc,
 
        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
@@ -151,6 +153,8 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
@@ -199,9 +203,6 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_ADRH0]     = 0x0100,
-       [TSU_ADRL0]     = 0x0104,
-       [TSU_ADRH31]    = 0x01f8,
-       [TSU_ADRL31]    = 0x01fc,
 
        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
@@ -210,6 +211,8 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [ECMR]          = 0x0300,
        [RFLR]          = 0x0308,
        [ECSR]          = 0x0310,
@@ -256,6 +259,8 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [ECMR]          = 0x0100,
        [RFLR]          = 0x0108,
        [ECSR]          = 0x0110,
@@ -308,6 +313,8 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+       SH_ETH_OFFSET_DEFAULTS,
+
        [EDMR]          = 0x0000,
        [EDTRR]         = 0x0004,
        [EDRRR]         = 0x0008,
@@ -392,8 +399,6 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [FWALCR1]       = 0x00b4,
 
        [TSU_ADRH0]     = 0x0100,
-       [TSU_ADRL0]     = 0x0104,
-       [TSU_ADRL31]    = 0x01fc,
 };
 
 static void sh_eth_rcv_snd_disable(struct net_device *ndev);
@@ -508,7 +513,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
        .tpauser        = 1,
        .hw_swap        = 1,
        .rmiimode       = 1,
-       .shift_rd0      = 1,
 };
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -589,6 +593,7 @@ static struct sh_eth_cpu_data sh7757_data = {
        .no_ade         = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
+       .rtrate         = 1,
 };
 
 #define SH_GIGA_ETH_BASE       0xfee00000UL
@@ -1392,6 +1397,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
        msleep(2); /* max frame time at 10 Mbps < 1250 us */
        sh_eth_get_stats(ndev);
        sh_eth_reset(ndev);
+
+       /* Set MAC address again */
+       update_mac_address(ndev);
 }
 
 /* free Tx skb function */
@@ -1407,6 +1415,11 @@ static int sh_eth_txfree(struct net_device *ndev)
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
+               /* TACT bit must be checked before all the following reads */
+               rmb();
+               netif_info(mdp, tx_done, ndev,
+                          "tx entry %d status 0x%08x\n",
+                          entry, edmac_to_cpu(mdp, txdesc->status));
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,19 +1457,25 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        limit = boguscnt;
        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+               /* RACT bit must be checked before all the following reads */
+               rmb();
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;
 
                if (--boguscnt < 0)
                        break;
 
+               netif_info(mdp, rx_status, ndev,
+                          "rx entry %d status 0x%08x len %d\n",
+                          entry, desc_status, pkt_len);
+
                if (!(desc_status & RDFEND))
                        ndev->stats.rx_length_errors++;
 
                /* In case of almost all GETHER/ETHERs, the Receive Frame State
                 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
-                * bit 0. However, in case of the R8A7740, R8A779x, and
-                * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+                * bit 0. However, in case of the R8A7740 and R7S72100
+                * the RFS bits are from bit 25 to bit 16. So, the
                 * driver needs right shifting by 16.
                 */
                if (mdp->cd->shift_rd0)
@@ -1494,6 +1513,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        netif_receive_skb(skb);
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += pkt_len;
+                       if (desc_status & RD_RFS8)
+                               ndev->stats.multicast++;
                }
                entry = (++mdp->cur_rx) % mdp->num_rx_ring;
                rxdesc = &mdp->rx_ring[entry];
@@ -1523,6 +1544,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        skb_checksum_none_assert(skb);
                        rxdesc->addr = dma_addr;
                }
+               wmb(); /* RACT bit must be set after all the above writes */
                if (entry >= mdp->num_rx_ring - 1)
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1557,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        /* If we don't need to check status, don't. -KDU */
        if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
                /* fix the values for the next receiving if RDE is set */
-               if (intr_status & EESR_RDE) {
+               if (intr_status & EESR_RDE &&
+                   mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
                        u32 count = (sh_eth_read(ndev, RDFAR) -
                                     sh_eth_read(ndev, RDLAR)) >> 4;
 
@@ -1922,6 +1945,192 @@ error_exit:
        return ret;
 }
 
+/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
+ * version must be bumped as well.  Just adding registers up to that
+ * limit is fine, as long as the existing register indices don't
+ * change.
+ */
+#define SH_ETH_REG_DUMP_VERSION                1
+#define SH_ETH_REG_DUMP_MAX_REGS       256
+
+static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       struct sh_eth_cpu_data *cd = mdp->cd;
+       u32 *valid_map;
+       size_t len;
+
+       BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
+
+       /* Dump starts with a bitmap that tells ethtool which
+        * registers are defined for this chip.
+        */
+       len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
+       if (buf) {
+               valid_map = buf;
+               buf += len;
+       } else {
+               valid_map = NULL;
+       }
+
+       /* Add a register to the dump, if it has a defined offset.
+        * This automatically skips most undefined registers, but for
+        * some it is also necessary to check a capability flag in
+        * struct sh_eth_cpu_data.
+        */
+#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
+#define add_reg_from(reg, read_expr) do {                              \
+               if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {    \
+                       if (buf) {                                      \
+                               mark_reg_valid(reg);                    \
+                               *buf++ = read_expr;                     \
+                       }                                               \
+                       ++len;                                          \
+               }                                                       \
+       } while (0)
+#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
+#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
+
+       add_reg(EDSR);
+       add_reg(EDMR);
+       add_reg(EDTRR);
+       add_reg(EDRRR);
+       add_reg(EESR);
+       add_reg(EESIPR);
+       add_reg(TDLAR);
+       add_reg(TDFAR);
+       add_reg(TDFXR);
+       add_reg(TDFFR);
+       add_reg(RDLAR);
+       add_reg(RDFAR);
+       add_reg(RDFXR);
+       add_reg(RDFFR);
+       add_reg(TRSCER);
+       add_reg(RMFCR);
+       add_reg(TFTR);
+       add_reg(FDR);
+       add_reg(RMCR);
+       add_reg(TFUCR);
+       add_reg(RFOCR);
+       if (cd->rmiimode)
+               add_reg(RMIIMODE);
+       add_reg(FCFTR);
+       if (cd->rpadir)
+               add_reg(RPADIR);
+       if (!cd->no_trimd)
+               add_reg(TRIMD);
+       add_reg(ECMR);
+       add_reg(ECSR);
+       add_reg(ECSIPR);
+       add_reg(PIR);
+       if (!cd->no_psr)
+               add_reg(PSR);
+       add_reg(RDMLR);
+       add_reg(RFLR);
+       add_reg(IPGR);
+       if (cd->apr)
+               add_reg(APR);
+       if (cd->mpr)
+               add_reg(MPR);
+       add_reg(RFCR);
+       add_reg(RFCF);
+       if (cd->tpauser)
+               add_reg(TPAUSER);
+       add_reg(TPAUSECR);
+       add_reg(GECMR);
+       if (cd->bculr)
+               add_reg(BCULR);
+       add_reg(MAHR);
+       add_reg(MALR);
+       add_reg(TROCR);
+       add_reg(CDCR);
+       add_reg(LCCR);
+       add_reg(CNDCR);
+       add_reg(CEFCR);
+       add_reg(FRECR);
+       add_reg(TSFRCR);
+       add_reg(TLFRCR);
+       add_reg(CERCR);
+       add_reg(CEECR);
+       add_reg(MAFCR);
+       if (cd->rtrate)
+               add_reg(RTRATE);
+       if (cd->hw_crc)
+               add_reg(CSMR);
+       if (cd->select_mii)
+               add_reg(RMII_MII);
+       add_reg(ARSTR);
+       if (cd->tsu) {
+               add_tsu_reg(TSU_CTRST);
+               add_tsu_reg(TSU_FWEN0);
+               add_tsu_reg(TSU_FWEN1);
+               add_tsu_reg(TSU_FCM);
+               add_tsu_reg(TSU_BSYSL0);
+               add_tsu_reg(TSU_BSYSL1);
+               add_tsu_reg(TSU_PRISL0);
+               add_tsu_reg(TSU_PRISL1);
+               add_tsu_reg(TSU_FWSL0);
+               add_tsu_reg(TSU_FWSL1);
+               add_tsu_reg(TSU_FWSLC);
+               add_tsu_reg(TSU_QTAG0);
+               add_tsu_reg(TSU_QTAG1);
+               add_tsu_reg(TSU_QTAGM0);
+               add_tsu_reg(TSU_QTAGM1);
+               add_tsu_reg(TSU_FWSR);
+               add_tsu_reg(TSU_FWINMK);
+               add_tsu_reg(TSU_ADQT0);
+               add_tsu_reg(TSU_ADQT1);
+               add_tsu_reg(TSU_VTAG0);
+               add_tsu_reg(TSU_VTAG1);
+               add_tsu_reg(TSU_ADSBSY);
+               add_tsu_reg(TSU_TEN);
+               add_tsu_reg(TSU_POST1);
+               add_tsu_reg(TSU_POST2);
+               add_tsu_reg(TSU_POST3);
+               add_tsu_reg(TSU_POST4);
+               if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
+                       /* This is the start of a table, not just a single
+                        * register.
+                        */
+                       if (buf) {
+                               unsigned int i;
+
+                               mark_reg_valid(TSU_ADRH0);
+                               for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
+                                       *buf++ = ioread32(
+                                               mdp->tsu_addr +
+                                               mdp->reg_offset[TSU_ADRH0] +
+                                               i * 4);
+                       }
+                       len += SH_ETH_TSU_CAM_ENTRIES * 2;
+               }
+       }
+
+#undef mark_reg_valid
+#undef add_reg_from
+#undef add_reg
+#undef add_tsu_reg
+
+       return len * 4;
+}
+
+static int sh_eth_get_regs_len(struct net_device *ndev)
+{
+       return __sh_eth_get_regs(ndev, NULL);
+}
+
+static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+                           void *buf)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       regs->version = SH_ETH_REG_DUMP_VERSION;
+
+       pm_runtime_get_sync(&mdp->pdev->dev);
+       __sh_eth_get_regs(ndev, buf);
+       pm_runtime_put_sync(&mdp->pdev->dev);
+}
+
 static int sh_eth_nway_reset(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2067,6 +2276,8 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_settings   = sh_eth_get_settings,
        .set_settings   = sh_eth_set_settings,
+       .get_regs_len   = sh_eth_get_regs_len,
+       .get_regs       = sh_eth_get_regs,
        .nway_reset     = sh_eth_nway_reset,
        .get_msglevel   = sh_eth_get_msglevel,
        .set_msglevel   = sh_eth_set_msglevel,
@@ -2174,7 +2385,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        spin_unlock_irqrestore(&mdp->lock, flags);
 
-       if (skb_padto(skb, ETH_ZLEN))
+       if (skb_put_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;
 
        entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2403,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        txdesc->buffer_length = skb->len;
 
+       wmb(); /* TACT bit must be set after all the above writes */
        if (entry >= mdp->num_tx_ring - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
        else
@@ -2205,6 +2417,22 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+/* The statistics registers have write-clear behaviour, which means we
+ * will lose any increment between the read and write.  We mitigate
+ * this by only clearing when we read a non-zero value, so we will
+ * never falsely report a total of zero.
+ */
+static void
+sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
+{
+       u32 delta = sh_eth_read(ndev, reg);
+
+       if (delta) {
+               *stat += delta;
+               sh_eth_write(ndev, 0, reg);
+       }
+}
+
 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2215,21 +2443,18 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
        if (!mdp->is_opened)
                return &ndev->stats;
 
-       ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-       sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
-       ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-       sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
-       ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-       sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
+       sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
+       sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
+       sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
 
        if (sh_eth_is_gether(mdp)) {
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-               sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-               sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
+               sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+                                  CERCR);
+               sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+                                  CEECR);
        } else {
-               ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-               sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
+               sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+                                  CNDCR);
        }
 
        return &ndev->stats;
index 259d03f353e109709abfbaac4a447d9f4af82026..06dbbe5201cbc915cf28401307cdaa5f0d2dc91f 100644 (file)
 #define SH_ETH_TSU_CAM_ENTRIES 32
 
 enum {
+       /* IMPORTANT: To keep ethtool register dump working, add new
+        * register names immediately before SH_ETH_MAX_REGISTER_OFFSET.
+        */
+
        /* E-DMAC registers */
        EDSR = 0,
        EDMR,
@@ -131,9 +135,7 @@ enum {
        TSU_POST3,
        TSU_POST4,
        TSU_ADRH0,
-       TSU_ADRL0,
-       TSU_ADRH31,
-       TSU_ADRL31,
+       /* TSU_ADR{H,L}{0..31} are assumed to be contiguous */
 
        TXNLCR0,
        TXALCR0,
@@ -491,6 +493,7 @@ struct sh_eth_cpu_data {
        unsigned select_mii:1;  /* EtherC have RMII_MII (MII select register) */
        unsigned shift_rd0:1;   /* shift Rx descriptor word 0 right by 16 */
        unsigned rmiimode:1;    /* EtherC has RMIIMODE register */
+       unsigned rtrate:1;      /* EtherC has RTRATE register */
 };
 
 struct sh_eth_private {
@@ -543,19 +546,29 @@ static inline void sh_eth_soft_swap(char *src, int len)
 #endif
 }
 
+#define SH_ETH_OFFSET_INVALID  ((u16) ~0)
+
 static inline void sh_eth_write(struct net_device *ndev, u32 data,
                                int enum_index)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
+       u16 offset = mdp->reg_offset[enum_index];
 
-       iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
+       if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+               return;
+
+       iowrite32(data, mdp->addr + offset);
 }
 
 static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
+       u16 offset = mdp->reg_offset[enum_index];
+
+       if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+               return ~0U;
 
-       return ioread32(mdp->addr + mdp->reg_offset[enum_index]);
+       return ioread32(mdp->addr + offset);
 }
 
 static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
index e5a15a4c4e8ff30eadebcd21b77d8d6c95e0dafc..65e140315a58ec14411d51333a489e37588c0e31 100644 (file)
@@ -32,6 +32,9 @@
 #include <linux/bitops.h>
 #include <net/switchdev.h>
 #include <net/rtnetlink.h>
+#include <net/ip_fib.h>
+#include <net/netevent.h>
+#include <net/arp.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 #include <generated/utsrelease.h>
 
@@ -111,9 +114,10 @@ struct rocker_flow_tbl_key {
 
 struct rocker_flow_tbl_entry {
        struct hlist_node entry;
-       u32 ref_count;
+       u32 cmd;
        u64 cookie;
        struct rocker_flow_tbl_key key;
+       size_t key_len;
        u32 key_crc32; /* key */
 };
 
@@ -161,6 +165,16 @@ struct rocker_internal_vlan_tbl_entry {
        __be16 vlan_id;
 };
 
+struct rocker_neigh_tbl_entry {
+       struct hlist_node entry;
+       __be32 ip_addr; /* key */
+       struct net_device *dev;
+       u32 ref_count;
+       u32 index;
+       u8 eth_dst[ETH_ALEN];
+       bool ttl_check;
+};
+
 struct rocker_desc_info {
        char *data; /* mapped */
        size_t data_size;
@@ -234,6 +248,9 @@ struct rocker {
        unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
        DECLARE_HASHTABLE(internal_vlan_tbl, 8);
        spinlock_t internal_vlan_tbl_lock;
+       DECLARE_HASHTABLE(neigh_tbl, 16);
+       spinlock_t neigh_tbl_lock;
+       u32 neigh_tbl_next_index;
 };
 
 static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
@@ -256,7 +273,6 @@ enum {
        ROCKER_PRIORITY_VLAN = 1,
        ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
        ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
-       ROCKER_PRIORITY_UNICAST_ROUTING = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
        ROCKER_PRIORITY_BRIDGING_VLAN = 3,
@@ -1280,9 +1296,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
        u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
 
        if (enable)
-               val |= 1 << rocker_port->pport;
+               val |= 1ULL << rocker_port->pport;
        else
-               val &= ~(1 << rocker_port->pport);
+               val &= ~(1ULL << rocker_port->pport);
        rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
 }
 
@@ -1940,8 +1956,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
        struct rocker_tlv *cmd_info;
        int err = 0;
 
-       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
-                              ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
@@ -1998,8 +2013,7 @@ static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
        const struct rocker_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
 
-       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
-                              ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
@@ -2168,9 +2182,9 @@ static int rocker_cmd_group_tbl_del(struct rocker *rocker,
        return 0;
 }
 
-/*****************************************
- * Flow, group, FDB, internal VLAN tables
- *****************************************/
+/***************************************************
+ * Flow, group, FDB, internal VLAN and neigh tables
+ ***************************************************/
 
 static int rocker_init_tbls(struct rocker *rocker)
 {
@@ -2186,6 +2200,9 @@ static int rocker_init_tbls(struct rocker *rocker)
        hash_init(rocker->internal_vlan_tbl);
        spin_lock_init(&rocker->internal_vlan_tbl_lock);
 
+       hash_init(rocker->neigh_tbl);
+       spin_lock_init(&rocker->neigh_tbl_lock);
+
        return 0;
 }
 
@@ -2196,6 +2213,7 @@ static void rocker_free_tbls(struct rocker *rocker)
        struct rocker_group_tbl_entry *group_entry;
        struct rocker_fdb_tbl_entry *fdb_entry;
        struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
+       struct rocker_neigh_tbl_entry *neigh_entry;
        struct hlist_node *tmp;
        int bkt;
 
@@ -2219,16 +2237,22 @@ static void rocker_free_tbls(struct rocker *rocker)
                           tmp, internal_vlan_entry, entry)
                hash_del(&internal_vlan_entry->entry);
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
+
+       spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
+       hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
+               hash_del(&neigh_entry->entry);
+       spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
 }
 
 static struct rocker_flow_tbl_entry *
 rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
 {
        struct rocker_flow_tbl_entry *found;
+       size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
 
        hash_for_each_possible(rocker->flow_tbl, found,
                               entry, match->key_crc32) {
-               if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+               if (memcmp(&found->key, &match->key, key_len) == 0)
                        return found;
        }
 
@@ -2241,42 +2265,34 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
+       size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long flags;
-       bool add_to_hw = false;
-       int err = 0;
 
-       match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+       match->key_crc32 = crc32(~0, &match->key, key_len);
 
        spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
 
        found = rocker_flow_tbl_find(rocker, match);
 
        if (found) {
-               kfree(match);
+               match->cookie = found->cookie;
+               hash_del(&found->entry);
+               kfree(found);
+               found = match;
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
        } else {
                found = match;
                found->cookie = rocker->flow_tbl_next_cookie++;
-               hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
-               add_to_hw = true;
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
        }
 
-       found->ref_count++;
+       hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
 
        spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 
-       if (add_to_hw) {
-               err = rocker_cmd_exec(rocker, rocker_port,
-                                     rocker_cmd_flow_tbl_add,
-                                     found, NULL, NULL, nowait);
-               if (err) {
-                       spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
-                       hash_del(&found->entry);
-                       spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
-                       kfree(found);
-               }
-       }
-
-       return err;
+       return rocker_cmd_exec(rocker, rocker_port,
+                              rocker_cmd_flow_tbl_add,
+                              found, NULL, NULL, nowait);
 }
 
 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
@@ -2285,29 +2301,26 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
+       size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long flags;
-       bool del_from_hw = false;
        int err = 0;
 
-       match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+       match->key_crc32 = crc32(~0, &match->key, key_len);
 
        spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
 
        found = rocker_flow_tbl_find(rocker, match);
 
        if (found) {
-               found->ref_count--;
-               if (found->ref_count == 0) {
-                       hash_del(&found->entry);
-                       del_from_hw = true;
-               }
+               hash_del(&found->entry);
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
        }
 
        spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 
        kfree(match);
 
-       if (del_from_hw) {
+       if (found) {
                err = rocker_cmd_exec(rocker, rocker_port,
                                      rocker_cmd_flow_tbl_del,
                                      found, NULL, NULL, nowait);
@@ -2467,6 +2480,31 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
        return rocker_flow_tbl_do(rocker_port, flags, entry);
 }
 
+static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+                                         __be16 eth_type, __be32 dst,
+                                         __be32 dst_mask, u32 priority,
+                                         enum rocker_of_dpa_table_id goto_tbl,
+                                         u32 group_id, int flags)
+{
+       struct rocker_flow_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+       entry->key.priority = priority;
+       entry->key.ucast_routing.eth_type = eth_type;
+       entry->key.ucast_routing.dst4 = dst;
+       entry->key.ucast_routing.dst4_mask = dst_mask;
+       entry->key.ucast_routing.goto_tbl = goto_tbl;
+       entry->key.ucast_routing.group_id = group_id;
+       entry->key_len = offsetof(struct rocker_flow_tbl_key,
+                                 ucast_routing.group_id);
+
+       return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
                               int flags, u32 in_pport,
                               u32 in_pport_mask,
@@ -2554,7 +2592,6 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_group_tbl_entry *found;
        unsigned long flags;
-       int err = 0;
 
        spin_lock_irqsave(&rocker->group_tbl_lock, flags);
 
@@ -2574,12 +2611,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
 
        spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
 
-       if (found->cmd)
-               err = rocker_cmd_exec(rocker, rocker_port,
-                                     rocker_cmd_group_tbl_add,
-                                     found, NULL, NULL, nowait);
-
-       return err;
+       return rocker_cmd_exec(rocker, rocker_port,
+                              rocker_cmd_group_tbl_add,
+                              found, NULL, NULL, nowait);
 }
 
 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
@@ -2675,13 +2709,253 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port,
                                       group_id);
 }
 
+static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
+                                  int flags, u32 index, u8 *src_mac,
+                                  u8 *dst_mac, __be16 vlan_id,
+                                  bool ttl_check, u32 pport)
+{
+       struct rocker_group_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
+       if (src_mac)
+               ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
+       if (dst_mac)
+               ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
+       entry->l3_unicast.vlan_id = vlan_id;
+       entry->l3_unicast.ttl_check = ttl_check;
+       entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
+
+       return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static struct rocker_neigh_tbl_entry *
+       rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+{
+       struct rocker_neigh_tbl_entry *found;
+
+       hash_for_each_possible(rocker->neigh_tbl, found,
+                              entry, be32_to_cpu(ip_addr))
+               if (found->ip_addr == ip_addr)
+                       return found;
+
+       return NULL;
+}
+
+static void _rocker_neigh_add(struct rocker *rocker,
+                             struct rocker_neigh_tbl_entry *entry)
+{
+       entry->index = rocker->neigh_tbl_next_index++;
+       entry->ref_count++;
+       hash_add(rocker->neigh_tbl, &entry->entry,
+                be32_to_cpu(entry->ip_addr));
+}
+
+static void _rocker_neigh_del(struct rocker *rocker,
+                             struct rocker_neigh_tbl_entry *entry)
+{
+       if (--entry->ref_count == 0) {
+               hash_del(&entry->entry);
+               kfree(entry);
+       }
+}
+
+static void _rocker_neigh_update(struct rocker *rocker,
+                                struct rocker_neigh_tbl_entry *entry,
+                                u8 *eth_dst, bool ttl_check)
+{
+       if (eth_dst) {
+               ether_addr_copy(entry->eth_dst, eth_dst);
+               entry->ttl_check = ttl_check;
+       } else {
+               entry->ref_count++;
+       }
+}
+
+static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
+                                 int flags, __be32 ip_addr, u8 *eth_dst)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_neigh_tbl_entry *entry;
+       struct rocker_neigh_tbl_entry *found;
+       unsigned long lock_flags;
+       __be16 eth_type = htons(ETH_P_IP);
+       enum rocker_of_dpa_table_id goto_tbl =
+               ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+       u32 group_id;
+       u32 priority = 0;
+       bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+       bool updating;
+       bool removing;
+       int err = 0;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+       found = rocker_neigh_tbl_find(rocker, ip_addr);
+
+       updating = found && adding;
+       removing = found && !adding;
+       adding = !found && adding;
+
+       if (adding) {
+               entry->ip_addr = ip_addr;
+               entry->dev = rocker_port->dev;
+               ether_addr_copy(entry->eth_dst, eth_dst);
+               entry->ttl_check = true;
+               _rocker_neigh_add(rocker, entry);
+       } else if (removing) {
+               memcpy(entry, found, sizeof(*entry));
+               _rocker_neigh_del(rocker, found);
+       } else if (updating) {
+               _rocker_neigh_update(rocker, found, eth_dst, true);
+               memcpy(entry, found, sizeof(*entry));
+       } else {
+               err = -ENOENT;
+       }
+
+       spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+       if (err)
+               goto err_out;
+
+       /* For each active neighbor, we have an L3 unicast group and
+        * a /32 route to the neighbor, which uses the L3 unicast
+        * group.  The L3 unicast group can also be referred to by
+        * other routes' nexthops.
+        */
+
+       err = rocker_group_l3_unicast(rocker_port, flags,
+                                     entry->index,
+                                     rocker_port->dev->dev_addr,
+                                     entry->eth_dst,
+                                     rocker_port->internal_vlan_id,
+                                     entry->ttl_check,
+                                     rocker_port->pport);
+       if (err) {
+               netdev_err(rocker_port->dev,
+                          "Error (%d) L3 unicast group index %d\n",
+                          err, entry->index);
+               goto err_out;
+       }
+
+       if (adding || removing) {
+               group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
+               err = rocker_flow_tbl_ucast4_routing(rocker_port,
+                                                    eth_type, ip_addr,
+                                                    inet_make_mask(32),
+                                                    priority, goto_tbl,
+                                                    group_id, flags);
+
+               if (err)
+                       netdev_err(rocker_port->dev,
+                                  "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
+                                  err, &entry->ip_addr, group_id);
+       }
+
+err_out:
+       if (!adding)
+               kfree(entry);
+
+       return err;
+}
+
+static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
+                                   __be32 ip_addr)
+{
+       struct net_device *dev = rocker_port->dev;
+       struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
+       int err = 0;
+
+       if (!n)
+               n = neigh_create(&arp_tbl, &ip_addr, dev);
+       if (IS_ERR(n))
+               return PTR_ERR(n);
+
+       /* If the neigh is already resolved, then go ahead and
+        * install the entry, otherwise start the ARP process to
+        * resolve the neigh.
+        */
+
+       if (n->nud_state & NUD_VALID)
+               err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+       else
+               neigh_event_send(n, NULL);
+
+       return err;
+}
+
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+                              __be32 ip_addr, u32 *index)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_neigh_tbl_entry *entry;
+       struct rocker_neigh_tbl_entry *found;
+       unsigned long lock_flags;
+       bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+       bool updating;
+       bool removing;
+       bool resolved = true;
+       int err = 0;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+       found = rocker_neigh_tbl_find(rocker, ip_addr);
+       if (found)
+               *index = found->index;
+
+       updating = found && adding;
+       removing = found && !adding;
+       adding = !found && adding;
+
+       if (adding) {
+               entry->ip_addr = ip_addr;
+               entry->dev = rocker_port->dev;
+               _rocker_neigh_add(rocker, entry);
+               *index = entry->index;
+               resolved = false;
+       } else if (removing) {
+               _rocker_neigh_del(rocker, found);
+       } else if (updating) {
+               _rocker_neigh_update(rocker, found, NULL, false);
+               resolved = !is_zero_ether_addr(found->eth_dst);
+       } else {
+               err = -ENOENT;
+       }
+
+       spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+       if (!adding)
+               kfree(entry);
+
+       if (err)
+               return err;
+
+       /* Resolved means neigh ip_addr is resolved to neigh mac. */
+
+       if (!resolved)
+               err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+
+       return err;
+}
+
 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
                                        int flags, __be16 vlan_id)
 {
        struct rocker_port *p;
        struct rocker *rocker = rocker_port->rocker;
        u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
-       u32 group_ids[rocker->port_count];
+       u32 group_ids[ROCKER_FP_PORTS_MAX];
        u8 group_count = 0;
        int err;
        int i;
@@ -3429,6 +3703,51 @@ not_found:
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
 }
 
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
+                               int dst_len, struct fib_info *fi, u32 tb_id,
+                               int flags)
+{
+       struct fib_nh *nh;
+       __be16 eth_type = htons(ETH_P_IP);
+       __be32 dst_mask = inet_make_mask(dst_len);
+       __be16 internal_vlan_id = rocker_port->internal_vlan_id;
+       u32 priority = fi->fib_priority;
+       enum rocker_of_dpa_table_id goto_tbl =
+               ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+       u32 group_id;
+       bool nh_on_port;
+       bool has_gw;
+       u32 index;
+       int err;
+
+       /* XXX support ECMP */
+
+       nh = fi->fib_nh;
+       nh_on_port = (fi->fib_dev == rocker_port->dev);
+       has_gw = !!nh->nh_gw;
+
+       if (has_gw && nh_on_port) {
+               err = rocker_port_ipv4_nh(rocker_port, flags,
+                                         nh->nh_gw, &index);
+               if (err)
+                       return err;
+
+               group_id = ROCKER_GROUP_L3_UNICAST(index);
+       } else {
+               /* Send to CPU for processing */
+               group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
+       }
+
+       err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+                                            dst_mask, priority, goto_tbl,
+                                            group_id, flags);
+       if (err)
+               netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
+                          err, &dst);
+
+       return err;
+}
+
 /*****************
  * Net device ops
  *****************/
@@ -3830,6 +4149,30 @@ static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
        return rocker_port_stp_update(rocker_port, state);
 }
 
+static int rocker_port_switch_fib_ipv4_add(struct net_device *dev,
+                                          __be32 dst, int dst_len,
+                                          struct fib_info *fi,
+                                          u8 tos, u8 type, u32 tb_id)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int flags = 0;
+
+       return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
+                                   fi, tb_id, flags);
+}
+
+static int rocker_port_switch_fib_ipv4_del(struct net_device *dev,
+                                          __be32 dst, int dst_len,
+                                          struct fib_info *fi,
+                                          u8 tos, u8 type, u32 tb_id)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int flags = ROCKER_OP_FLAG_REMOVE;
+
+       return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
+                                   fi, tb_id, flags);
+}
+
 static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_open                       = rocker_port_open,
        .ndo_stop                       = rocker_port_stop,
@@ -3844,6 +4187,8 @@ static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_bridge_getlink             = rocker_port_bridge_getlink,
        .ndo_switch_parent_id_get       = rocker_port_switch_parent_id_get,
        .ndo_switch_port_stp_update     = rocker_port_switch_port_stp_update,
+       .ndo_switch_fib_ipv4_add        = rocker_port_switch_fib_ipv4_add,
+       .ndo_switch_fib_ipv4_del        = rocker_port_switch_fib_ipv4_del,
 };
 
 /********************
@@ -4204,8 +4549,9 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
                       NAPI_POLL_WEIGHT);
        rocker_carrier_init(rocker_port);
 
-       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
-                               NETIF_F_HW_SWITCH_OFFLOAD;
+       dev->features |= NETIF_F_NETNS_LOCAL |
+                        NETIF_F_HW_VLAN_CTAG_FILTER |
+                        NETIF_F_HW_SWITCH_OFFLOAD;
 
        err = register_netdev(dev);
        if (err) {
@@ -4241,6 +4587,8 @@ static int rocker_probe_ports(struct rocker *rocker)
 
        alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
        rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+       if (!rocker->ports)
+               return -ENOMEM;
        for (i = 0; i < rocker->port_count; i++) {
                err = rocker_probe_port(rocker, i);
                if (err)
@@ -4544,6 +4892,48 @@ static struct notifier_block rocker_netdevice_nb __read_mostly = {
        .notifier_call = rocker_netdevice_event,
 };
 
+/************************************
+ * Net event notifier event handler
+ ************************************/
+
+static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
+       __be32 ip_addr = *(__be32 *)n->primary_key;
+
+       return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+}
+
+static int rocker_netevent_event(struct notifier_block *unused,
+                                unsigned long event, void *ptr)
+{
+       struct net_device *dev;
+       struct neighbour *n = ptr;
+       int err;
+
+       switch (event) {
+       case NETEVENT_NEIGH_UPDATE:
+               if (n->tbl != &arp_tbl)
+                       return NOTIFY_DONE;
+               dev = n->dev;
+               if (!rocker_port_dev_check(dev))
+                       return NOTIFY_DONE;
+               err = rocker_neigh_update(dev, n);
+               if (err)
+                       netdev_warn(dev,
+                                   "failed to handle neigh update (err %d)\n",
+                                   err);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netevent_nb __read_mostly = {
+       .notifier_call = rocker_netevent_event,
+};
+
 /***********************
  * Module init and exit
  ***********************/
@@ -4553,18 +4943,21 @@ static int __init rocker_module_init(void)
        int err;
 
        register_netdevice_notifier(&rocker_netdevice_nb);
+       register_netevent_notifier(&rocker_netevent_nb);
        err = pci_register_driver(&rocker_pci_driver);
        if (err)
                goto err_pci_register_driver;
        return 0;
 
 err_pci_register_driver:
+       unregister_netevent_notifier(&rocker_netevent_nb);
        unregister_netdevice_notifier(&rocker_netdevice_nb);
        return err;
 }
 
 static void __exit rocker_module_exit(void)
 {
+       unregister_netevent_notifier(&rocker_netevent_nb);
        unregister_netdevice_notifier(&rocker_netdevice_nb);
        pci_unregister_driver(&rocker_pci_driver);
 }
index 0a94b7c300bec629707934305450f4e9722bd283..51e430d251389d05936aef44f65306fe0c42366a 100644 (file)
@@ -27,6 +27,8 @@ enum {
        ROCKER_ENOBUFS = 105,
 };
 
+#define ROCKER_FP_PORTS_MAX 62
+
 #define PCI_VENDOR_ID_REDHAT           0x1b36
 #define PCI_DEVICE_ID_REDHAT_ROCKER    0x0006
 
index c8a01ee4d25e339ba22fff4ee991b3691170093d..413ea14ab91f7471f2e0f001478846cdfd07c605 100644 (file)
@@ -422,11 +422,11 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
        /* assign queue number */
        tx_ring->queue_no = queue_no;
 
-       /* initalise counters */
+       /* initialise counters */
        tx_ring->dirty_tx = 0;
        tx_ring->cur_tx = 0;
 
-       /* initalise TX queue lock */
+       /* initialise TX queue lock */
        spin_lock_init(&tx_ring->tx_lock);
 
        return 0;
@@ -515,7 +515,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
                        goto err_free_rx_buffers;
        }
 
-       /* initalise counters */
+       /* initialise counters */
        rx_ring->cur_rx = 0;
        rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
        priv->dma_buf_sz = bfsize;
@@ -837,7 +837,7 @@ static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
        /* free the skbuffs of the ring */
        tx_free_ring_skbufs(tx_ring);
 
-       /* initalise counters */
+       /* initialise counters */
        tx_ring->cur_tx = 0;
        tx_ring->dirty_tx = 0;
 
@@ -1176,7 +1176,7 @@ static int sxgbe_open(struct net_device *dev)
        if (priv->phydev)
                phy_start(priv->phydev);
 
-       /* initalise TX coalesce parameters */
+       /* initialise TX coalesce parameters */
        sxgbe_tx_init_coalesce(priv);
 
        if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
@@ -1721,7 +1721,7 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
  *  Description:
  *  This function is a driver entry point whenever ifconfig command gets
  *  executed to see device statistics. Statistics are number of
- *  bytes sent or received, errors occured etc.
+ *  bytes sent or received, errors occurred etc.
  *  Return value:
  *  This function returns various statistical information of device.
  */
index 238482495e81fa3111554fc1d98fd0209e1499a8..33d2f9aa1b53262b39ad191236eef8d79a988ca1 100644 (file)
@@ -3215,7 +3215,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
        return status;
 }
 
-/* Fake a successfull reset, which will be performed later in efx_io_resume. */
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
 static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
 {
        struct efx_nic *efx = pci_get_drvdata(pdev);
index 75975328e0206ce94ca091b3b22201f43dfe02f4..bb89e96a125eab7bf0e5d8569f6119adb720dbb5 100644 (file)
@@ -645,7 +645,7 @@ static bool efx_check_tx_flush_complete(struct efx_nic *efx)
 }
 
 /* Flush all the transmit queues, and continue flushing receive queues until
- * they're all flushed. Wait for the DRAIN events to be recieved so that there
+ * they're all flushed. Wait for the DRAIN events to be received so that there
  * are no more RX and TX events left on any channel. */
 static int efx_farch_do_flush(struct efx_nic *efx)
 {
@@ -1108,7 +1108,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 }
 
 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
- * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
  * the RX queue back to the mask of RX queues in need of flushing.
  */
 static void
index a707fb5ef14c752b0b48b01b9e228d2235e6c238..e028de10e1b743d2e9adf6334d0ad3cbdb176001 100644 (file)
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
index a8bbbad68a88e6e3c2c9b7480bdb88582b8abf41..fe83430796fd04f3aa502de8f324f107380a680f 100644 (file)
@@ -1067,7 +1067,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
 }
 
 /* Copy the list of individual addresses into the vfdi_status.peers
- * array and auxillary pages, protected by %local_lock. Drop that lock
+ * array and auxiliary pages, protected by %local_lock. Drop that lock
  * and then broadcast the address list to every VF.
  */
 static void efx_siena_sriov_peer_work(struct work_struct *data)
index ae044f44936a2fd49bf9ff2728dd6e7fe7700baf..f62901d4cae0e033d9b4e9849cf0d54c378b5be0 100644 (file)
@@ -98,7 +98,7 @@ struct vfdi_endpoint {
  * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
  * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
  *     finalize the SRAM entries.
- * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targetting the given RXQ.
+ * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
  * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
  * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
  *     from PF and write the initial status.
@@ -148,7 +148,7 @@ enum vfdi_op {
  * @u.init_txq.flags: Checksum offload flags.
  * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
  *     address of each page backing the transmit queue.
- * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targetting
+ * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
  *     all traffic at this receive queue.
  * @u.mac_filter.flags: MAC filter flags.
  * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
index 6b33127ab352a43ed6a787af7eedde554241e1b3..3449893aea8d402fb2fc56582df92a04aa157c10 100644 (file)
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
     smc->packets_waiting = 0;
 
     smc_reset(dev);
-    init_timer(&smc->media);
-    smc->media.function = media_check;
-    smc->media.data = (u_long) dev;
-    smc->media.expires = jiffies + HZ;
-    add_timer(&smc->media);
+    setup_timer(&smc->media, media_check, (u_long)dev);
+    mod_timer(&smc->media, jiffies + HZ);
 
     return 0;
 } /* smc_open */
index 88a55f95fe09bc544b0acf0e28bae507086788f8..209ee1b27f8d75aa2a3efa12c74ae28484f465c5 100644 (file)
@@ -91,6 +91,10 @@ static const char version[] =
 
 #include "smc91x.h"
 
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/neponset.h>
+#endif
+
 #ifndef SMC_NOWAIT
 # define SMC_NOWAIT            0
 #endif
@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
        ret = smc_request_attrib(pdev, ndev);
        if (ret)
                goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
-       neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+       if (machine_is_assabet() && machine_has_neponset())
+               neponset_ncr_set(NCR_ENET_OSC_EN);
 #endif
        platform_set_drvdata(pdev, ndev);
        ret = smc_enable_device(pdev);
index be67baf5f6778d08df4eaa06216914b77ab8f2b5..3a18501d1068c36816554f953e367ff1439c2a36 100644 (file)
  * Define your architecture specific bus configuration parameters here.
  */
 
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
-    defined(CONFIG_MACH_MAINSTONE) ||\
-    defined(CONFIG_MACH_ZYLONITE) ||\
-    defined(CONFIG_MACH_LITTLETON) ||\
-    defined(CONFIG_MACH_ZYLONITE2) ||\
-    defined(CONFIG_ARCH_VIPER) ||\
-    defined(CONFIG_MACH_STARGATE2) ||\
-    defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
 
 #include <asm/mach-types.h>
 
 /* We actually can't write halfwords properly if not word aligned */
 static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
-       if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
-               unsigned int v = val << 16;
-               v |= readl(ioaddr + (reg & ~2)) & 0xffff;
-               writel(v, ioaddr + (reg & ~2));
-       } else {
-               writew(val, ioaddr + reg);
-       }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, (l))
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS          (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      0
-#define SMC_CAN_USE_32BIT      0
-#define SMC_NOWAIT             1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT           2
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS          (-1)    /* from resource */
-
-#elif  defined(CONFIG_MACH_LOGICPD_PXA270) ||  \
-       defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT       0
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#elif  defined(CONFIG_ARCH_INNOKOM) || \
-       defined(CONFIG_ARCH_PXA_IDP) || \
-       defined(CONFIG_ARCH_RAMSES) || \
-       defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      1
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-#define SMC_USE_PXA_DMA                1
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_inl(a, r)          readl((a) + (r))
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_outl(v, a, r)      writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)   readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)  writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS          (-1)    /* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
-       if (reg & 2) {
+       if ((machine_is_mainstone() || machine_is_stargate2() ||
+            machine_is_pxa_idp()) && reg & 2) {
                unsigned int v = val << 16;
                v |= readl(ioaddr + (reg & ~2)) & 0xffff;
                writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define RPC_LSA_DEFAULT         RPC_LED_100_10
 #define RPC_LSB_DEFAULT         RPC_LED_TX_RX
 
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT       0
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_NOWAIT             1
-
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS          IRQF_TRIGGER_HIGH
-
 #elif defined(CONFIG_COLDFIRE)
 
 #define SMC_CAN_USE_8BIT       0
index 2965c6ae7d6e4692ff491185550548bf985cf681..41047c9143d0a66cde1441311fb5feb3ce0796d0 100644 (file)
@@ -843,7 +843,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev)
        unsigned long flags;
 
        /* Initialise tx packet using broadcast destination address */
-       memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN);
+       eth_broadcast_addr(pdata->loopback_tx_pkt);
 
        /* Use incrementing source address */
        for (i = 6; i < 12; i++)
index e97074cd5800a7c67466b60925cb6dfaf6aaab0b..5a36bd2c7837d3f4c84e9344ba6bc040e872af2c 100644 (file)
@@ -91,7 +91,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
                                                  STMMAC_RESOURCE_NAME);
        if (IS_ERR(dwmac->stmmac_rst)) {
                dev_info(dev, "Could not get reset control!\n");
-               return -EINVAL;
+               if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dwmac->stmmac_rst = NULL;
        }
 
        dwmac->interface = of_get_phy_mode(np);
index 55e89b3838f1cb60df3f2f751ba254eddbef8fa2..5336594abed1c373259b67f1a6cb8ef1866fc92d 100644 (file)
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
-                       init_timer(&priv->eee_ctrl_timer);
-                       priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-                       priv->eee_ctrl_timer.data = (unsigned long)priv;
-                       priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
-                       add_timer(&priv->eee_ctrl_timer);
+                       setup_timer(&priv->eee_ctrl_timer,
+                                   stmmac_eee_ctrl_timer,
+                                   (unsigned long)priv);
+                       mod_timer(&priv->eee_ctrl_timer,
+                                 STMMAC_LPI_T(eee_timer));
 
                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
@@ -609,7 +609,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                 * where, freq_div_ratio = clk_ptp_ref_i/50MHz
                 * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
                 * NOTE: clk_ptp_ref_i should be >= 50MHz to
-                *       achive 20ns accuracy.
+                *       achieve 20ns accuracy.
                 *
                 * 2^x * y == (y << x), hence
                 * 2^32 * 50000000 ==> (50000000 << 32)
index 4b51f903fb733cba9b9b8a3fe9539fe3bc811c84..0c5842aeb807014c632a2d713b366133d7021f56 100644 (file)
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
                *flow_type = IP_USER_FLOW;
                break;
        default:
-               return 0;
+               return -EINVAL;
        }
 
-       return 1;
+       return 0;
 }
 
 static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
        class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
                TCAM_V4KEY0_CLASS_CODE_SHIFT;
        ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
        if (ret < 0) {
                netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
                            parent->index);
-               ret = -EINVAL;
                goto out;
        }
 
index fef5dec2cffe9c3bb7f09bbe728ab2dc54b0cba9..74e9b148378c1b1e1551976f1bfe4aa7c7920abe 100644 (file)
@@ -2175,7 +2175,7 @@ static int gem_do_start(struct net_device *dev)
        }
 
        /* Mark us as attached again if we come from resume(), this has
-        * no effect if we weren't detatched and needs to be done now.
+        * no effect if we weren't detached and needs to be done now.
         */
        netif_device_attach(dev);
 
@@ -2794,7 +2794,7 @@ static void gem_remove_one(struct pci_dev *pdev)
 
                unregister_netdev(dev);
 
-               /* Ensure reset task is truely gone */
+               /* Ensure reset task is truly gone */
                cancel_work_sync(&gp->reset_task);
 
                /* Free resources */
index 3bc992cd70b7de4449afec00cdab816d9910ec22..f6a71092e1359ea095aad0aeca093065080ceb5c 100644 (file)
@@ -50,7 +50,7 @@ config TI_DAVINCI_CPDMA
          will be called davinci_cpdma.  This is recommended.
 
 config TI_CPSW_PHY_SEL
-       boolean "TI CPSW Switch Phy sel Support"
+       bool "TI CPSW Switch Phy sel Support"
        depends on TI_CPSW
        ---help---
          This driver supports configuring of the phy mode connected to
@@ -77,7 +77,7 @@ config TI_CPSW
          will be called cpsw.
 
 config TI_CPTS
-       boolean "TI Common Platform Time Sync (CPTS) Support"
+       bool "TI Common Platform Time Sync (CPTS) Support"
        depends on TI_CPSW
        select PTP_1588_CLOCK
        ---help---
index 7d8dd0d2182ef9f8d94d1e84b3c7a45f3364347c..b536b4c82752a233e7c18d4f5194041f6dc80e20 100644 (file)
@@ -726,7 +726,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
                if (ndev_status && (status >= 0)) {
                        /* The packet received is for the interface which
                         * is already down and the other interface is up
-                        * and running, intead of freeing which results
+                        * and running, instead of freeing which results
                         * in reducing of the number of rx descriptor in
                         * DMA engine, requeue skb back to cpdma.
                         */
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
        cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                           port_mask, ALE_VLAN, slave->port_vlan, 0);
        cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-               priv->host_port, ALE_VLAN, slave->port_vlan);
+               priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cpsw_suspend(struct device *dev)
 {
        struct platform_device  *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
        }
        return 0;
 }
+#endif
 
-static const struct dev_pm_ops cpsw_pm_ops = {
-       .suspend        = cpsw_suspend,
-       .resume         = cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
 
 static const struct of_device_id cpsw_of_mtable[] = {
        { .compatible = "ti,cpsw", },
index 98655b44b97e2d7690ef2fa28156730697098d2b..c00084d689f3ba99fe846c2e50f5b21daec73189 100644 (file)
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int davinci_mdio_suspend(struct device *dev)
 {
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
 
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops davinci_mdio_pm_ops = {
-       .suspend_late   = davinci_mdio_suspend,
-       .resume_early   = davinci_mdio_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
 };
 
 #if IS_ENABLED(CONFIG_OF)
index a31a8c3c8e7c74c91651a934eac853625805d2ff..9f14d8b515c74360355c9850adc4ecad5e044795 100644 (file)
@@ -1320,7 +1320,7 @@ static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
        if (addr)
                ether_addr_copy(naddr->addr, addr);
        else
-               memset(naddr->addr, 0, ETH_ALEN);
+               eth_zero_addr(naddr->addr);
        list_add_tail(&naddr->node, &netcp->addr_list);
 
        return naddr;
index bb79928046645d62fbb238614777b00600b62b0d..ac62a5e248b0b011fbdf1c8e1172c70ed0892965 100644 (file)
@@ -1065,7 +1065,7 @@ refill:
 
        /*
         * this call can fail, but for now, just leave this
-        * decriptor without skb
+        * descriptor without skb
         */
        gelic_descr_prepare_rx(card, descr);
 
index 0a7f2e77557f63eb8920c0b80c9552ec17e056ec..13214a6492ac5b1eced4d39c21b7736f5dcf19d4 100644 (file)
@@ -1167,7 +1167,7 @@ static int gelic_wl_set_ap(struct net_device *netdev,
        } else {
                pr_debug("%s: clear bssid\n", __func__);
                clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
-               memset(wl->bssid, 0, ETH_ALEN);
+               eth_zero_addr(wl->bssid);
        }
        spin_unlock_irqrestore(&wl->lock, irqflag);
        pr_debug("%s: ->\n", __func__);
@@ -1189,7 +1189,7 @@ static int gelic_wl_get_ap(struct net_device *netdev,
                memcpy(data->ap_addr.sa_data, wl->active_bssid,
                       ETH_ALEN);
        } else
-               memset(data->ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(data->ap_addr.sa_data);
 
        spin_unlock_irqrestore(&wl->lock, irqflag);
        mutex_unlock(&wl->assoc_stat_lock);
index a495931a66a1f217216cdb90a6b6989881039f32..f5498c26b3c7076c8dcba593f1c900b078ff6bca 100644 (file)
@@ -56,7 +56,7 @@ MODULE_LICENSE("GPL");
 
 #define W5100_S0_REGS          0x0400
 #define W5100_S0_MR            0x0400 /* S0 Mode Register */
-#define   S0_MR_MACRAW           0x04 /* MAC RAW mode (promiscous) */
+#define   S0_MR_MACRAW           0x04 /* MAC RAW mode (promiscuous) */
 #define   S0_MR_MACRAW_MF        0x44 /* MAC RAW mode (filtered) */
 #define W5100_S0_CR            0x0401 /* S0 Command Register */
 #define   S0_CR_OPEN             0x01 /* OPEN command */
index 09322d9db5785ccb622a3ee3ec854442576bd537..ca0c631ed62842587684210f43c2aa7582291ad6 100644 (file)
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
 #define   IDR_W5300              0x5300  /* =0x5300 for WIZnet W5300 */
 #define W5300_S0_MR            0x0200  /* S0 Mode Register */
 #define   S0_MR_CLOSED           0x0000  /* Close mode */
-#define   S0_MR_MACRAW           0x0004  /* MAC RAW mode (promiscous) */
+#define   S0_MR_MACRAW           0x0004  /* MAC RAW mode (promiscuous) */
 #define   S0_MR_MACRAW_MF        0x0044  /* MAC RAW mode (filtered) */
 #define W5300_S0_CR            0x0202  /* S0 Command Register */
 #define   S0_CR_OPEN             0x0001  /* OPEN command */
index f7e0f0f7c2e27dd19b2cbc674644cd4678074c2c..5138407941cf1d90f4d4b1f7c6fa9c3ff92f5294 100644 (file)
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
        int i;
        static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
-       if (dev->flags & IFF_ALLMULTI) {
+       if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
                for (i = 0; i < ETH_ALEN; i++) {
                        __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
                        __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
@@ -954,7 +954,7 @@ static void eth_set_mcast_list(struct net_device *dev)
                return;
        }
 
-       memset(diffs, 0, ETH_ALEN);
+       eth_zero_addr(diffs);
 
        addr = NULL;
        netdev_for_each_mc_addr(ha, dev) {
index 0b8393ca8c80134b444e5d3bf69d3ed58552b644..7c4a4151ef0f23fca16b0fd957769c09dc5823d0 100644 (file)
@@ -247,6 +247,9 @@ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct sixpack *sp = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        spin_lock_bh(&sp->lock);
        /* We were not busy, so we are now... :-) */
        netif_stop_queue(dev);
@@ -302,7 +305,6 @@ static const struct net_device_ops sp_netdev_ops = {
        .ndo_stop               = sp_close,
        .ndo_start_xmit         = sp_xmit,
        .ndo_set_mac_address    = sp_set_mac_address,
-       .ndo_neigh_construct    = ax25_neigh_construct,
 };
 
 static void sp_setup(struct net_device *dev)
@@ -316,7 +318,6 @@ static void sp_setup(struct net_device *dev)
 
        dev->addr_len           = AX25_ADDR_LEN;
        dev->type               = ARPHRD_AX25;
-       dev->neigh_priv_len     = sizeof(struct ax25_neigh_priv);
        dev->tx_queue_len       = 10;
 
        /* Only activated in AX.25 mode */
index 3539ab392f7dc9af3713a6dfd4ec1e978537f470..83c7cce0d172b205f0b1d96405b6b66eea79ea55 100644 (file)
@@ -772,6 +772,9 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
        struct baycom_state *bc = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        if (skb->data[0] != 0) {
                do_kiss_params(bc, skb->data, skb->len);
                dev_kfree_skb(skb);
@@ -1109,7 +1112,6 @@ static const struct net_device_ops baycom_netdev_ops = {
        .ndo_do_ioctl        = baycom_ioctl,
        .ndo_start_xmit      = baycom_send_packet,
        .ndo_set_mac_address = baycom_set_mac_address,
-       .ndo_neigh_construct = ax25_neigh_construct,
 };
 
 /*
@@ -1147,7 +1149,6 @@ static void baycom_probe(struct net_device *dev)
        dev->header_ops = &ax25_header_ops;
        
        dev->type = ARPHRD_AX25;           /* AF_AX25 device */
-       dev->neigh_priv_len = sizeof(struct ax25_neigh_priv);
        dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
        dev->mtu = AX25_DEF_PACLEN;        /* eth_mtu is the default */
        dev->addr_len = AX25_ADDR_LEN;     /* sizeof an ax.25 address */
index bce105b16ed0771460e1a03205c1bb64b213f2bb..63ff08a26da81b5bafc401fbfeeb710a6b595a64 100644 (file)
@@ -251,6 +251,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
        struct net_device *orig_dev;
        int size;
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        /*
         * Just to be *really* sure not to send anything if the interface
         * is down, the ethernet device may have gone.
@@ -469,7 +472,6 @@ static const struct net_device_ops bpq_netdev_ops = {
        .ndo_start_xmit      = bpq_xmit,
        .ndo_set_mac_address = bpq_set_mac_address,
        .ndo_do_ioctl        = bpq_ioctl,
-       .ndo_neigh_construct = ax25_neigh_construct,
 };
 
 static void bpq_setup(struct net_device *dev)
@@ -487,7 +489,6 @@ static void bpq_setup(struct net_device *dev)
 #endif
 
        dev->type            = ARPHRD_AX25;
-       dev->neigh_priv_len  = sizeof(struct ax25_neigh_priv);
        dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
        dev->mtu             = AX25_DEF_PACLEN;
        dev->addr_len        = AX25_ADDR_LEN;
index abab7be77406387fd8442908bfcbe6989d76d083..c3d37777061631d1c34fb1b1e05f36986bb5f6cc 100644 (file)
@@ -433,7 +433,6 @@ module_exit(dmascc_exit);
 static void __init dev_setup(struct net_device *dev)
 {
        dev->type = ARPHRD_AX25;
-       dev->neigh_priv_len = sizeof(struct ax25_neigh_priv);
        dev->hard_header_len = AX25_MAX_HEADER_LEN;
        dev->mtu = 1500;
        dev->addr_len = AX25_ADDR_LEN;
@@ -448,7 +447,6 @@ static const struct net_device_ops scc_netdev_ops = {
        .ndo_start_xmit = scc_send_packet,
        .ndo_do_ioctl = scc_ioctl,
        .ndo_set_mac_address = scc_set_mac_address,
-       .ndo_neigh_construct = ax25_neigh_construct,
 };
 
 static int __init setup_adapter(int card_base, int type, int n)
@@ -922,6 +920,9 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
        unsigned long flags;
        int i;
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        /* Temporarily stop the scheduler feeding us packets */
        netif_stop_queue(dev);
 
index 435868a7b69cdcdde15d05988adb1f66f737fa11..49fe59b180a8619f554d5852202df933fee54871 100644 (file)
@@ -404,6 +404,9 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
 {
        struct hdlcdrv_state *sm = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        if (skb->data[0] != 0) {
                do_kiss_params(sm, skb->data, skb->len);
                dev_kfree_skb(skb);
@@ -626,7 +629,6 @@ static const struct net_device_ops hdlcdrv_netdev = {
        .ndo_start_xmit = hdlcdrv_send_packet,
        .ndo_do_ioctl   = hdlcdrv_ioctl,
        .ndo_set_mac_address = hdlcdrv_set_mac_address,
-       .ndo_neigh_construct = ax25_neigh_construct,
 };
 
 /*
@@ -677,7 +679,6 @@ static void hdlcdrv_setup(struct net_device *dev)
        dev->header_ops = &ax25_header_ops;
        
        dev->type = ARPHRD_AX25;           /* AF_AX25 device */
-       dev->neigh_priv_len = sizeof(struct ax25_neigh_priv);
        dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
        dev->mtu = AX25_DEF_PACLEN;        /* eth_mtu is the default */
        dev->addr_len = AX25_ADDR_LEN;     /* sizeof an ax.25 address */
index c12ec2c2b594a79e941ea7a9256c3bc5f8cc8c32..2ffbf13471d09ad4c27d8c70fbb4dd3145befa75 100644 (file)
@@ -529,6 +529,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mkiss *ax = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        if (!netif_running(dev))  {
                printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
                return NETDEV_TX_BUSY;
@@ -554,11 +557,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* We were not busy, so we are now... :-) */
-       if (skb != NULL) {
-               netif_stop_queue(dev);
-               ax_encaps(dev, skb->data, skb->len);
-               kfree_skb(skb);
-       }
+       netif_stop_queue(dev);
+       ax_encaps(dev, skb->data, skb->len);
+       kfree_skb(skb);
 
        return NETDEV_TX_OK;
 }
@@ -641,7 +642,6 @@ static const struct net_device_ops ax_netdev_ops = {
        .ndo_stop            = ax_close,
        .ndo_start_xmit      = ax_xmit,
        .ndo_set_mac_address = ax_set_mac_address,
-       .ndo_neigh_construct = ax25_neigh_construct,
 };
 
 static void ax_setup(struct net_device *dev)
@@ -651,7 +651,6 @@ static void ax_setup(struct net_device *dev)
        dev->hard_header_len = 0;
        dev->addr_len        = 0;
        dev->type            = ARPHRD_AX25;
-       dev->neigh_priv_len  = sizeof(struct ax25_neigh_priv);
        dev->tx_queue_len    = 10;
        dev->header_ops      = &ax25_header_ops;
        dev->netdev_ops      = &ax_netdev_ops;
index b305f51eb42015316b7943f382165ad36c847408..ce88df33fe17b20237b3f4f2e0c9e1ca7b4b63a3 100644 (file)
@@ -1550,7 +1550,6 @@ static const struct net_device_ops scc_netdev_ops = {
        .ndo_set_mac_address = scc_net_set_mac_address,
        .ndo_get_stats       = scc_net_get_stats,
        .ndo_do_ioctl        = scc_net_ioctl,
-       .ndo_neigh_construct = ax25_neigh_construct,
 };
 
 /* ----> Initialize device <----- */
@@ -1568,7 +1567,6 @@ static void scc_net_setup(struct net_device *dev)
        dev->flags      = 0;
 
        dev->type = ARPHRD_AX25;
-       dev->neigh_priv_len = sizeof(struct ax25_neigh_priv);
        dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
        dev->mtu = AX25_DEF_PACLEN;
        dev->addr_len = AX25_ADDR_LEN;
@@ -1641,6 +1639,9 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
        unsigned long flags;
        char kisscmd;
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        if (skb->len > scc->stat.bufsize || skb->len < 2) {
                scc->dev_stat.tx_dropped++;     /* bogus frame */
                dev_kfree_skb(skb);
index 89d9da7a0c51eebf7fa4b338d21f8a85119ac3eb..1a4729c36aa49d93f8e536cd98d4731df704621d 100644 (file)
@@ -597,6 +597,9 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb,
 {
        struct yam_port *yp = netdev_priv(dev);
 
+       if (skb->protocol == htons(ETH_P_IP))
+               return ax25_ip_xmit(skb);
+
        skb_queue_tail(&yp->send_queue, skb);
        dev->trans_start = jiffies;
        return NETDEV_TX_OK;
@@ -1100,7 +1103,6 @@ static const struct net_device_ops yam_netdev_ops = {
        .ndo_start_xmit      = yam_send_packet,
        .ndo_do_ioctl        = yam_ioctl,
        .ndo_set_mac_address = yam_set_mac_address,
-       .ndo_neigh_construct = ax25_neigh_construct,
 };
 
 static void yam_setup(struct net_device *dev)
@@ -1129,7 +1131,6 @@ static void yam_setup(struct net_device *dev)
        dev->header_ops = &ax25_header_ops;
 
        dev->type = ARPHRD_AX25;
-       dev->neigh_priv_len = sizeof(struct ax25_neigh_priv);
        dev->hard_header_len = AX25_MAX_HEADER_LEN;
        dev->mtu = AX25_MTU;
        dev->addr_len = AX25_ADDR_LEN;
index 1e51c6bf3ae13545673c97eb81cbe8914fd18555..8362aef0c15e534241babfddbcd6cd6912ed1a8c 100644 (file)
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
        } /* else everything is zero */
 }
 
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                                struct iov_iter *from, int noblock)
 {
-       int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+       int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
        struct sk_buff *skb;
        struct macvlan_dev *vlan;
        unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                        linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
        }
 
-       skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+       skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
                                linear, noblock, &err);
        if (!skb)
                goto err;
index ba2f5e710af12cc7587f0503ea8dc177d2aba4ed..15731d1db918c32f6dc341800155340c0e62a7a2 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/netpoll.h>
 #include <linux/inet.h>
 #include <linux/configfs.h>
+#include <linux/etherdevice.h>
 
 MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
 MODULE_DESCRIPTION("Console driver for network interfaces");
@@ -185,7 +186,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
        nt->np.local_port = 6665;
        nt->np.remote_port = 6666;
        mutex_init(&nt->mutex);
-       memset(nt->np.remote_mac, 0xff, ETH_ALEN);
+       eth_broadcast_addr(nt->np.remote_mac);
 
        /* Parse parameters and setup netpoll */
        err = netpoll_parse_options(&nt->np, target_config);
@@ -604,7 +605,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
        nt->np.local_port = 6665;
        nt->np.remote_port = 6666;
        mutex_init(&nt->mutex);
-       memset(nt->np.remote_mac, 0xff, ETH_ALEN);
+       eth_broadcast_addr(nt->np.remote_mac);
 
        /* Initialize the config_item member */
        config_item_init_type_name(&nt->item, name, &netconsole_target_type);
index 9e3af54c90102a2c113596d326d893670b7e6c24..32efbd48f32642ddabb21126384b0c21e160a403 100644 (file)
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XGBE_PHY_CDR_RATE_PROPERTY     "amd,serdes-cdr-rate"
 #define XGBE_PHY_PQ_SKEW_PROPERTY      "amd,serdes-pq-skew"
 #define XGBE_PHY_TX_AMP_PROPERTY       "amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY      "amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY      "amd,serdes-dfe-tap-enable"
 
 #define XGBE_PHY_SPEEDS                        3
 #define XGBE_PHY_SPEED_1000            0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_10000_BLWC               0
 #define SPEED_10000_CDR                        0x7
 #define SPEED_10000_PLL                        0x1
-#define SPEED_10000_PQ                 0x1e
+#define SPEED_10000_PQ                 0x12
 #define SPEED_10000_RATE               0x0
 #define SPEED_10000_TXAMP              0xa
 #define SPEED_10000_WORD               0x7
+#define SPEED_10000_DFE_TAP_CONFIG     0x1
+#define SPEED_10000_DFE_TAP_ENABLE     0x7f
 
 #define SPEED_2500_BLWC                        1
 #define SPEED_2500_CDR                 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_2500_RATE                        0x1
 #define SPEED_2500_TXAMP               0xf
 #define SPEED_2500_WORD                        0x1
+#define SPEED_2500_DFE_TAP_CONFIG      0x3
+#define SPEED_2500_DFE_TAP_ENABLE      0x0
 
 #define SPEED_1000_BLWC                        1
 #define SPEED_1000_CDR                 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_1000_RATE                        0x3
 #define SPEED_1000_TXAMP               0xf
 #define SPEED_1000_WORD                        0x1
+#define SPEED_1000_DFE_TAP_CONFIG      0x3
+#define SPEED_1000_DFE_TAP_ENABLE      0x0
 
 /* SerDes RxTx register offsets */
+#define RXTX_REG6                      0x0018
 #define RXTX_REG20                     0x0050
+#define RXTX_REG22                     0x0058
 #define RXTX_REG114                    0x01c8
+#define RXTX_REG129                    0x0204
 
 /* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX     8
+#define RXTX_REG6_RESETB_RXD_WIDTH     1
 #define RXTX_REG20_BLWC_ENA_INDEX      2
 #define RXTX_REG20_BLWC_ENA_WIDTH      1
 #define RXTX_REG114_PQ_REG_INDEX       9
 #define RXTX_REG114_PQ_REG_WIDTH       7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
 
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
        SPEED_10000_TXAMP,
 };
 
+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+       SPEED_1000_DFE_TAP_CONFIG,
+       SPEED_2500_DFE_TAP_CONFIG,
+       SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+       SPEED_1000_DFE_TAP_ENABLE,
+       SPEED_2500_DFE_TAP_ENABLE,
+       SPEED_10000_DFE_TAP_ENABLE,
+};
+
 enum amd_xgbe_phy_an {
        AMD_XGBE_AN_READY = 0,
        AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
        u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
        u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
        u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+       u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+       u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
 
        /* Auto-negotiation state machine support */
        struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
                status = XSIR0_IOREAD(priv, SIR0_STATUS);
                if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
                    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
-                       return;
+                       goto rx_reset;
        }
 
        netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
                   status);
+
+rx_reset:
+       /* Perform Rx reset for the DFE changes */
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
 }
 
 static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
                       sizeof(priv->serdes_tx_amp));
        }
 
+       if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_DFE_CFG_PROPERTY,
+                                                    priv->serdes_dfe_tap_cfg,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_DFE_CFG_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_dfe_tap_cfg,
+                      amd_xgbe_phy_serdes_dfe_tap_cfg,
+                      sizeof(priv->serdes_dfe_tap_cfg));
+       }
+
+       if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_DFE_ENA_PROPERTY,
+                                                    priv->serdes_dfe_tap_ena,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_DFE_ENA_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_dfe_tap_ena,
+                      amd_xgbe_phy_serdes_dfe_tap_ena,
+                      sizeof(priv->serdes_dfe_tap_ena));
+       }
+
        phydev->priv = priv;
 
        if (!priv->adev || acpi_disabled)
index cdcac6aa4260b32927d7c903e024b42e5d17861e..52cd8db2c57daad2767dec72149f4cdabbcf6917 100644 (file)
@@ -235,6 +235,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
        return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
 }
 
+/**
+ * phy_check_valid - check if there is a valid PHY setting which matches
+ *                  speed, duplex, and feature mask
+ * @speed: speed to match
+ * @duplex: duplex to match
+ * @features: A mask of the valid settings
+ *
+ * Description: Returns true if there is a valid setting, false otherwise.
+ */
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
+{
+       unsigned int idx;
+
+       idx = phy_find_valid(phy_find_setting(speed, duplex), features);
+
+       return settings[idx].speed == speed && settings[idx].duplex == duplex &&
+               (settings[idx].setting & features);
+}
+
 /**
  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
  * @phydev: the target phy_device struct
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
                int status;
-               unsigned int idx;
 
                /* Read phy status to properly get the right settings */
                status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 
                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
-               idx = phy_find_setting(phydev->speed, phydev->duplex);
-               if (!(lp & adv & settings[idx].setting))
+               if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
                        goto eee_exit_err;
 
                if (clk_stop_enable) {
index a7d163bf5bbb50a0cc6368210090f90690ecae56..9d3366f7c9ad98b75a4c28992a23f4fa86922eb1 100644 (file)
@@ -43,9 +43,7 @@
 
 static struct team_port *team_port_get_rcu(const struct net_device *dev)
 {
-       struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
-       return team_port_exists(dev) ? port : NULL;
+       return rcu_dereference(dev->rx_handler_data);
 }
 
 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
index 37eed4d84e9cb458b2ac5d3b7248b0835115c67a..7ba8d0885f120156c47f44884212a2fd73f604b9 100644 (file)
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
            * Linksys USB200M
            * Netgear FA120
            * Sitecom LN-029
+           * Sitecom LN-028
            * Intellinet USB 2.0 Ethernet
            * ST Lab USB 2.0 Ethernet
            * TrendNet TU2-ET100
@@ -397,14 +398,14 @@ config USB_NET_CDC_SUBSET
          not generally have permanently assigned Ethernet addresses.
 
 config USB_ALI_M5632
-       boolean "ALi M5632 based 'USB 2.0 Data Link' cables"
+       bool "ALi M5632 based 'USB 2.0 Data Link' cables"
        depends on USB_NET_CDC_SUBSET
        help
          Choose this option if you're using a host-to-host cable
          based on this design, which supports USB 2.0 high speed.
 
 config USB_AN2720
-       boolean "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
+       bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
        depends on USB_NET_CDC_SUBSET
        help
          Choose this option if you're using a host-to-host cable
@@ -412,7 +413,7 @@ config USB_AN2720
          Cypress brand.
 
 config USB_BELKIN
-       boolean "eTEK based host-to-host cables (Advance, Belkin, ...)"
+       bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
        depends on USB_NET_CDC_SUBSET
        default y
        help
@@ -421,7 +422,7 @@ config USB_BELKIN
          microcontroller, with LEDs that indicate traffic.
 
 config USB_ARMLINUX
-       boolean "Embedded ARM Linux links (iPaq, ...)"
+       bool "Embedded ARM Linux links (iPaq, ...)"
        depends on USB_NET_CDC_SUBSET
        default y
        help
@@ -438,14 +439,14 @@ config USB_ARMLINUX
          this simpler protocol by installing a different kernel.
 
 config USB_EPSON2888
-       boolean "Epson 2888 based firmware (DEVELOPMENT)"
+       bool "Epson 2888 based firmware (DEVELOPMENT)"
        depends on USB_NET_CDC_SUBSET
        help
          Choose this option to support the usb networking links used
          by some sample firmware from Epson.
 
 config USB_KC2190
-       boolean "KT Technology KC2190 based cables (InstaNet)"
+       bool "KT Technology KC2190 based cables (InstaNet)"
        depends on USB_NET_CDC_SUBSET
        help
          Choose this option if you're using a host-to-host cable
index bf49792062a2b40c2f1bd2f5a06e6eff8954ab90..1173a24feda38c3af236c84acaf8982f39c0e0b1 100644 (file)
@@ -978,6 +978,10 @@ static const struct usb_device_id  products [] = {
        // Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
        USB_DEVICE (0x0df6, 0x0056),
        .driver_info =  (unsigned long) &ax88178_info,
+}, {
+       // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+       USB_DEVICE (0x0df6, 0x061c),
+       .driver_info =  (unsigned long) &ax88178_info,
 }, {
        // corega FEther USB2-TX
        USB_DEVICE (0x07aa, 0x0017),
index 8cfc3bb0c6a672a288784ab0dd5f09597265c39d..4e2b26a88b15f03ba8302d5d7a83d1a5b1a4d4ea 100644 (file)
@@ -641,7 +641,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
        u8 broadcast[ETH_ALEN];
        u8 rx = RxEnable | RxPolarity | RxMultiCast;
 
-       memset(broadcast, 0xff, ETH_ALEN);
+       eth_broadcast_addr(broadcast);
        memset(catc->multicast, 0, 64);
 
        catc_multicast(broadcast, catc->multicast);
@@ -880,7 +880,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                
                dev_dbg(dev, "Filling the multicast list.\n");
          
-               memset(broadcast, 0xff, ETH_ALEN);
+               eth_broadcast_addr(broadcast);
                catc_multicast(broadcast, catc->multicast);
                catc_multicast(netdev->dev_addr, catc->multicast);
                catc_write_mem(catc, 0xfa80, catc->multicast, 64);
index 96fc8a5bde8416a471ed4cd07e657025bc222fab..e4b7a47a825c7f686e48992b23d3f1ee30555d71 100644 (file)
@@ -394,7 +394,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
        skb_put(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        eth_hdr(skb)->h_proto = proto;
-       memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+       eth_zero_addr(eth_hdr(skb)->h_source);
        memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
 
        /* add datagram */
index 3c8dfe5e46ed3cdf6dd162bceb3d2b94e6cf8c06..111d907e0c117e8f8efb4b6c3b50607994f6c7b9 100644 (file)
@@ -1597,7 +1597,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
                }
                cprev = cnow;
        }
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        remove_wait_queue(&tiocmget->waitq, &wait);
 
        return ret;
index 8f37efd2d2fbb3ec05fcde896b8b9e88136c2f35..5714107533bb4292b94c85d064726ac4f89f496b 100644 (file)
@@ -201,7 +201,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                                        &buf->data[sizeof(*ethhdr) + 0x12],
                                        ETH_ALEN);
                } else {
-                       memset(ethhdr->h_source, 0, ETH_ALEN);
+                       eth_zero_addr(ethhdr->h_source);
                        memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN);
 
                        /* Inbound IPv6 packets have an IPv4 ethertype (0x800)
index 3d18bb0eee8528ece6509ec3848c3044ff5804ce..1bfe0fcaccf5ba31bf125f898ec6c624f506206e 100644 (file)
@@ -134,6 +134,11 @@ static const struct usb_device_id  products [] = {
 }, {
        USB_DEVICE(0x050d, 0x258a),     /* Belkin F5U258/F5U279 (PL-25A1) */
        .driver_info =  (unsigned long) &prolific_info,
+}, {
+       USB_DEVICE(0x3923, 0x7825),     /* National Instruments USB
+                                        * Host-to-Host Cable
+                                        */
+       .driver_info =  (unsigned long) &prolific_info,
 },
 
        { },            // END
index 602dc6668c3af7ce9f6cc4ddd61437ba2f6adf29..f603f362504bce0c1cb2656e1d29232eb05db846 100644 (file)
@@ -108,7 +108,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        eth_hdr(skb)->h_proto = proto;
-       memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+       eth_zero_addr(eth_hdr(skb)->h_source);
 fix_dest:
        memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
        return 1;
index 110a2cf67244c8946a295c09d7c63b3297f0f0d3..f1ff3666f090d886e6b8ecc99bc6ac09d8687ef6 100644 (file)
@@ -1710,6 +1710,12 @@ static int virtnet_probe(struct virtio_device *vdev)
        struct virtnet_info *vi;
        u16 max_queue_pairs;
 
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
        if (!virtnet_validate_features(vdev))
                return -EINVAL;
 
index 83c39e2858bf70a1673cf2c6d9813a92f25ce4d3..88d121d43c08bedf2efc3265964188cf2b7f94a7 100644 (file)
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
        spin_lock_irqsave(&cosa->lock, flags);
        add_wait_queue(&chan->rxwaitq, &wait);
        while (!chan->rx_status) {
-               current->state = TASK_INTERRUPTIBLE;
+               set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&cosa->lock, flags);
                schedule();
                spin_lock_irqsave(&cosa->lock, flags);
                if (signal_pending(current) && chan->rx_status == 0) {
                        chan->rx_status = 1;
                        remove_wait_queue(&chan->rxwaitq, &wait);
-                       current->state = TASK_RUNNING;
+                       __set_current_state(TASK_RUNNING);
                        spin_unlock_irqrestore(&cosa->lock, flags);
                        mutex_unlock(&chan->rlock);
                        return -ERESTARTSYS;
                }
        }
        remove_wait_queue(&chan->rxwaitq, &wait);
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        kbuf = chan->rxdata;
        count = chan->rxsize;
        spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
        spin_lock_irqsave(&cosa->lock, flags);
        add_wait_queue(&chan->txwaitq, &wait);
        while (!chan->tx_status) {
-               current->state = TASK_INTERRUPTIBLE;
+               set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&cosa->lock, flags);
                schedule();
                spin_lock_irqsave(&cosa->lock, flags);
                if (signal_pending(current) && chan->tx_status == 0) {
                        chan->tx_status = 1;
                        remove_wait_queue(&chan->txwaitq, &wait);
-                       current->state = TASK_RUNNING;
+                       __set_current_state(TASK_RUNNING);
                        chan->tx_status = 1;
                        spin_unlock_irqrestore(&cosa->lock, flags);
                        up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
                }
        }
        remove_wait_queue(&chan->txwaitq, &wait);
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        up(&chan->wsem);
        spin_unlock_irqrestore(&cosa->lock, flags);
        kfree(kbuf);
index e71a2ce7a4487a5386331e26023e5538512f14bd..627443283e1d7ab9022cda91c4acd07deff6f84f 100644 (file)
@@ -2676,7 +2676,7 @@ static void wifi_setup(struct net_device *dev)
        dev->addr_len           = ETH_ALEN;
        dev->tx_queue_len       = 100; 
 
-       memset(dev->broadcast,0xFF, ETH_ALEN);
+       eth_broadcast_addr(dev->broadcast);
 
        dev->flags              = IFF_BROADCAST|IFF_MULTICAST;
 }
@@ -3273,7 +3273,7 @@ static void airo_handle_link(struct airo_info *ai)
                }
 
                /* Send event to user space */
-               memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
                wrqu.ap_addr.sa_family = ARPHRD_ETHER;
                wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL);
        }
index da92bfa76b7cf1d37e9ea819edf00c59d84e350b..49219c5089639574a471be4befa951da71e620c3 100644 (file)
@@ -1166,7 +1166,7 @@ static int at76_start_monitor(struct at76_priv *priv)
        int ret;
 
        memset(&scan, 0, sizeof(struct at76_req_scan));
-       memset(scan.bssid, 0xff, ETH_ALEN);
+       eth_broadcast_addr(scan.bssid);
 
        scan.channel = priv->channel;
        scan.scan_type = SCAN_TYPE_PASSIVE;
@@ -1427,7 +1427,7 @@ static int at76_startup_device(struct at76_priv *priv)
        at76_wait_completion(priv, CMD_STARTUP);
 
        /* remove BSSID from previous run */
-       memset(priv->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->bssid);
 
        priv->scanning = false;
 
@@ -1973,7 +1973,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw,
        ieee80211_stop_queues(hw);
 
        memset(&scan, 0, sizeof(struct at76_req_scan));
-       memset(scan.bssid, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(scan.bssid);
 
        if (req->n_ssids) {
                scan.scan_type = SCAN_TYPE_ACTIVE;
index c18647b87f71dd6879a4d3ea39b22ad653d5a449..0eddb204d85bb9b08dcb84f55530b25fb3b901e8 100644 (file)
@@ -39,7 +39,7 @@ struct ath10k_ce_pipe;
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
 #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB  3
+#define CE_DESC_FLAGS_META_DATA_LSB  2
 
 struct ce_desc {
        __le32 addr;
index 310e12bc078a6e47dd9f52637db92c1efed6d227..c0e454bb6a8df646b9266afbffa76ed179dff0fc 100644 (file)
@@ -436,16 +436,16 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
-       if (ar->board && !IS_ERR(ar->board))
+       if (!IS_ERR(ar->board))
                release_firmware(ar->board);
 
-       if (ar->otp && !IS_ERR(ar->otp))
+       if (!IS_ERR(ar->otp))
                release_firmware(ar->otp);
 
-       if (ar->firmware && !IS_ERR(ar->firmware))
+       if (!IS_ERR(ar->firmware))
                release_firmware(ar->firmware);
 
-       if (ar->cal_file && !IS_ERR(ar->cal_file))
+       if (!IS_ERR(ar->cal_file))
                release_firmware(ar->cal_file);
 
        ar->board = NULL;
index d60e46fe6d19ccc475a173df2fada3ea735d69d4..f65310c3ba5fe8d660cd4139f93407a22045b8aa 100644 (file)
@@ -159,6 +159,25 @@ struct ath10k_fw_stats_peer {
        u32 peer_rx_rate; /* 10x only */
 };
 
+struct ath10k_fw_stats_vdev {
+       struct list_head list;
+
+       u32 vdev_id;
+       u32 beacon_snr;
+       u32 data_snr;
+       u32 num_tx_frames[4];
+       u32 num_rx_frames;
+       u32 num_tx_frames_retries[4];
+       u32 num_tx_frames_failures[4];
+       u32 num_rts_fail;
+       u32 num_rts_success;
+       u32 num_rx_err;
+       u32 num_rx_discard;
+       u32 num_tx_not_acked;
+       u32 tx_rate_history[10];
+       u32 beacon_rssi_history[10];
+};
+
 struct ath10k_fw_stats_pdev {
        struct list_head list;
 
@@ -220,6 +239,7 @@ struct ath10k_fw_stats_pdev {
 
 struct ath10k_fw_stats {
        struct list_head pdevs;
+       struct list_head vdevs;
        struct list_head peers;
 };
 
@@ -288,6 +308,7 @@ struct ath10k_vif {
        bool is_started;
        bool is_up;
        bool spectral_enabled;
+       bool ps;
        u32 aid;
        u8 bssid[ETH_ALEN];
 
@@ -413,6 +434,12 @@ enum ath10k_fw_features {
         */
        ATH10K_FW_FEATURE_WMI_10_2 = 4,
 
+       /* Some firmware revisions lack proper multi-interface client powersave
+        * implementation. Enabling PS could result in connection drops,
+        * traffic stalls, etc.
+        */
+       ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+
        /* keep last */
        ATH10K_FW_FEATURE_COUNT,
 };
index d2281e5c2ffe2070039b43ca1d380860cd4f2294..301081db1ef60a9f7a68a9452d7155965520cc39 100644 (file)
@@ -243,6 +243,16 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
        }
 }
 
+static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+{
+       struct ath10k_fw_stats_vdev *i, *tmp;
+
+       list_for_each_entry_safe(i, tmp, head, list) {
+               list_del(&i->list);
+               kfree(i);
+       }
+}
+
 static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
 {
        struct ath10k_fw_stats_peer *i, *tmp;
@@ -258,6 +268,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
        spin_lock_bh(&ar->data_lock);
        ar->debug.fw_stats_done = false;
        ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+       ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
        ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
        spin_unlock_bh(&ar->data_lock);
 }
@@ -273,14 +284,27 @@ static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
        return num;
 }
 
+static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
+{
+       struct ath10k_fw_stats_vdev *i;
+       size_t num = 0;
+
+       list_for_each_entry(i, head, list)
+               ++num;
+
+       return num;
+}
+
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_fw_stats stats = {};
        bool is_start, is_started, is_end;
        size_t num_peers;
+       size_t num_vdevs;
        int ret;
 
        INIT_LIST_HEAD(&stats.pdevs);
+       INIT_LIST_HEAD(&stats.vdevs);
        INIT_LIST_HEAD(&stats.peers);
 
        spin_lock_bh(&ar->data_lock);
@@ -308,6 +332,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
        }
 
        num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+       num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
        is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
                    !list_empty(&stats.pdevs));
        is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
@@ -330,7 +355,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
                        goto free;
                }
 
+               if (num_vdevs >= BITS_PER_LONG) {
+                       ath10k_warn(ar, "dropping fw vdev stats\n");
+                       goto free;
+               }
+
                list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+               list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
        }
 
        complete(&ar->debug.fw_stats_complete);
@@ -340,6 +371,7 @@ free:
         * resources if that is not the case.
         */
        ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
+       ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
        ath10k_debug_fw_stats_peers_free(&stats.peers);
 
 unlock:
@@ -363,7 +395,10 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
 
                reinit_completion(&ar->debug.fw_stats_complete);
 
-               ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+               ret = ath10k_wmi_request_stats(ar,
+                                              WMI_STAT_PDEV |
+                                              WMI_STAT_VDEV |
+                                              WMI_STAT_PEER);
                if (ret) {
                        ath10k_warn(ar, "could not request stats (%d)\n", ret);
                        return ret;
@@ -395,8 +430,11 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
        unsigned int len = 0;
        unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
        const struct ath10k_fw_stats_pdev *pdev;
+       const struct ath10k_fw_stats_vdev *vdev;
        const struct ath10k_fw_stats_peer *peer;
        size_t num_peers;
+       size_t num_vdevs;
+       int i;
 
        spin_lock_bh(&ar->data_lock);
 
@@ -408,6 +446,7 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
        }
 
        num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
+       num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
 
        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -529,6 +568,65 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
 
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+                        "ath10k VDEV stats", num_vdevs);
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "vdev id", vdev->vdev_id);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "beacon snr", vdev->beacon_snr);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "data snr", vdev->data_snr);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rx frames", vdev->num_rx_frames);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rts fail", vdev->num_rts_fail);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rts success", vdev->num_rts_success);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rx err", vdev->num_rx_err);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num rx discard", vdev->num_rx_discard);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "num tx not acked", vdev->num_tx_not_acked);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] %u\n",
+                                        "num tx frames", i,
+                                        vdev->num_tx_frames[i]);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] %u\n",
+                                        "num tx frames retries", i,
+                                        vdev->num_tx_frames_retries[i]);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] %u\n",
+                                        "num tx frames failures", i,
+                                        vdev->num_tx_frames_failures[i]);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] 0x%08x\n",
+                                        "tx rate history", i,
+                                        vdev->tx_rate_history[i]);
+
+               for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+                       len += scnprintf(buf + len, buf_len - len,
+                                       "%25s [%02d] %u\n",
+                                        "beacon rssi history", i,
+                                        vdev->beacon_rssi_history[i]);
+
+               len += scnprintf(buf + len, buf_len - len, "\n");
+       }
+
        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
                         "ath10k PEER stats", num_peers);
@@ -1900,6 +1998,7 @@ int ath10k_debug_create(struct ath10k *ar)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+       INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
        INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
 
        return 0;
index c1da44f65a4d0230bb2aac9461be7ca10a8ceb5c..01a2b384f358355ded1207323dd2f499b9e2a3c3 100644 (file)
@@ -176,7 +176,7 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
         * automatically balances load wrt to CPU power.
         *
         * This probably comes at a cost of lower maximum throughput but
-        * improves the avarage and stability. */
+        * improves the average and stability. */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
index d6d2f0f00caad18ec00ba69f5635c79f161b2ba7..5d2db069d46e439b83e77557940d4e854da91399 100644 (file)
@@ -611,7 +611,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n",
+               ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
                            vdev_id, ret);
                return ret;
        }
@@ -658,7 +658,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret)
-               ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n",
+               ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
                            ar->monitor_vdev_id, ret);
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
@@ -927,8 +927,9 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n",
-                           arg.vdev_id, ret);
+               ath10k_warn(ar,
+                           "failed to synchronize setup for vdev %i restart %d: %d\n",
+                           arg.vdev_id, restart, ret);
                return ret;
        }
 
@@ -966,7 +967,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+               ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -1182,7 +1183,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
                if (is_zero_ether_addr(arvif->bssid))
                        return;
 
-               memset(arvif->bssid, 0, ETH_ALEN);
+               eth_zero_addr(arvif->bssid);
 
                return;
        }
@@ -1253,6 +1254,20 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
        return 0;
 }
 
+static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+{
+       struct ath10k_vif *arvif;
+       int num = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       list_for_each_entry(arvif, &ar->arvifs, list)
+               if (arvif->ps)
+                       num++;
+
+       return num;
+}
+
 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 {
        struct ath10k *ar = arvif->ar;
@@ -1262,13 +1277,24 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
        enum wmi_sta_ps_mode psmode;
        int ret;
        int ps_timeout;
+       bool enable_ps;
 
        lockdep_assert_held(&arvif->ar->conf_mutex);
 
        if (arvif->vif->type != NL80211_IFTYPE_STATION)
                return 0;
 
-       if (vif->bss_conf.ps) {
+       enable_ps = arvif->ps;
+
+       if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+           !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
+                     ar->fw_features)) {
+               ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
+                           arvif->vdev_id);
+               enable_ps = false;
+       }
+
+       if (enable_ps) {
                psmode = WMI_STA_PS_MODE_ENABLED;
                param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
 
@@ -1781,6 +1807,68 @@ static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
                                         ath10k_smps_map[smps]);
 }
 
+static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
+                                     struct ieee80211_vif *vif,
+                                     struct ieee80211_sta_vht_cap vht_cap)
+{
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret;
+       u32 param;
+       u32 value;
+
+       if (!(ar->vht_cap_info &
+             (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+              IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+              IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+              IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+               return 0;
+
+       param = ar->wmi.vdev_param->txbf;
+       value = 0;
+
+       if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
+               return 0;
+
+       /* The following logic is correct: if a remote STA advertises support
+        * for acting as a beamformer then we should act as a beamformee.
+        */
+
+       if (ar->vht_cap_info &
+           (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+            IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+               if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+                       value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+               if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+                       value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+       }
+
+       if (ar->vht_cap_info &
+           (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+            IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+               if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+                       value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+               if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+                       value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+       }
+
+       if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
+               value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+       if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
+               value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
+       if (ret) {
+               ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
+                           value, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 /* can be called only in mac80211 callbacks due to `key_count` usage */
 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
@@ -1789,6 +1877,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct ieee80211_sta_ht_cap ht_cap;
+       struct ieee80211_sta_vht_cap vht_cap;
        struct wmi_peer_assoc_complete_arg peer_arg;
        struct ieee80211_sta *ap_sta;
        int ret;
@@ -1811,6 +1900,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        /* ap_sta must be accessed only within rcu section which must be left
         * before calling ath10k_setup_peer_smps() which might sleep. */
        ht_cap = ap_sta->ht_cap;
+       vht_cap = ap_sta->vht_cap;
 
        ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
        if (ret) {
@@ -1836,6 +1926,13 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
                return;
        }
 
+       ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+       if (ret) {
+               ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
+                           arvif->vdev_id, bss_conf->bssid, ret);
+               return;
+       }
+
        ath10k_dbg(ar, ATH10K_DBG_MAC,
                   "mac vdev %d up (associated) bssid %pM aid %d\n",
                   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
@@ -1853,6 +1950,18 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        }
 
        arvif->is_up = true;
+
+       /* Workaround: Some firmware revisions (tested with qca6174
+        * WLAN.RM.2.0-00073) have a buggy powersave state machine and must
+        * be poked with a peer param command.
+        */
+       ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
+                                       WMI_PEER_DUMMY_VAR, 1);
+       if (ret) {
+               ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
+                           arvif->bssid, arvif->vdev_id, ret);
+               return;
+       }
 }
 
 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
@@ -1860,6 +1969,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ieee80211_sta_vht_cap vht_cap = {};
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
@@ -1874,6 +1984,13 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 
        arvif->def_wep_key_idx = -1;
 
+       ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+       if (ret) {
+               ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return;
+       }
+
        arvif->is_up = false;
 }
 
@@ -2554,6 +2671,17 @@ static int ath10k_start_scan(struct ath10k *ar,
                return -ETIMEDOUT;
        }
 
+       /* If we failed to start the scan, return an error code at
+        * this point.  This is probably due to some issue in the
+        * firmware, but there is no need to wedge the driver because of it.
+        */
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.state == ATH10K_SCAN_IDLE) {
+               spin_unlock_bh(&ar->data_lock);
+               return -EINVAL;
+       }
+       spin_unlock_bh(&ar->data_lock);
+
        /* Add a 200ms margin to account for event/command processing */
        ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
                                     msecs_to_jiffies(arg->max_scan_time+200));
@@ -3323,9 +3451,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        list_del(&arvif->list);
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
+               ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+                                            vif->addr);
                if (ret)
-                       ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n",
+                       ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
 
                kfree(arvif->u.ap.noa_data);
@@ -3339,6 +3468,21 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
                ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
                            arvif->vdev_id, ret);
 
+       /* Some firmware revisions don't notify the host about self-peer
+        * removal until after the associated vdev is deleted.
+        */
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
+                                                  vif->addr);
+               if (ret)
+                       ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+
+               spin_lock_bh(&ar->data_lock);
+               ar->num_peers--;
+               spin_unlock_bh(&ar->data_lock);
+       }
+
        ath10k_peer_cleanup(ar, arvif->vdev_id);
 
        mutex_unlock(&ar->conf_mutex);
@@ -3534,7 +3678,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        if (changed & BSS_CHANGED_PS) {
-               ret = ath10k_mac_vif_setup_ps(arvif);
+               arvif->ps = vif->bss_conf.ps;
+
+               ret = ath10k_config_ps(ar);
                if (ret)
                        ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
index e6972b09333ebe37638a586213c46f1e31d11d56..7681237fe298a4d430ec4d8ac95fd5fad2cc9de2 100644 (file)
@@ -104,7 +104,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
-               .src_sz_max = 512,
+               .src_sz_max = 2048,
                .dest_nentries = 512,
        },
 
@@ -174,7 +174,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
-               .nbytes_max = __cpu_to_le32(512),
+               .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },
index 04dc4b9db04e70de19e6b2a0ce13db772225f0d3..c8b64e7a6089c2ba2f874cb5aad3007c5c04f296 100644 (file)
@@ -110,8 +110,7 @@ struct wmi_ops {
                                          bool deliver_cab);
        struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
                                            const struct wmi_wmm_params_all_arg *arg);
-       struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
-                                            enum wmi_stats_id stats_id);
+       struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
        struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
                                             enum wmi_force_fw_hang_type type,
                                             u32 delay_ms);
@@ -816,14 +815,14 @@ ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
 }
 
 static inline int
-ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
 {
        struct sk_buff *skb;
 
        if (!ar->wmi.ops->gen_request_stats)
                return -EOPNOTSUPP;
 
-       skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
+       skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
index 71614ba1b145e590bd26a4cb8112237584248a11..ee0c5f602e297424b3f5eb143cdda542b1231291 100644 (file)
@@ -869,16 +869,57 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
        return 0;
 }
 
+static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
+                                          struct ath10k_fw_stats_vdev *dst)
+{
+       int i;
+
+       dst->vdev_id = __le32_to_cpu(src->vdev_id);
+       dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
+       dst->data_snr = __le32_to_cpu(src->data_snr);
+       dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
+       dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
+       dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
+       dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
+       dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
+       dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
+
+       for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+               dst->num_tx_frames[i] =
+                       __le32_to_cpu(src->num_tx_frames[i]);
+
+       for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+               dst->num_tx_frames_retries[i] =
+                       __le32_to_cpu(src->num_tx_frames_retries[i]);
+
+       for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+               dst->num_tx_frames_failures[i] =
+                       __le32_to_cpu(src->num_tx_frames_failures[i]);
+
+       for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+               dst->tx_rate_history[i] =
+                       __le32_to_cpu(src->tx_rate_history[i]);
+
+       for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+               dst->beacon_rssi_history[i] =
+                       __le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
 static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
                                           struct sk_buff *skb,
                                           struct ath10k_fw_stats *stats)
 {
        const void **tb;
-       const struct wmi_stats_event *ev;
+       const struct wmi_tlv_stats_ev *ev;
        const void *data;
-       u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+       u32 num_pdev_stats;
+       u32 num_vdev_stats;
+       u32 num_peer_stats;
+       u32 num_bcnflt_stats;
+       u32 num_chan_stats;
        size_t data_len;
        int ret;
+       int i;
 
        tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
        if (IS_ERR(tb)) {
@@ -899,8 +940,73 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
        num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
        num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
        num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+       num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+       num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
+                  num_pdev_stats, num_vdev_stats, num_peer_stats,
+                  num_bcnflt_stats, num_chan_stats);
+
+       for (i = 0; i < num_pdev_stats; i++) {
+               const struct wmi_pdev_stats *src;
+               struct ath10k_fw_stats_pdev *dst;
+
+               src = data;
+               if (data_len < sizeof(*src))
+                       return -EPROTO;
+
+               data += sizeof(*src);
+               data_len -= sizeof(*src);
+
+               dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+               if (!dst)
+                       continue;
+
+               ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+               ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+               ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+               list_add_tail(&dst->list, &stats->pdevs);
+       }
+
+       for (i = 0; i < num_vdev_stats; i++) {
+               const struct wmi_tlv_vdev_stats *src;
+               struct ath10k_fw_stats_vdev *dst;
+
+               src = data;
+               if (data_len < sizeof(*src))
+                       return -EPROTO;
+
+               data += sizeof(*src);
+               data_len -= sizeof(*src);
+
+               dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+               if (!dst)
+                       continue;
 
-       WARN_ON(1); /* FIXME: not implemented yet */
+               ath10k_wmi_tlv_pull_vdev_stats(src, dst);
+               list_add_tail(&dst->list, &stats->vdevs);
+       }
+
+       for (i = 0; i < num_peer_stats; i++) {
+               const struct wmi_10x_peer_stats *src;
+               struct ath10k_fw_stats_peer *dst;
+
+               src = data;
+               if (data_len < sizeof(*src))
+                       return -EPROTO;
+
+               data += sizeof(*src);
+               data_len -= sizeof(*src);
+
+               dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+               if (!dst)
+                       continue;
+
+               ath10k_wmi_pull_peer_stats(&src->old, dst);
+               dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+               list_add_tail(&dst->list, &stats->peers);
+       }
 
        kfree(tb);
        return 0;
@@ -1604,14 +1710,12 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
                                    const struct wmi_wmm_params_all_arg *arg)
 {
        struct wmi_tlv_vdev_set_wmm_cmd *cmd;
-       struct wmi_wmm_params *wmm;
        struct wmi_tlv *tlv;
        struct sk_buff *skb;
        size_t len;
        void *ptr;
 
-       len = (sizeof(*tlv) + sizeof(*cmd)) +
-             (4 * (sizeof(*tlv) + sizeof(*wmm)));
+       len = sizeof(*tlv) + sizeof(*cmd);
        skb = ath10k_wmi_alloc_skb(ar, len);
        if (!skb)
                return ERR_PTR(-ENOMEM);
@@ -1623,13 +1727,10 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
        cmd = (void *)tlv->value;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
 
-       ptr += sizeof(*tlv);
-       ptr += sizeof(*cmd);
-
-       ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
-       ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
-       ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
-       ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+       ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
+       ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
+       ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
+       ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
 
        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
        return skb;
@@ -2080,8 +2181,7 @@ ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
 }
 
 static struct sk_buff *
-ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
-                                   enum wmi_stats_id stats_id)
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 {
        struct wmi_request_stats_cmd *cmd;
        struct wmi_tlv *tlv;
@@ -2095,7 +2195,7 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
        tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
        tlv->len = __cpu_to_le16(sizeof(*cmd));
        cmd = (void *)tlv->value;
-       cmd->stats_id = __cpu_to_le32(stats_id);
+       cmd->stats_id = __cpu_to_le32(stats_mask);
 
        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
        return skb;
index de68fe76eae6eea583b048a7293a01858d6bc4c4..a6c8280cc4b194384c1abee08e8d379bd4754e23 100644 (file)
@@ -1302,8 +1302,14 @@ struct wmi_tlv_pdev_set_wmm_cmd {
        __le32 dg_type; /* no idea.. */
 } __packed;
 
+struct wmi_tlv_vdev_wmm_params {
+       __le32 dummy;
+       struct wmi_wmm_params params;
+} __packed;
+
 struct wmi_tlv_vdev_set_wmm_cmd {
        __le32 vdev_id;
+       struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
 } __packed;
 
 struct wmi_tlv_phyerr_ev {
@@ -1439,6 +1445,15 @@ struct wmi_tlv_sta_keepalive_cmd {
        __le32 interval; /* in seconds */
 } __packed;
 
+struct wmi_tlv_stats_ev {
+       __le32 stats_id; /* WMI_STAT_ */
+       __le32 num_pdev_stats;
+       __le32 num_vdev_stats;
+       __le32 num_peer_stats;
+       __le32 num_bcnflt_stats;
+       __le32 num_chan_stats;
+} __packed;
+
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
 #endif
index aeea1c7939434d561e61fb9eed4784dafa5ffacb..c7ea77edce245ccd389111ad3ac9fd3958d4bc89 100644 (file)
@@ -1125,6 +1125,25 @@ static void ath10k_wmi_event_scan_started(struct ath10k *ar)
        }
 }
 
+static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
+{
+       lockdep_assert_held(&ar->data_lock);
+
+       switch (ar->scan.state) {
+       case ATH10K_SCAN_IDLE:
+       case ATH10K_SCAN_RUNNING:
+       case ATH10K_SCAN_ABORTING:
+               ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
+                           ath10k_scan_state_str(ar->scan.state),
+                           ar->scan.state);
+               break;
+       case ATH10K_SCAN_STARTING:
+               complete(&ar->scan.started);
+               __ath10k_scan_finish(ar);
+               break;
+       }
+}
+
 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
 {
        lockdep_assert_held(&ar->data_lock);
@@ -1292,6 +1311,7 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_SCAN_EVENT_START_FAILED:
                ath10k_warn(ar, "received scan start failure event\n");
+               ath10k_wmi_event_scan_start_failed(ar);
                break;
        case WMI_SCAN_EVENT_DEQUEUED:
        case WMI_SCAN_EVENT_PREEMPTED:
@@ -4954,7 +4974,7 @@ ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
 }
 
 static struct sk_buff *
-ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 {
        struct wmi_request_stats_cmd *cmd;
        struct sk_buff *skb;
@@ -4964,9 +4984,10 @@ ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
                return ERR_PTR(-ENOMEM);
 
        cmd = (struct wmi_request_stats_cmd *)skb->data;
-       cmd->stats_id = __cpu_to_le32(stats_id);
+       cmd->stats_id = __cpu_to_le32(stats_mask);
 
-       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
+                  stats_mask);
        return skb;
 }
 
index 20ce3603e64b73374aeb1ce452a930092189e974..adf935bf0580f488708688c4728aaab1f7325dc5 100644 (file)
@@ -3057,8 +3057,12 @@ struct wmi_pdev_stats_peer {
 } __packed;
 
 enum wmi_stats_id {
-       WMI_REQUEST_PEER_STAT   = 0x01,
-       WMI_REQUEST_AP_STAT     = 0x02
+       WMI_STAT_PEER = BIT(0),
+       WMI_STAT_AP = BIT(1),
+       WMI_STAT_PDEV = BIT(2),
+       WMI_STAT_VDEV = BIT(3),
+       WMI_STAT_BCNFLT = BIT(4),
+       WMI_STAT_VDEV_RATE = BIT(5),
 };
 
 struct wlan_inst_rssi_args {
@@ -3093,7 +3097,7 @@ struct wmi_pdev_suspend_cmd {
 } __packed;
 
 struct wmi_stats_event {
-       __le32 stats_id; /* %WMI_REQUEST_ */
+       __le32 stats_id; /* WMI_STAT_ */
        /*
         * number of pdev stats event structures
         * (wmi_pdev_stats) 0 or 1
@@ -3745,6 +3749,11 @@ enum wmi_10x_vdev_param {
        WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
 };
 
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG                0x1
 /* slot time short */
@@ -4436,7 +4445,8 @@ enum wmi_peer_param {
        WMI_PEER_AUTHORIZE  = 0x3,
        WMI_PEER_CHAN_WIDTH = 0x4,
        WMI_PEER_NSS        = 0x5,
-       WMI_PEER_USE_4ADDR  = 0x6
+       WMI_PEER_USE_4ADDR  = 0x6,
+       WMI_PEER_DUMMY_VAR  = 0xff, /* dummy parameter for STA PS workaround */
 };
 
 struct wmi_peer_set_param_cmd {
index bc9cb356fa697fd67efcb186f7e28e890bbe3287..57a80e89822d7b36ffce951c98e9f12d158942cb 100644 (file)
@@ -528,7 +528,7 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
         * together with the BSSID mask when matching addresses.
         */
        iter_data.hw_macaddr = common->macaddr;
-       memset(&iter_data.mask, 0xff, ETH_ALEN);
+       eth_broadcast_addr(iter_data.mask);
        iter_data.found_active = false;
        iter_data.need_set_hw_addr = true;
        iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
index 85da63a67faf56f35f92615f3bae5cda67504c4d..e2978037d8588c3cb58e6dc48bd7968da0263a46 100644 (file)
@@ -2033,7 +2033,7 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
        int ret;
 
        /* Setup unicast pkt pattern */
-       memset(mac_mask, 0xff, ETH_ALEN);
+       eth_broadcast_addr(mac_mask);
        ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
                                vif->fw_vif_idx, WOW_LIST_ID,
                                ETH_ALEN, 0, ndev->dev_addr,
index b42ba46b50307d09e9972150297e7df0a7eb5c8e..1af3fed5a72caa203e9cbda00f677905ef966863 100644 (file)
@@ -105,7 +105,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
 
        memset(&ar->ap_stats.sta[sta->aid - 1], 0,
               sizeof(struct wmi_per_sta_stat));
-       memset(sta->mac, 0, ETH_ALEN);
+       eth_zero_addr(sta->mac);
        memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
        sta->aid = 0;
        sta->sta_flags = 0;
index 7b94a6c7db3d50dd4feb53dd74e1973bd68f2e6d..bd169fae32a1b7f8bad03dcc7564a56bd4ff456f 100644 (file)
@@ -284,12 +284,12 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
                  AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
        REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
 
-       if (mci->is_2g) {
+       if (mci->is_2g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
                ar9003_mci_send_lna_transfer(ah, true);
                udelay(5);
        }
 
-       if ((mci->is_2g && !mci->update_2g5g)) {
+       if (mci->is_2g && !mci->update_2g5g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
                if (ar9003_mci_wait_for_interrupt(ah,
                                        AR_MCI_INTERRUPT_RX_MSG_RAW,
                                        AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
@@ -593,7 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
                if (!time_out)
                        break;
 
-               offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
+               offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
 
                if (offset == MCI_GPM_INVALID)
                        continue;
@@ -657,7 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
                time_out = 0;
 
        while (more_data == MCI_GPM_MORE) {
-               offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
+               offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
                if (offset == MCI_GPM_INVALID)
                        break;
 
@@ -771,8 +771,14 @@ exit:
 
 static void ar9003_mci_mute_bt(struct ath_hw *ah)
 {
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
        /* disable all MCI messages */
        REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
        REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
 
        /* wait pending HW messages to flush out */
@@ -783,9 +789,10 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah)
         * 1. reset not after resuming from full sleep
         * 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment
         */
-       ar9003_mci_send_lna_take(ah, true);
-
-       udelay(5);
+       if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
+               ar9003_mci_send_lna_take(ah, true);
+               udelay(5);
+       }
 
        ar9003_mci_send_sys_sleeping(ah, true);
 }
@@ -821,6 +828,80 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
                      AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
 }
 
+static void ar9003_mci_stat_setup(struct ath_hw *ah)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+       if (!AR_SREV_9565(ah))
+               return;
+
+       if (mci->config & ATH_MCI_CONFIG_MCI_STAT_DBG) {
+               REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+                             AR_MCI_DBG_CNT_CTRL_ENABLE, 1);
+               REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+                             AR_MCI_DBG_CNT_CTRL_BT_LINKID,
+                             MCI_STAT_ALL_BT_LINKID);
+       } else {
+               REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+                             AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
+       }
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_1ANT(struct ath_hw *ah)
+{
+       u32 regval;
+
+       regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+                SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+                SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+                SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+                SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+                     AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_2ANT(struct ath_hw *ah)
+{
+       u32 regval;
+
+       regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+                SM(0, AR_BTCOEX_CTRL_PA_SHARED) |
+                SM(0, AR_BTCOEX_CTRL_LNA_SHARED) |
+                SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+                SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+                     AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x0);
+       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9462(struct ath_hw *ah)
+{
+       u32 regval;
+
+        regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+                SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+                SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+                SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+                SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
 int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
                     bool is_full_sleep)
 {
@@ -831,11 +912,6 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
        ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
                is_full_sleep, is_2g);
 
-       if (!mci->gpm_addr && !mci->sched_addr) {
-               ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
-               return -ENOMEM;
-       }
-
        if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
                ath_err(common, "BTCOEX control register is dead\n");
                return -EINVAL;
@@ -850,26 +926,17 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
        * To avoid MCI state machine be affected by incoming remote MCI msgs,
        * MCI mode will be enabled later, right before reset the MCI TX and RX.
        */
-
-       regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
-                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
-                SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
-                SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
-                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
-                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
-                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
        if (AR_SREV_9565(ah)) {
-               regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
-                         SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
-               REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
-                             AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+               u8 ant = MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH);
+
+               if (ant == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED)
+                       ar9003_mci_set_btcoex_ctrl_9565_1ANT(ah);
+               else
+                       ar9003_mci_set_btcoex_ctrl_9565_2ANT(ah);
        } else {
-               regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
-                         SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+               ar9003_mci_set_btcoex_ctrl_9462(ah);
        }
 
-       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
-
        if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
                ar9003_mci_osla_setup(ah, true);
        else
@@ -926,23 +993,26 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
        regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
        REG_WRITE(ah, AR_MCI_COMMAND2, regval);
 
-       ar9003_mci_get_next_gpm_offset(ah, true, NULL);
+       /* Init GPM offset after MCI Reset Rx */
+       ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
 
        REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
                  (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
                   SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
 
-       REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
-                   AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+       if (MCI_ANT_ARCH_PA_LNA_SHARED(mci))
+               REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+                           AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+       else
+               REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+                           AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
 
        ar9003_mci_observation_set_up(ah);
 
        mci->ready = true;
        ar9003_mci_prep_interface(ah);
+       ar9003_mci_stat_setup(ah);
 
-       if (AR_SREV_9565(ah))
-               REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
-                             AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
        if (en_int)
                ar9003_mci_enable_interrupt(ah);
 
@@ -1218,6 +1288,14 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
                }
                value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
                break;
+       case MCI_STATE_INIT_GPM_OFFSET:
+               value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+
+               if (value < mci->gpm_len)
+                       mci->gpm_idx = value;
+               else
+                       mci->gpm_idx = 0;
+               break;
        case MCI_STATE_LAST_SCHD_MSG_OFFSET:
                value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
                                    AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1364,21 +1442,11 @@ void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
        mci->gpm_idx = 0;
 }
 
-u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more)
 {
        struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
        u32 offset, more_gpm = 0, gpm_ptr;
 
-       if (first) {
-               gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
-
-               if (gpm_ptr >= mci->gpm_len)
-                       gpm_ptr = 0;
-
-               mci->gpm_idx = gpm_ptr;
-               return gpm_ptr;
-       }
-
        /*
         * This could be useful to avoid new GPM message interrupt which
         * may lead to spurious interrupt after power sleep, or multiple
index 66d7ab9f920dbccf15739e4511ae3089330acda4..e288611c12d50de2959c8a0178597c60097ae6b9 100644 (file)
@@ -92,14 +92,36 @@ enum mci_gpm_coex_bt_update_flags_op {
 #define ATH_MCI_CONFIG_CLK_DIV              0x00003000
 #define ATH_MCI_CONFIG_CLK_DIV_S            12
 #define ATH_MCI_CONFIG_DISABLE_TUNING       0x00004000
+#define ATH_MCI_CONFIG_DISABLE_AIC          0x00008000
+#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN     0x007f0000
+#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN_S   16
+#define ATH_MCI_CONFIG_NO_QUIET_ACK         0x00800000
+#define ATH_MCI_CONFIG_NO_QUIET_ACK_S       23
+#define ATH_MCI_CONFIG_ANT_ARCH             0x07000000
+#define ATH_MCI_CONFIG_ANT_ARCH_S           24
+#define ATH_MCI_CONFIG_FORCE_QUIET_ACK      0x08000000
+#define ATH_MCI_CONFIG_FORCE_QUIET_ACK_S    27
+#define ATH_MCI_CONFIG_FORCE_2CHAIN_ACK     0x10000000
+#define ATH_MCI_CONFIG_MCI_STAT_DBG         0x20000000
 #define ATH_MCI_CONFIG_MCI_WEIGHT_DBG       0x40000000
 #define ATH_MCI_CONFIG_DISABLE_MCI          0x80000000
 
 #define ATH_MCI_CONFIG_MCI_OBS_MASK     (ATH_MCI_CONFIG_MCI_OBS_MCI  | \
                                         ATH_MCI_CONFIG_MCI_OBS_TXRX | \
                                         ATH_MCI_CONFIG_MCI_OBS_BT)
+
 #define ATH_MCI_CONFIG_MCI_OBS_GPIO     0x0000002F
 
+#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_NON_SHARED 0x00
+#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED     0x01
+#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_NON_SHARED 0x02
+#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED     0x03
+#define ATH_MCI_ANT_ARCH_3_ANT                   0x04
+
+#define MCI_ANT_ARCH_PA_LNA_SHARED(mci)                                        \
+       ((MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED) || \
+        (MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED))
+
 enum mci_message_header {              /* length of payload */
        MCI_LNA_CTRL     = 0x10,        /* len = 0 */
        MCI_CONT_NACK    = 0x20,        /* len = 0 */
@@ -188,20 +210,55 @@ enum mci_bt_state {
        MCI_BT_CAL
 };
 
+enum mci_ps_state {
+       MCI_PS_DISABLE,
+       MCI_PS_ENABLE,
+       MCI_PS_ENABLE_OFF,
+       MCI_PS_ENABLE_ON
+};
+
 /* Type of state query */
 enum mci_state_type {
        MCI_STATE_ENABLE,
+       MCI_STATE_INIT_GPM_OFFSET,
+       MCI_STATE_CHECK_GPM_OFFSET,
+       MCI_STATE_NEXT_GPM_OFFSET,
+       MCI_STATE_LAST_GPM_OFFSET,
+       MCI_STATE_BT,
+       MCI_STATE_SET_BT_SLEEP,
        MCI_STATE_SET_BT_AWAKE,
+       MCI_STATE_SET_BT_CAL_START,
+       MCI_STATE_SET_BT_CAL,
        MCI_STATE_LAST_SCHD_MSG_OFFSET,
        MCI_STATE_REMOTE_SLEEP,
+       MCI_STATE_CONT_STATUS,
        MCI_STATE_RESET_REQ_WAKE,
        MCI_STATE_SEND_WLAN_COEX_VERSION,
+       MCI_STATE_SET_BT_COEX_VERSION,
+       MCI_STATE_SEND_WLAN_CHANNELS,
        MCI_STATE_SEND_VERSION_QUERY,
        MCI_STATE_SEND_STATUS_QUERY,
+       MCI_STATE_NEED_FLUSH_BT_INFO,
+       MCI_STATE_SET_CONCUR_TX_PRI,
        MCI_STATE_RECOVER_RX,
        MCI_STATE_NEED_FTP_STOMP,
+       MCI_STATE_NEED_TUNING,
+       MCI_STATE_NEED_STAT_DEBUG,
+       MCI_STATE_SHARED_CHAIN_CONCUR_TX,
+       MCI_STATE_AIC_CAL,
+       MCI_STATE_AIC_START,
+       MCI_STATE_AIC_CAL_RESET,
+       MCI_STATE_AIC_CAL_SINGLE,
+       MCI_STATE_IS_AR9462,
+       MCI_STATE_IS_AR9565_1ANT,
+       MCI_STATE_IS_AR9565_2ANT,
+       MCI_STATE_WLAN_WEAK_SIGNAL,
+       MCI_STATE_SET_WLAN_PS_STATE,
+       MCI_STATE_GET_WLAN_PS_STATE,
        MCI_STATE_DEBUG,
-       MCI_STATE_NEED_FLUSH_BT_INFO,
+       MCI_STATE_STAT_DEBUG,
+       MCI_STATE_ALLOW_FCS,
+       MCI_STATE_SET_2G_CONTENTION,
        MCI_STATE_MAX
 };
 
@@ -255,7 +312,7 @@ int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
 void ar9003_mci_cleanup(struct ath_hw *ah);
 void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
                              u32 *rx_msg_intr);
-u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more);
 void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
 void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
 /*
index 86bfc9604dcabec92e8ba4703f1bdc3637ad30f5..bea41df9fbd7407575f79349ac7d4b68ed72d955 100644 (file)
 #include "reg_wow.h"
 #include "hw-ops.h"
 
+static void ath9k_hw_set_sta_powersave(struct ath_hw *ah)
+{
+       if (!ath9k_hw_mci_is_enabled(ah))
+               goto set;
+       /*
+        * If MCI is being used, set PWR_SAV only when MCI's
+        * PS state is disabled.
+        */
+       if (ar9003_mci_state(ah, MCI_STATE_GET_WLAN_PS_STATE) != MCI_PS_DISABLE)
+               return;
+set:
+       REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+}
+
 static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
 
-       REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+       ath9k_hw_set_sta_powersave(ah);
 
        /* set rx disable bit */
        REG_WRITE(ah, AR_CR, AR_CR_RXD);
@@ -44,6 +58,9 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
                        REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
        }
 
+       if (ath9k_hw_mci_is_enabled(ah))
+               REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
        REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
 }
 
@@ -74,8 +91,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
        for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
 
-       REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
        data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
                       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
        data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
@@ -88,9 +103,11 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
                       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
        data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
 
-       if (AR_SREV_9462_20(ah)) {
-               /* AR9462 2.0 has an extra descriptor word (time based
-                * discard) compared to other chips */
+       if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
+               /*
+                * AR9462 2.0 and AR9565 have an extra descriptor word
+                * (time based discard) compared to other chips.
+                */
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
                wow_ka_data_word0 = AR_WOW_TXBUF(13);
        } else {
@@ -99,7 +116,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
 
        for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
                REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
-
 }
 
 int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
@@ -170,18 +186,17 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
        u32 val = 0, rval;
 
        /*
-        * read the WoW status register to know
-        * the wakeup reason
+        * Read the WoW status register to know
+        * the wakeup reason.
         */
        rval = REG_READ(ah, AR_WOW_PATTERN);
        val = AR_WOW_STATUS(rval);
 
        /*
-        * mask only the WoW events that we have enabled. Sometimes
+        * Mask only the WoW events that we have enabled. Sometimes
         * we have spurious WoW events from the AR_WOW_PATTERN
         * register. This mask will clean it up.
         */
-
        val &= ah->wow.wow_event_mask;
 
        if (val) {
@@ -195,6 +210,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
                        wow_status |= AH_WOW_BEACON_MISS;
        }
 
+       rval = REG_READ(ah, AR_MAC_PCU_WOW4);
+       val = AR_WOW_STATUS2(rval);
+       val &= ah->wow.wow_event_mask2;
+
+       if (val) {
+               if (AR_WOW2_PATTERN_FOUND(val))
+                       wow_status |= AH_WOW_USER_PATTERN_EN;
+       }
+
        /*
         * set and clear WOW_PME_CLEAR registers for the chip to
         * generate next wow signal.
@@ -206,10 +230,12 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
                AR_PMCTRL_PWR_STATE_D1D3);
 
        /*
-        * clear all events
+        * Clear all events.
         */
        REG_WRITE(ah, AR_WOW_PATTERN,
                  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+       REG_WRITE(ah, AR_MAC_PCU_WOW4,
+                 AR_WOW_CLEAR_EVENTS2(REG_READ(ah, AR_MAC_PCU_WOW4)));
 
        /*
         * restore the beacon threshold to init value
@@ -226,7 +252,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
        if (ah->is_pciexpress)
                ath9k_hw_configpcipowersave(ah, false);
 
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9485(ah)) {
+               u32 dc = REG_READ(ah, AR_DIRECT_CONNECT);
+
+               if (!(dc & AR_DC_TSF2_ENABLE))
+                       ath9k_hw_gen_timer_start_tsf2(ah);
+       }
+
        ah->wow.wow_event_mask = 0;
+       ah->wow.wow_event_mask2 = 0;
 
        return wow_status;
 }
@@ -408,6 +442,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
 
        ath9k_hw_wow_set_arwr_reg(ah);
 
+       if (ath9k_hw_mci_is_enabled(ah))
+               REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
        /* HW WoW */
        REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));
 
index 0f8e9464e4ab36963ef015ec19c0a4537d8a668f..7e89236c0e13795fb1419f91af825685c0e2b0bc 100644 (file)
@@ -645,6 +645,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
                               struct ath9k_vif_iter_data *iter_data);
 void ath9k_calculate_summary_state(struct ath_softc *sc,
                                   struct ath_chanctx *ctx);
+void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif);
 
 /*******************/
 /* Beacon Handling */
index 3dfc2c7f1f07862ce81238aa297b4a45cb944fd8..5a084d94ed90793f22d5964bee6114a63a26f605 100644 (file)
@@ -103,7 +103,9 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
                return;
        }
 
-       if (AR_SREV_9300_20_OR_LATER(ah)) {
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+               btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+       } else if (AR_SREV_9300_20_OR_LATER(ah)) {
                btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
                btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
                btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -307,6 +309,18 @@ static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
        btcoex->enabled = true;
 }
 
+static void ath9k_hw_btcoex_disable_mci(struct ath_hw *ah)
+{
+       struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+       int i;
+
+       ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+
+       for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+               REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+                         btcoex_hw->wlan_weight[i]);
+}
+
 void ath9k_hw_btcoex_enable(struct ath_hw *ah)
 {
        struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@@ -318,17 +332,18 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
                ath9k_hw_btcoex_enable_2wire(ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
-               if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
-                       ath9k_hw_btcoex_enable_mci(ah);
-                       return;
-               }
                ath9k_hw_btcoex_enable_3wire(ah);
                break;
+       case ATH_BTCOEX_CFG_MCI:
+               ath9k_hw_btcoex_enable_mci(ah);
+               break;
        }
 
-       REG_RMW(ah, AR_GPIO_PDPU,
-               (0x2 << (btcoex_hw->btactive_gpio * 2)),
-               (0x3 << (btcoex_hw->btactive_gpio * 2)));
+       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
+               REG_RMW(ah, AR_GPIO_PDPU,
+                       (0x2 << (btcoex_hw->btactive_gpio * 2)),
+                       (0x3 << (btcoex_hw->btactive_gpio * 2)));
+       }
 
        ah->btcoex_hw.enabled = true;
 }
@@ -340,14 +355,14 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
        int i;
 
        btcoex_hw->enabled = false;
-       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
-               ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
-               for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
-                       REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
-                                 btcoex_hw->wlan_weight[i]);
+
+       if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) {
+               ath9k_hw_btcoex_disable_mci(ah);
                return;
        }
-       ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
+
+       if (!AR_SREV_9300_20_OR_LATER(ah))
+               ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
 
        ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
                        AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
index 6de26ea5d5fa11e190d89edd92dcec24058e1f75..5fe62ff2223b4311829671327f2d9f9f189948ae 100644 (file)
@@ -58,6 +58,7 @@ enum ath_btcoex_scheme {
        ATH_BTCOEX_CFG_NONE,
        ATH_BTCOEX_CFG_2WIRE,
        ATH_BTCOEX_CFG_3WIRE,
+       ATH_BTCOEX_CFG_MCI,
 };
 
 struct ath9k_hw_mci {
index 50a2e0ac3b8b4c5a8f5653e6dd59b15318fb56df..dbf8f495964217e1b5799fb165155ff3c88b4894 100644 (file)
@@ -1156,7 +1156,10 @@ static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
 
        if (tpc_enabled != ah->tpc_enabled) {
                ah->tpc_enabled = tpc_enabled;
-               ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+
+               mutex_lock(&sc->mutex);
+               ath9k_set_txpower(sc, NULL);
+               mutex_unlock(&sc->mutex);
        }
 
        return count;
index da344b27326c9f036e0ebef1ac9d48c5179a7134..86d46c196966f0e65b3c963011b124318f592f8b 100644 (file)
@@ -202,17 +202,16 @@ static void ath_btcoex_period_timer(unsigned long data)
        }
        spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
-       ath9k_mci_update_rssi(sc);
-
        ath9k_ps_wakeup(sc);
+       spin_lock_bh(&btcoex->btcoex_lock);
 
-       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
-               ath_detect_bt_priority(sc);
-
-       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+               ath9k_mci_update_rssi(sc);
                ath_mci_ftp_adjust(sc);
+       }
 
-       spin_lock_bh(&btcoex->btcoex_lock);
+       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+               ath_detect_bt_priority(sc);
 
        stomp_type = btcoex->bt_stomp_type;
        timer_period = btcoex->btcoex_no_stomp;
@@ -252,9 +251,6 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg)
        struct ath_softc *sc = (struct ath_softc *)arg;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_btcoex *btcoex = &sc->btcoex;
-       struct ath_common *common = ath9k_hw_common(ah);
-
-       ath_dbg(common, BTCOEX, "no stomp timer running\n");
 
        ath9k_ps_wakeup(sc);
        spin_lock_bh(&btcoex->btcoex_lock);
@@ -271,7 +267,7 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg)
        ath9k_ps_restore(sc);
 }
 
-static int ath_init_btcoex_timer(struct ath_softc *sc)
+static void ath_init_btcoex_timer(struct ath_softc *sc)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
 
@@ -280,6 +276,7 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
                btcoex->btcoex_period / 100;
        btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
                                   btcoex->btcoex_period / 100;
+       btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
 
        setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
                        (unsigned long) sc);
@@ -287,8 +284,6 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
                        (unsigned long) sc);
 
        spin_lock_init(&btcoex->btcoex_lock);
-
-       return 0;
 }
 
 /*
@@ -299,6 +294,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_hw *ah = sc->sc_ah;
 
+       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
+           ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
+               return;
+
        ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
 
        /* make sure duty cycle timer is also stopped when resuming */
@@ -312,13 +311,19 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
        mod_timer(&btcoex->period_timer, jiffies);
 }
 
-
 /*
  * Pause btcoex timer and bt duty cycle timer
  */
 void ath9k_btcoex_timer_pause(struct ath_softc *sc)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
+       struct ath_hw *ah = sc->sc_ah;
+
+       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
+           ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
+               return;
+
+       ath_dbg(ath9k_hw_common(ah), BTCOEX, "Stopping btcoex timers\n");
 
        del_timer_sync(&btcoex->period_timer);
        del_timer_sync(&btcoex->no_stomp_timer);
@@ -356,33 +361,33 @@ void ath9k_start_btcoex(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
 
-       if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
-           !ah->btcoex_hw.enabled) {
-               if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
-                       ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                                                  AR_STOMP_LOW_WLAN_WGHT, 0);
-               else
-                       ath9k_hw_btcoex_set_weight(ah, 0, 0,
-                                                  ATH_BTCOEX_STOMP_NONE);
-               ath9k_hw_btcoex_enable(ah);
+       if (ah->btcoex_hw.enabled ||
+           ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+               return;
 
-               if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
-                       ath9k_btcoex_timer_resume(sc);
-       }
+       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+               ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+                                          AR_STOMP_LOW_WLAN_WGHT, 0);
+       else
+               ath9k_hw_btcoex_set_weight(ah, 0, 0,
+                                          ATH_BTCOEX_STOMP_NONE);
+       ath9k_hw_btcoex_enable(ah);
+       ath9k_btcoex_timer_resume(sc);
 }
 
 void ath9k_stop_btcoex(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
 
-       if (ah->btcoex_hw.enabled &&
-           ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
-               if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
-                       ath9k_btcoex_timer_pause(sc);
-               ath9k_hw_btcoex_disable(ah);
-               if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
-                       ath_mci_flush_profile(&sc->btcoex.mci);
-       }
+       if (!ah->btcoex_hw.enabled ||
+           ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+               return;
+
+       ath9k_btcoex_timer_pause(sc);
+       ath9k_hw_btcoex_disable(ah);
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+               ath_mci_flush_profile(&sc->btcoex.mci);
 }
 
 void ath9k_deinit_btcoex(struct ath_softc *sc)
@@ -409,22 +414,20 @@ int ath9k_init_btcoex(struct ath_softc *sc)
                break;
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_init_3wire(sc->sc_ah);
-               r = ath_init_btcoex_timer(sc);
-               if (r)
-                       return -1;
+               ath_init_btcoex_timer(sc);
                txq = sc->tx.txq_map[IEEE80211_AC_BE];
                ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
-               sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
-               if (ath9k_hw_mci_is_enabled(ah)) {
-                       sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
-                       INIT_LIST_HEAD(&sc->btcoex.mci.info);
+               break;
+       case ATH_BTCOEX_CFG_MCI:
+               ath_init_btcoex_timer(sc);
 
-                       r = ath_mci_setup(sc);
-                       if (r)
-                               return r;
+               sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+               INIT_LIST_HEAD(&sc->btcoex.mci.info);
+               ath9k_hw_btcoex_init_mci(ah);
 
-                       ath9k_hw_btcoex_init_mci(ah);
-               }
+               r = ath_mci_setup(sc);
+               if (r)
+                       return r;
 
                break;
        default:
index 8e7153b186ede94c4409fb422da8e7d4c58e9b0c..10c02f5cbc5eb8ec45abbbbab4c11ad973aabbc1 100644 (file)
@@ -40,6 +40,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
        { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
        { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
        { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
+       { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
 
        { USB_DEVICE(0x0cf3, 0x7015),
          .driver_info = AR9287_USB },  /* Atheros */
index 92d5a6c5a2253b6fbc54e2ad4cb79d15b7b0e45b..564923c0df87cdad5226da74a0d9e575cc66bfd4 100644 (file)
@@ -149,7 +149,7 @@ static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
         * when matching addresses.
         */
        iter_data.hw_macaddr = NULL;
-       memset(&iter_data.mask, 0xff, ETH_ALEN);
+       eth_broadcast_addr(iter_data.mask);
 
        if (vif)
                ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
index e82e570de330386c31e76f010c3b6a5e65b79b37..29a25d92add7453161d51eca5c6bb99b802328f3 100644 (file)
@@ -27,6 +27,7 @@
 #include "eeprom.h"
 #include "calib.h"
 #include "reg.h"
+#include "reg_mci.h"
 #include "phy.h"
 #include "btcoex.h"
 #include "dynack.h"
index 9ede991b8d767cfd2268a9137dcaa57d171af174..b0badef71ce793e5bc85358e0166208edbb9688b 100644 (file)
@@ -994,7 +994,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
         * BSSID mask when matching addresses.
         */
        memset(iter_data, 0, sizeof(*iter_data));
-       memset(&iter_data->mask, 0xff, ETH_ALEN);
+       eth_broadcast_addr(iter_data->mask);
        iter_data->slottime = ATH9K_SLOT_TIME_9;
 
        list_for_each_entry(avp, &ctx->vifs, list)
@@ -1139,7 +1139,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
                        ctx->primary_sta = iter_data.primary_sta;
                } else {
                        ctx->primary_sta = NULL;
-                       memset(common->curbssid, 0, ETH_ALEN);
+                       eth_zero_addr(common->curbssid);
                        common->curaid = 0;
                        ath9k_hw_write_associd(sc->sc_ah);
                        if (ath9k_hw_mci_is_enabled(sc->sc_ah))
@@ -1172,6 +1172,38 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
        ath9k_ps_restore(sc);
 }
 
+static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       int *power = (int *)data;
+
+       if (*power < vif->bss_conf.txpower)
+               *power = vif->bss_conf.txpower;
+}
+
+/* Called with sc->mutex held. */
+void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+       int power;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+
+       ath9k_ps_wakeup(sc);
+       if (ah->tpc_enabled) {
+               power = (vif) ? vif->bss_conf.txpower : -1;
+               ieee80211_iterate_active_interfaces_atomic(
+                               sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                               ath9k_tpc_vif_iter, &power);
+               if (power == -1)
+                       power = sc->hw->conf.power_level;
+       } else {
+               power = sc->hw->conf.power_level;
+       }
+       sc->cur_chan->txpower = 2 * power;
+       ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+       sc->cur_chan->cur_txpower = reg->max_power_level;
+       ath9k_ps_restore(sc);
+}
+
 static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
 {
@@ -1225,6 +1257,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
 
        ath9k_assign_hw_queues(hw, vif);
 
+       ath9k_set_txpower(sc, vif);
+
        an->sc = sc;
        an->sta = NULL;
        an->vif = vif;
@@ -1265,6 +1299,8 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        ath9k_assign_hw_queues(hw, vif);
        ath9k_calculate_summary_state(sc, avp->chanctx);
 
+       ath9k_set_txpower(sc, vif);
+
        mutex_unlock(&sc->mutex);
        return 0;
 }
@@ -1294,6 +1330,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        ath9k_calculate_summary_state(sc, avp->chanctx);
 
+       ath9k_set_txpower(sc, NULL);
+
        mutex_unlock(&sc->mutex);
 }
 
@@ -1397,14 +1435,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_POWER) {
-               ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
-               sc->cur_chan->txpower = 2 * conf->power_level;
-               ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
-                                      sc->cur_chan->txpower,
-                                      &sc->cur_chan->cur_txpower);
-       }
-
        mutex_unlock(&sc->mutex);
        ath9k_ps_restore(sc);
 
@@ -1764,6 +1794,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        if (changed & CHECK_ANI)
                ath_check_ani(sc);
 
+       if (changed & BSS_CHANGED_TXPOWER) {
+               ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n",
+                       vif->addr, bss_conf->txpower, bss_conf->txpower_type);
+               ath9k_set_txpower(sc, vif);
+       }
+
        mutex_unlock(&sc->mutex);
        ath9k_ps_restore(sc);
 
index 3f7a11edb82a77dedcfdc5f38fa4c6c832e9579e..66596b95273fe6f98eef7756c8ca55297e35d023 100644 (file)
@@ -495,7 +495,7 @@ void ath_mci_intr(struct ath_softc *sc)
        ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
 
        if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
-               ar9003_mci_get_next_gpm_offset(ah, true, NULL);
+               ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
                return;
        }
 
@@ -559,8 +559,7 @@ void ath_mci_intr(struct ath_softc *sc)
                                return;
 
                        pgpm = mci->gpm_buf.bf_addr;
-                       offset = ar9003_mci_get_next_gpm_offset(ah, false,
-                                                               &more_data);
+                       offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
 
                        if (offset == MCI_GPM_INVALID)
                                break;
index 9587ec655680a281c4ed0338196989f385219900..1234399a43dd78692a52507a78185729a219a329 100644 (file)
@@ -2044,279 +2044,4 @@ enum {
 #define AR_PHY_AGC_CONTROL_YCOK_MAX            0x000003c0
 #define AR_PHY_AGC_CONTROL_YCOK_MAX_S          6
 
-/* MCI Registers */
-
-#define AR_MCI_COMMAND0                                0x1800
-#define AR_MCI_COMMAND0_HEADER                 0xFF
-#define AR_MCI_COMMAND0_HEADER_S               0
-#define AR_MCI_COMMAND0_LEN                    0x1f00
-#define AR_MCI_COMMAND0_LEN_S                  8
-#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP      0x2000
-#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S    13
-
-#define AR_MCI_COMMAND1                                0x1804
-
-#define AR_MCI_COMMAND2                                0x1808
-#define AR_MCI_COMMAND2_RESET_TX               0x01
-#define AR_MCI_COMMAND2_RESET_TX_S             0
-#define AR_MCI_COMMAND2_RESET_RX               0x02
-#define AR_MCI_COMMAND2_RESET_RX_S             1
-#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES     0x3FC
-#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S   2
-#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP        0x400
-#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S      10
-
-#define AR_MCI_RX_CTRL                         0x180c
-
-#define AR_MCI_TX_CTRL                         0x1810
-/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
-#define AR_MCI_TX_CTRL_CLK_DIV                 0x03
-#define AR_MCI_TX_CTRL_CLK_DIV_S               0
-#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE      0x04
-#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S    2
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ                0xFFFFF8
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S      3
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM         0xF000000
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S       24
-
-#define AR_MCI_MSG_ATTRIBUTES_TABLE                    0x1814
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM           0xFFFF
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S         0
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR                0xFFFF0000
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S      16
-
-#define AR_MCI_SCHD_TABLE_0                            0x1818
-#define AR_MCI_SCHD_TABLE_1                            0x181c
-#define AR_MCI_GPM_0                                   0x1820
-#define AR_MCI_GPM_1                                   0x1824
-#define AR_MCI_GPM_WRITE_PTR                           0xFFFF0000
-#define AR_MCI_GPM_WRITE_PTR_S                         16
-#define AR_MCI_GPM_BUF_LEN                             0x0000FFFF
-#define AR_MCI_GPM_BUF_LEN_S                           0
-
-#define AR_MCI_INTERRUPT_RAW                           0x1828
-#define AR_MCI_INTERRUPT_EN                            0x182c
-#define AR_MCI_INTERRUPT_SW_MSG_DONE                   0x00000001
-#define AR_MCI_INTERRUPT_SW_MSG_DONE_S                 0
-#define AR_MCI_INTERRUPT_CPU_INT_MSG                   0x00000002
-#define AR_MCI_INTERRUPT_CPU_INT_MSG_S                 1
-#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL                 0x00000004
-#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S               2
-#define AR_MCI_INTERRUPT_RX_INVALID_HDR                        0x00000008
-#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S              3
-#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL                        0x00000010
-#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S              4
-#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL                        0x00000020
-#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S              5
-#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL                        0x00000080
-#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S              7
-#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL                        0x00000100
-#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S              8
-#define AR_MCI_INTERRUPT_RX_MSG                                0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_S                      9
-#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE           0x00000400
-#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S         10
-#define AR_MCI_INTERRUPT_BT_PRI                                0x07fff800
-#define AR_MCI_INTERRUPT_BT_PRI_S                      11
-#define AR_MCI_INTERRUPT_BT_PRI_THRESH                 0x08000000
-#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S               27
-#define AR_MCI_INTERRUPT_BT_FREQ                       0x10000000
-#define AR_MCI_INTERRUPT_BT_FREQ_S                     28
-#define AR_MCI_INTERRUPT_BT_STOMP                      0x20000000
-#define AR_MCI_INTERRUPT_BT_STOMP_S                    29
-#define AR_MCI_INTERRUPT_BB_AIC_IRQ                    0x40000000
-#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S                  30
-#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT             0x80000000
-#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S           31
-
-#define AR_MCI_INTERRUPT_DEFAULT    (AR_MCI_INTERRUPT_SW_MSG_DONE        | \
-                                    AR_MCI_INTERRUPT_RX_INVALID_HDR      | \
-                                    AR_MCI_INTERRUPT_RX_HW_MSG_FAIL      | \
-                                    AR_MCI_INTERRUPT_RX_SW_MSG_FAIL      | \
-                                    AR_MCI_INTERRUPT_TX_HW_MSG_FAIL      | \
-                                    AR_MCI_INTERRUPT_TX_SW_MSG_FAIL      | \
-                                    AR_MCI_INTERRUPT_RX_MSG              | \
-                                    AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
-                                    AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
-
-#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
-                                       AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
-                                       AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
-                                       AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
-
-#define AR_MCI_REMOTE_CPU_INT                          0x1830
-#define AR_MCI_REMOTE_CPU_INT_EN                       0x1834
-#define AR_MCI_INTERRUPT_RX_MSG_RAW                    0x1838
-#define AR_MCI_INTERRUPT_RX_MSG_EN                     0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET           0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S         0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL            0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S          1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK              0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S            2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO              0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S            3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST               0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S             4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO              0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S            5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT                        0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S              6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM                    0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S                  8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO               0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S             9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING           0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S         10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING             0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S           11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE               0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S             12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK         (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO  | \
-                                         AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
-                                         AR_MCI_INTERRUPT_RX_MSG_LNA_INFO   | \
-                                         AR_MCI_INTERRUPT_RX_MSG_CONT_NACK  | \
-                                         AR_MCI_INTERRUPT_RX_MSG_CONT_INFO  | \
-                                         AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
-
-#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM    | \
-                                        AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
-                                        AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING  | \
-                                        AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
-                                        AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
-
-#define AR_MCI_CPU_INT                                 0x1840
-
-#define AR_MCI_RX_STATUS                       0x1844
-#define AR_MCI_RX_LAST_SCHD_MSG_INDEX          0x00000F00
-#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S                8
-#define AR_MCI_RX_REMOTE_SLEEP                 0x00001000
-#define AR_MCI_RX_REMOTE_SLEEP_S               12
-#define AR_MCI_RX_MCI_CLK_REQ                  0x00002000
-#define AR_MCI_RX_MCI_CLK_REQ_S                        13
-
-#define AR_MCI_CONT_STATUS                     0x1848
-#define AR_MCI_CONT_RSSI_POWER                 0x000000FF
-#define AR_MCI_CONT_RSSI_POWER_S               0
-#define AR_MCI_CONT_PRIORITY                   0x0000FF00
-#define AR_MCI_CONT_PRIORITY_S                 8
-#define AR_MCI_CONT_TXRX                       0x00010000
-#define AR_MCI_CONT_TXRX_S                     16
-
-#define AR_MCI_BT_PRI0                         0x184c
-#define AR_MCI_BT_PRI1                         0x1850
-#define AR_MCI_BT_PRI2                         0x1854
-#define AR_MCI_BT_PRI3                         0x1858
-#define AR_MCI_BT_PRI                          0x185c
-#define AR_MCI_WL_FREQ0                                0x1860
-#define AR_MCI_WL_FREQ1                                0x1864
-#define AR_MCI_WL_FREQ2                                0x1868
-#define AR_MCI_GAIN                            0x186c
-#define AR_MCI_WBTIMER1                                0x1870
-#define AR_MCI_WBTIMER2                                0x1874
-#define AR_MCI_WBTIMER3                                0x1878
-#define AR_MCI_WBTIMER4                                0x187c
-#define AR_MCI_MAXGAIN                         0x1880
-#define AR_MCI_HW_SCHD_TBL_CTL                 0x1884
-#define AR_MCI_HW_SCHD_TBL_D0                  0x1888
-#define AR_MCI_HW_SCHD_TBL_D1                  0x188c
-#define AR_MCI_HW_SCHD_TBL_D2                  0x1890
-#define AR_MCI_HW_SCHD_TBL_D3                  0x1894
-#define AR_MCI_TX_PAYLOAD0                     0x1898
-#define AR_MCI_TX_PAYLOAD1                     0x189c
-#define AR_MCI_TX_PAYLOAD2                     0x18a0
-#define AR_MCI_TX_PAYLOAD3                     0x18a4
-#define AR_BTCOEX_WBTIMER                      0x18a8
-
-#define AR_BTCOEX_CTRL                                 0x18ac
-#define AR_BTCOEX_CTRL_AR9462_MODE                     0x00000001
-#define AR_BTCOEX_CTRL_AR9462_MODE_S                   0
-#define AR_BTCOEX_CTRL_WBTIMER_EN                      0x00000002
-#define AR_BTCOEX_CTRL_WBTIMER_EN_S                    1
-#define AR_BTCOEX_CTRL_MCI_MODE_EN                     0x00000004
-#define AR_BTCOEX_CTRL_MCI_MODE_EN_S                   2
-#define AR_BTCOEX_CTRL_LNA_SHARED                      0x00000008
-#define AR_BTCOEX_CTRL_LNA_SHARED_S                    3
-#define AR_BTCOEX_CTRL_PA_SHARED                       0x00000010
-#define AR_BTCOEX_CTRL_PA_SHARED_S                     4
-#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN          0x00000020
-#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S                5
-#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN       0x00000040
-#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S     6
-#define AR_BTCOEX_CTRL_NUM_ANTENNAS                    0x00000180
-#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S                  7
-#define AR_BTCOEX_CTRL_RX_CHAIN_MASK                   0x00000E00
-#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S                 9
-#define AR_BTCOEX_CTRL_AGGR_THRESH                     0x00007000
-#define AR_BTCOEX_CTRL_AGGR_THRESH_S                   12
-#define AR_BTCOEX_CTRL_1_CHAIN_BCN                     0x00080000
-#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S                   19
-#define AR_BTCOEX_CTRL_1_CHAIN_ACK                     0x00100000
-#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S                   20
-#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN                  0x1FE00000
-#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S                        28
-#define AR_BTCOEX_CTRL_REDUCE_TXPWR                    0x20000000
-#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S                  29
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_10                  0x40000000
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S                        30
-#define AR_BTCOEX_CTRL_SPDT_POLARITY                   0x80000000
-#define AR_BTCOEX_CTRL_SPDT_POLARITY_S                 31
-
-#define AR_BTCOEX_MAX_TXPWR(_x)                                (0x18c0 + ((_x) << 2))
-#define AR_BTCOEX_WL_LNA                               0x1940
-#define AR_BTCOEX_RFGAIN_CTRL                          0x1944
-#define AR_BTCOEX_WL_LNA_TIMEOUT                       0x003FFFFF
-#define AR_BTCOEX_WL_LNA_TIMEOUT_S                     0
-
-#define AR_BTCOEX_CTRL2                                        0x1948
-#define AR_BTCOEX_CTRL2_TXPWR_THRESH                   0x0007F800
-#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S                 11
-#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK                  0x00380000
-#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S                        19
-#define AR_BTCOEX_CTRL2_RX_DEWEIGHT                    0x00400000
-#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S                  22
-#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL                   0x00800000
-#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S                 23
-#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL                 0x01000000
-#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S               24
-#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE                0x02000000
-#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S      25
-
-#define AR_BTCOEX_CTRL_SPDT_ENABLE          0x00000001
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_S        0
-#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL     0x00000002
-#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S   1
-#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT   0x00000004
-#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
-#define AR_GLB_WLAN_UART_INTF_EN            0x00020000
-#define AR_GLB_WLAN_UART_INTF_EN_S          17
-#define AR_GLB_DS_JTAG_DISABLE              0x00040000
-#define AR_GLB_DS_JTAG_DISABLE_S            18
-
-#define AR_BTCOEX_RC                    0x194c
-#define AR_BTCOEX_MAX_RFGAIN(_x)        (0x1950 + ((_x) << 2))
-#define AR_BTCOEX_DBG                   0x1a50
-#define AR_MCI_LAST_HW_MSG_HDR          0x1a54
-#define AR_MCI_LAST_HW_MSG_BDY          0x1a58
-
-#define AR_MCI_SCHD_TABLE_2             0x1a5c
-#define AR_MCI_SCHD_TABLE_2_MEM_BASED   0x00000001
-#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
-#define AR_MCI_SCHD_TABLE_2_HW_BASED    0x00000002
-#define AR_MCI_SCHD_TABLE_2_HW_BASED_S  1
-
-#define AR_BTCOEX_CTRL3               0x1a60
-#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT      0x00000fff
-#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S    0
-
-#define AR_GLB_SWREG_DISCONT_MODE         0x2002c
-#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN   0x3
-
-#define AR_MCI_MISC                    0x1a74
-#define AR_MCI_MISC_HW_FIX_EN          0x00000001
-#define AR_MCI_MISC_HW_FIX_EN_S        0
-#define AR_MCI_DBG_CNT_CTRL            0x1a78
-#define AR_MCI_DBG_CNT_CTRL_ENABLE     0x00000001
-#define AR_MCI_DBG_CNT_CTRL_ENABLE_S   0
-
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/reg_mci.h b/drivers/net/wireless/ath/ath9k/reg_mci.h
new file mode 100644 (file)
index 0000000..6251310
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_MCI_H
+#define REG_MCI_H
+
+#define AR_MCI_COMMAND0                                 0x1800
+#define AR_MCI_COMMAND0_HEADER                          0xFF
+#define AR_MCI_COMMAND0_HEADER_S                        0
+#define AR_MCI_COMMAND0_LEN                             0x1f00
+#define AR_MCI_COMMAND0_LEN_S                           8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP               0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S             13
+
+#define AR_MCI_COMMAND1                                 0x1804
+
+#define AR_MCI_COMMAND2                                 0x1808
+#define AR_MCI_COMMAND2_RESET_TX                        0x01
+#define AR_MCI_COMMAND2_RESET_TX_S                      0
+#define AR_MCI_COMMAND2_RESET_RX                        0x02
+#define AR_MCI_COMMAND2_RESET_RX_S                      1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES             0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S           2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP                0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S              10
+
+#define AR_MCI_RX_CTRL                                  0x180c
+
+#define AR_MCI_TX_CTRL                                  0x1810
+/*
+ * 0 = no division,
+ * 1 = divide by 2,
+ * 2 = divide by 4,
+ * 3 = divide by 8
+ */
+#define AR_MCI_TX_CTRL_CLK_DIV                          0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S                        0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE               0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S             2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ                 0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S               3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM                  0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S                24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE                     0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM            0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S          0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR         0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S       16
+
+#define AR_MCI_SCHD_TABLE_0                             0x1818
+#define AR_MCI_SCHD_TABLE_1                             0x181c
+#define AR_MCI_GPM_0                                    0x1820
+#define AR_MCI_GPM_1                                    0x1824
+#define AR_MCI_GPM_WRITE_PTR                            0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S                          16
+#define AR_MCI_GPM_BUF_LEN                              0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S                            0
+
+#define AR_MCI_INTERRUPT_RAW                            0x1828
+
+#define AR_MCI_INTERRUPT_EN                             0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE                    0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S                  0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG                    0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S                  1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL                  0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S                2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR                 0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S               3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL                 0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S               4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL                 0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S               5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL                 0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S               7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL                 0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S               8
+#define AR_MCI_INTERRUPT_RX_MSG                         0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S                       9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE            0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S          10
+#define AR_MCI_INTERRUPT_BT_PRI                         0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S                       11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH                  0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S                27
+#define AR_MCI_INTERRUPT_BT_FREQ                        0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S                      28
+#define AR_MCI_INTERRUPT_BT_STOMP                       0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S                     29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ                     0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S                   30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT              0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S            31
+
+#define AR_MCI_REMOTE_CPU_INT                           0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN                        0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW                     0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN                      0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET            0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S          0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL             0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S           1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK               0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S             2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO               0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S             3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST                0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S              4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO               0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S             5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT                 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S               6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM                     0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S                   8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO                0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S              9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING            0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S          10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING              0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S            11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE                0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S              12
+
+#define AR_MCI_CPU_INT                                  0x1840
+
+#define AR_MCI_RX_STATUS                                0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX                   0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S                 8
+#define AR_MCI_RX_REMOTE_SLEEP                          0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S                        12
+#define AR_MCI_RX_MCI_CLK_REQ                           0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S                         13
+
+#define AR_MCI_CONT_STATUS                              0x1848
+#define AR_MCI_CONT_RSSI_POWER                          0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S                        0
+#define AR_MCI_CONT_PRIORITY                            0x0000FF00
+#define AR_MCI_CONT_PRIORITY_S                          8
+#define AR_MCI_CONT_TXRX                                0x00010000
+#define AR_MCI_CONT_TXRX_S                              16
+
+#define AR_MCI_BT_PRI0                                  0x184c
+#define AR_MCI_BT_PRI1                                  0x1850
+#define AR_MCI_BT_PRI2                                  0x1854
+#define AR_MCI_BT_PRI3                                  0x1858
+#define AR_MCI_BT_PRI                                   0x185c
+#define AR_MCI_WL_FREQ0                                 0x1860
+#define AR_MCI_WL_FREQ1                                 0x1864
+#define AR_MCI_WL_FREQ2                                 0x1868
+#define AR_MCI_GAIN                                     0x186c
+#define AR_MCI_WBTIMER1                                 0x1870
+#define AR_MCI_WBTIMER2                                 0x1874
+#define AR_MCI_WBTIMER3                                 0x1878
+#define AR_MCI_WBTIMER4                                 0x187c
+#define AR_MCI_MAXGAIN                                  0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL                          0x1884
+#define AR_MCI_HW_SCHD_TBL_D0                           0x1888
+#define AR_MCI_HW_SCHD_TBL_D1                           0x188c
+#define AR_MCI_HW_SCHD_TBL_D2                           0x1890
+#define AR_MCI_HW_SCHD_TBL_D3                           0x1894
+#define AR_MCI_TX_PAYLOAD0                              0x1898
+#define AR_MCI_TX_PAYLOAD1                              0x189c
+#define AR_MCI_TX_PAYLOAD2                              0x18a0
+#define AR_MCI_TX_PAYLOAD3                              0x18a4
+#define AR_BTCOEX_WBTIMER                               0x18a8
+
+#define AR_BTCOEX_CTRL                                  0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE                      0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S                    0
+#define AR_BTCOEX_CTRL_WBTIMER_EN                       0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S                     1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN                      0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S                    2
+#define AR_BTCOEX_CTRL_LNA_SHARED                       0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S                     3
+#define AR_BTCOEX_CTRL_PA_SHARED                        0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S                      4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN           0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S         5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN        0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S      6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS                     0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S                   7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK                    0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S                  9
+#define AR_BTCOEX_CTRL_AGGR_THRESH                      0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S                    12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN                      0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S                    19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK                      0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S                    20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN                   0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S                 28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR                     0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S                   29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10                   0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S                 30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY                    0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S                  31
+
+#define AR_BTCOEX_WL_WEIGHTS0                           0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1                           0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2                           0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3                           0x18bc
+
+#define AR_BTCOEX_MAX_TXPWR(_x)                         (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA                                0x1940
+#define AR_BTCOEX_RFGAIN_CTRL                           0x1944
+#define AR_BTCOEX_WL_LNA_TIMEOUT                        0x003FFFFF
+#define AR_BTCOEX_WL_LNA_TIMEOUT_S                      0
+
+#define AR_BTCOEX_CTRL2                                 0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH                    0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S                  11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK                   0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S                 19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT                     0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S                   22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL                    0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S                  23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL                  0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S                24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE         0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S       25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE                      0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S                    0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL                 0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S               1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT               0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S             2
+#define AR_GLB_WLAN_UART_INTF_EN                        0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S                      17
+#define AR_GLB_DS_JTAG_DISABLE                          0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S                        18
+
+#define AR_BTCOEX_RC                                    0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x)                        (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG                                   0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR                          0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY                          0x1a58
+
+#define AR_MCI_SCHD_TABLE_2                             0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED                   0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S                 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED                    0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S                  1
+
+#define AR_BTCOEX_CTRL3                                 0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT               0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S             0
+
+#define AR_GLB_SWREG_DISCONT_MODE                       0x2002c
+#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN                 0x3
+
+#define AR_MCI_MISC                                     0x1a74
+#define AR_MCI_MISC_HW_FIX_EN                           0x00000001
+#define AR_MCI_MISC_HW_FIX_EN_S                         0
+
+#define AR_MCI_DBG_CNT_CTRL                             0x1a78
+#define AR_MCI_DBG_CNT_CTRL_ENABLE                      0x00000001
+#define AR_MCI_DBG_CNT_CTRL_ENABLE_S                    0
+#define AR_MCI_DBG_CNT_CTRL_BT_LINKID                   0x000007f8
+#define AR_MCI_DBG_CNT_CTRL_BT_LINKID_S                 3
+
+#define MCI_STAT_ALL_BT_LINKID                          0xffff
+
+#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE         | \
+                                 AR_MCI_INTERRUPT_RX_INVALID_HDR      | \
+                                 AR_MCI_INTERRUPT_RX_HW_MSG_FAIL      | \
+                                 AR_MCI_INTERRUPT_RX_SW_MSG_FAIL      | \
+                                 AR_MCI_INTERRUPT_TX_HW_MSG_FAIL      | \
+                                 AR_MCI_INTERRUPT_TX_SW_MSG_FAIL      | \
+                                 AR_MCI_INTERRUPT_RX_MSG              | \
+                                 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+                                 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+                                        AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+                                        AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+                                        AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+                                        AR_MCI_INTERRUPT_RX_MSG_LNA_INFO    | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_NACK   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_INFO   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM           | \
+                                         AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET  | \
+                                         AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING    | \
+                                         AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING  | \
+                                         AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#endif /* REG_MCI_H */
index 3abfca56ca5846acd89c6664879081e9f6f248de..453054078cc4785c770ea29a16c92cc5881d38bc 100644 (file)
@@ -72,7 +72,7 @@
 #define AR_WOW_MAC_INTR_EN              0x00040000
 #define AR_WOW_MAGIC_EN                 0x00010000
 #define AR_WOW_PATTERN_EN(x)            (x & 0xff)
-#define AR_WOW_PAT_FOUND_SHIFT  8
+#define AR_WOW_PAT_FOUND_SHIFT          8
 #define AR_WOW_PATTERN_FOUND(x)         (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
 #define AR_WOW_PATTERN_FOUND_MASK       ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
 #define AR_WOW_MAGIC_PAT_FOUND          0x00020000
                                                AR_WOW_BEACON_FAIL |    \
                                                AR_WOW_KEEP_ALIVE_FAIL))
 
+#define AR_WOW2_PATTERN_EN(x)           ((x & 0xff) << 0)
+#define AR_WOW2_PATTERN_FOUND_SHIFT     8
+#define AR_WOW2_PATTERN_FOUND(x)        (x & (0xff << AR_WOW2_PATTERN_FOUND_SHIFT))
+#define AR_WOW2_PATTERN_FOUND_MASK      ((0xff) << AR_WOW2_PATTERN_FOUND_SHIFT)
+
+#define AR_WOW_STATUS2(x)               (x & AR_WOW2_PATTERN_FOUND_MASK)
+#define AR_WOW_CLEAR_EVENTS2(x)         (x & ~(AR_WOW2_PATTERN_EN(0xff)))
+
 #define AR_WOW_AIFS_CNT(x)              (x & 0xff)
 #define AR_WOW_SLOT_CNT(x)              ((x & 0xff) << 8)
 #define AR_WOW_KEEP_ALIVE_CNT(x)        ((x & 0xff) << 16)
index 1b8e75c4d2c2d6659b8901dd915b0209bfb928f7..0acd079ba96bd3d2f60602ebf5f889f36da9f908 100644 (file)
@@ -1103,14 +1103,28 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
        struct sk_buff *skb;
        struct ath_frame_info *fi;
        struct ieee80211_tx_info *info;
+       struct ieee80211_vif *vif;
        struct ath_hw *ah = sc->sc_ah;
 
        if (sc->tx99_state || !ah->tpc_enabled)
                return MAX_RATE_POWER;
 
        skb = bf->bf_mpdu;
-       fi = get_frame_info(skb);
        info = IEEE80211_SKB_CB(skb);
+       vif = info->control.vif;
+
+       if (!vif) {
+               max_power = sc->cur_chan->cur_txpower;
+               goto out;
+       }
+
+       if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) {
+               max_power = min_t(u8, sc->cur_chan->cur_txpower,
+                                 2 * vif->bss_conf.txpower);
+               goto out;
+       }
+
+       fi = get_frame_info(skb);
 
        if (!AR_SREV_9300_20_OR_LATER(ah)) {
                int txpower = fi->tx_power;
@@ -1147,25 +1161,25 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
                        txpower -= 2;
 
                txpower = max(txpower, 0);
-               max_power = min_t(u8, ah->tx_power[rateidx], txpower);
-
-               /* XXX: clamp minimum TX power at 1 for AR9160 since if
-                * max_power is set to 0, frames are transmitted at max
-                * TX power
-                */
-               if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
-                       max_power = 1;
+               max_power = min_t(u8, ah->tx_power[rateidx],
+                                 2 * vif->bss_conf.txpower);
+               max_power = min_t(u8, max_power, txpower);
        } else if (!bf->bf_state.bfs_paprd) {
                if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
-                       max_power = min(ah->tx_power_stbc[rateidx],
-                                       fi->tx_power);
+                       max_power = min_t(u8, ah->tx_power_stbc[rateidx],
+                                         2 * vif->bss_conf.txpower);
                else
-                       max_power = min(ah->tx_power[rateidx], fi->tx_power);
+                       max_power = min_t(u8, ah->tx_power[rateidx],
+                                         2 * vif->bss_conf.txpower);
+               max_power = min(max_power, fi->tx_power);
        } else {
                max_power = ah->paprd_training_power;
        }
-
-       return max_power;
+out:
+       /* XXX: clamp minimum TX power at 1 for AR9160 since if max_power
+        * is set to 0, frames are transmitted at max TX power
+        */
+       return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power;
 }
 
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
index 2d5ea21be47e592af98599e214697d6b022718f2..4bd708c8716c462afef1616bcae463902a414e41 100644 (file)
@@ -387,11 +387,25 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        int ch;
        int rc = 0;
 
+       wil_print_connect_params(wil, sme);
+
        if (test_bit(wil_status_fwconnecting, wil->status) ||
            test_bit(wil_status_fwconnected, wil->status))
                return -EALREADY;
 
-       wil_print_connect_params(wil, sme);
+       if (sme->ie_len > WMI_MAX_IE_LEN) {
+               wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
+               return -ERANGE;
+       }
+
+       rsn_eid = sme->ie ?
+                       cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+                       NULL;
+
+       if (sme->privacy && !rsn_eid) {
+               wil_err(wil, "Missing RSN IE for secure connection\n");
+               return -EINVAL;
+       }
 
        bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
                               sme->ssid, sme->ssid_len,
@@ -407,17 +421,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
                rc = -ENOENT;
                goto out;
        }
+       wil->privacy = sme->privacy;
 
-       rsn_eid = sme->ie ?
-                       cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
-                       NULL;
-       if (rsn_eid) {
-               if (sme->ie_len > WMI_MAX_IE_LEN) {
-                       rc = -ERANGE;
-                       wil_err(wil, "IE too large (%td bytes)\n",
-                               sme->ie_len);
-                       goto out;
-               }
+       if (wil->privacy) {
                /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
                rc = wmi_del_cipher_key(wil, 0, bss->bssid);
                if (rc) {
@@ -450,7 +456,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
                        bss->capability);
                goto out;
        }
-       if (rsn_eid) {
+       if (wil->privacy) {
                conn.dot11_auth_mode = WMI_AUTH11_SHARED;
                conn.auth_mode = WMI_AUTH_WPA2_PSK;
                conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
@@ -769,7 +775,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
                   bcon->assocresp_ies);
 
-       wil->secure_pcp = info->privacy;
+       wil->privacy = info->privacy;
 
        netif_carrier_on(ndev);
 
index 45c3558ec8042e3db83203bd03a36a51f124d3eb..3830cc20d4fa525f87a9c72f3391c8ce76501a9b 100644 (file)
@@ -29,6 +29,7 @@
 static u32 mem_addr;
 static u32 dbg_txdesc_index;
 static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
+u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */
 
 enum dbg_off_type {
        doff_u32 = 0,
@@ -102,23 +103,30 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
                                   % vring->size;
                        int avail = vring->size - used - 1;
                        char name[10];
+                       char sidle[10];
                        /* performance monitoring */
                        cycles_t now = get_cycles();
                        uint64_t idle = txdata->idle * 100;
                        uint64_t total = now - txdata->begin;
 
-                       do_div(idle, total);
+                       if (total != 0) {
+                               do_div(idle, total);
+                               snprintf(sidle, sizeof(sidle), "%3d%%",
+                                        (int)idle);
+                       } else {
+                               snprintf(sidle, sizeof(sidle), "N/A");
+                       }
                        txdata->begin = now;
                        txdata->idle = 0ULL;
 
                        snprintf(name, sizeof(name), "tx_%2d", i);
 
                        seq_printf(s,
-                                  "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %3d%%\n",
-                                  wil->sta[cid].addr, cid, tid,
-                                  txdata->agg_wsize, txdata->agg_timeout,
-                                  txdata->agg_amsdu ? "+" : "-",
-                                  used, avail, (int)idle);
+                               "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %s\n",
+                               wil->sta[cid].addr, cid, tid,
+                               txdata->agg_wsize, txdata->agg_timeout,
+                               txdata->agg_amsdu ? "+" : "-",
+                               used, avail, sidle);
 
                        wil_print_vring(s, wil, name, vring, '_', 'H');
                }
@@ -549,7 +557,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
        dev_close(ndev);
        ndev->flags &= ~IFF_UP;
        rtnl_unlock();
-       wil_reset(wil);
+       wil_reset(wil, true);
 
        return len;
 }
@@ -618,7 +626,7 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
        struct wil6210_priv *wil = file->private_data;
        int rc;
        char *kbuf = kmalloc(len + 1, GFP_KERNEL);
-       char cmd[8];
+       char cmd[9];
        int p1, p2, p3;
 
        if (!kbuf)
@@ -1392,7 +1400,7 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
 
 /* fields in struct wil6210_priv */
 static const struct dbg_off dbg_wil_off[] = {
-       WIL_FIELD(secure_pcp,   S_IRUGO | S_IWUSR,      doff_u32),
+       WIL_FIELD(privacy,      S_IRUGO,                doff_u32),
        WIL_FIELD(status[0],    S_IRUGO | S_IWUSR,      doff_ulong),
        WIL_FIELD(fw_version,   S_IRUGO,                doff_u32),
        WIL_FIELD(hw_version,   S_IRUGO,                doff_x32),
@@ -1412,6 +1420,8 @@ static const struct dbg_off dbg_statics[] = {
        {"desc_index",  S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
        {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
        {"mem_addr",    S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
+       {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
+        doff_u32},
        {},
 };
 
index 4c44a82c34d79577e6e454bd42bbb9b8697d828e..0ea695ff98adeda1185382bfda7b58958b579275 100644 (file)
@@ -50,27 +50,19 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
-       if (test_bit(hw_capability_advanced_itr_moderation,
-                    wil->hw_capabilities)) {
-               tx_itr_en = ioread32(wil->csr +
-                                    HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
-               if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
-                       tx_itr_val =
-                               ioread32(wil->csr +
-                                        HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
-
-               rx_itr_en = ioread32(wil->csr +
-                                    HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
-               if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
-                       rx_itr_val =
-                               ioread32(wil->csr +
-                                        HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
-       } else {
-               rx_itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
-               if (rx_itr_en & BIT_DMA_ITR_CNT_CRL_EN)
-                       rx_itr_val = ioread32(wil->csr +
-                                             HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
-       }
+       tx_itr_en = ioread32(wil->csr +
+                            HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+       if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
+               tx_itr_val =
+                       ioread32(wil->csr +
+                                HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+
+       rx_itr_en = ioread32(wil->csr +
+                            HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+       if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
+               rx_itr_val =
+                       ioread32(wil->csr +
+                                HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
 
        cp->tx_coalesce_usecs = tx_itr_val;
        cp->rx_coalesce_usecs = rx_itr_val;
index 93c5cc16c515c8df5bfc3e2be9dea61e3249ee5a..4428345e5a470360560ceb82772349cf8f754a7f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -20,6 +20,7 @@
 #include "fw.h"
 
 MODULE_FIRMWARE(WIL_FW_NAME);
+MODULE_FIRMWARE(WIL_FW2_NAME);
 
 /* target operations */
 /* register read */
index d4acf93a9a02b9fbfd9f5d063726bdd5e84d47c5..157f5ef384e0cc2804229044f369ac813c69a28d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -451,8 +451,6 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
                }
                return -EINVAL;
        }
-       /* Mark FW as loaded from host */
-       S(RGF_USER_USAGE_6, 1);
 
        return rc;
 }
index a6f923086f310d5795ef5beb4c3ebf61315001e9..28ffc18466c4b1e1d2887f1887fc3decf36e73e2 100644 (file)
@@ -166,9 +166,16 @@ void wil_unmask_irq(struct wil6210_priv *wil)
 /* target write operation */
 #define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
 
-static
-void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 {
+       wil_dbg_irq(wil, "%s()\n", __func__);
+
+       /* disable interrupt moderation for monitor
+        * to get better timestamp precision
+        */
+       if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
+               return;
+
        /* Disable and clear tx counter before (re)configuration */
        W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
        W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
@@ -206,42 +213,8 @@ void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
                                      BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
 }
 
-static
-void wil_configure_interrupt_moderation_lgc(struct wil6210_priv *wil)
-{
-       /* disable, use usec resolution */
-       W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_CLR);
-
-       wil_info(wil, "set ITR_TRSH = %d usec\n", wil->rx_max_burst_duration);
-       W(RGF_DMA_ITR_CNT_TRSH, wil->rx_max_burst_duration);
-       /* start it */
-       W(RGF_DMA_ITR_CNT_CRL,
-         BIT_DMA_ITR_CNT_CRL_EN | BIT_DMA_ITR_CNT_CRL_EXT_TICK);
-}
-
 #undef W
 
-void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
-{
-       wil_dbg_irq(wil, "%s()\n", __func__);
-
-       /* disable interrupt moderation for monitor
-        * to get better timestamp precision
-        */
-       if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
-               return;
-
-       if (test_bit(hw_capability_advanced_itr_moderation,
-                    wil->hw_capabilities))
-               wil_configure_interrupt_moderation_new(wil);
-       else {
-               /* Advanced interrupt moderation is not available before
-                * Sparrow v2. Will use legacy interrupt moderation
-                */
-               wil_configure_interrupt_moderation_lgc(wil);
-       }
-}
-
 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
@@ -253,7 +226,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
        trace_wil6210_irq_rx(isr);
        wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
 
-       if (!isr) {
+       if (unlikely(!isr)) {
                wil_err(wil, "spurious IRQ: RX\n");
                return IRQ_NONE;
        }
@@ -266,17 +239,18 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
         * action is always the same - should empty the accumulated
         * packets from the RX ring.
         */
-       if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) {
+       if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
+                         BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
                wil_dbg_irq(wil, "RX done\n");
 
-               if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)
+               if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH))
                        wil_err_ratelimited(wil,
                                            "Received \"Rx buffer is in risk of overflow\" interrupt\n");
 
                isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
                         BIT_DMA_EP_RX_ICR_RX_HTRSH);
-               if (test_bit(wil_status_reset_done, wil->status)) {
-                       if (test_bit(wil_status_napi_en, wil->status)) {
+               if (likely(test_bit(wil_status_reset_done, wil->status))) {
+                       if (likely(test_bit(wil_status_napi_en, wil->status))) {
                                wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
                                need_unmask = false;
                                napi_schedule(&wil->napi_rx);
@@ -289,7 +263,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
                }
        }
 
-       if (isr)
+       if (unlikely(isr))
                wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
 
        /* Rx IRQ will be enabled when NAPI processing finished */
@@ -313,19 +287,19 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
        trace_wil6210_irq_tx(isr);
        wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
 
-       if (!isr) {
+       if (unlikely(!isr)) {
                wil_err(wil, "spurious IRQ: TX\n");
                return IRQ_NONE;
        }
 
        wil6210_mask_irq_tx(wil);
 
-       if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+       if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
                wil_dbg_irq(wil, "TX done\n");
                isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
                /* clear also all VRING interrupts */
                isr &= ~(BIT(25) - 1UL);
-               if (test_bit(wil_status_reset_done, wil->status)) {
+               if (likely(test_bit(wil_status_reset_done, wil->status))) {
                        wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
                        need_unmask = false;
                        napi_schedule(&wil->napi_tx);
@@ -334,7 +308,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
                }
        }
 
-       if (isr)
+       if (unlikely(isr))
                wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
 
        /* Tx IRQ will be enabled when NAPI processing finished */
@@ -523,11 +497,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
        /**
         * pseudo_cause is Clear-On-Read, no need to ACK
         */
-       if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+       if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
                return IRQ_NONE;
 
        /* FIXME: IRQ mask debug */
-       if (wil6210_debug_irq_mask(wil, pseudo_cause))
+       if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
                return IRQ_NONE;
 
        trace_wil6210_irq_pseudo(pseudo_cause);
index b04e0afdcb216724b1329085f038c4da2d335016..db74e811f5c424667bae5e136984750298fc92dd 100644 (file)
@@ -29,10 +29,6 @@ bool no_fw_recovery;
 module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
 
-static bool no_fw_load = true;
-module_param(no_fw_load, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash.");
-
 /* if not set via modparam, will be set to default value of 1/8 of
  * rx ring size during init flow
  */
@@ -520,8 +516,6 @@ static int wil_target_reset(struct wil6210_priv *wil)
 {
        int delay = 0;
        u32 x;
-       bool is_reset_v2 = test_bit(hw_capability_reset_v2,
-                                   wil->hw_capabilities);
 
        wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
@@ -532,82 +526,67 @@ static int wil_target_reset(struct wil6210_priv *wil)
 
        wil_halt_cpu(wil);
 
+       /* clear all boot loader "ready" bits */
+       W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
        /* Clear Fw Download notification */
        C(RGF_USER_USAGE_6, BIT(0));
 
-       if (is_reset_v2) {
-               S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
-               /* XTAL stabilization should take about 3ms */
-               usleep_range(5000, 7000);
-               x = R(RGF_CAF_PLL_LOCK_STATUS);
-               if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
-                       wil_err(wil, "Xtal stabilization timeout\n"
-                               "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
-                       return -ETIME;
-               }
-               /* switch 10k to XTAL*/
-               C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
-               /* 40 MHz */
-               C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
-
-               W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
-               W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+       S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+       /* XTAL stabilization should take about 3ms */
+       usleep_range(5000, 7000);
+       x = R(RGF_CAF_PLL_LOCK_STATUS);
+       if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
+               wil_err(wil, "Xtal stabilization timeout\n"
+                       "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
+               return -ETIME;
        }
+       /* switch 10k to XTAL*/
+       C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+       /* 40 MHz */
+       C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+
+       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
 
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3,
-         is_reset_v2 ? 0x000000f0 : 0x00000170);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
 
-       if (is_reset_v2) {
-               W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
-               W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
-       }
+       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
 
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-       if (is_reset_v2) {
-               W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
-               /* reset A2 PCIE AHB */
-               W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
-       } else {
-               W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
-               W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
-               W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
-       }
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
 
-       /* TODO: check order here!!! Erez code is different */
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-       /* wait until device ready. typical time is 200..250 msec */
+       /* wait until device ready. typical time is 20..80 msec */
        do {
                msleep(RST_DELAY);
-               x = R(RGF_USER_HW_MACHINE_STATE);
+               x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
                if (delay++ > RST_COUNT) {
-                       wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
+                       wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
                                x);
                        return -ETIME;
                }
-       } while (x != HW_MACHINE_BOOT_DONE);
-
-       if (!is_reset_v2)
-               W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
+       } while (!(x & BIT_BL_READY));
 
        C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
+       /* enable fix for HW bug related to the SA/DA swap in AP Rx */
+       S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+         BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+
        wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
        return 0;
 }
 
-#undef R
-#undef W
-#undef S
-#undef C
-
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 {
        le32_to_cpus(&r->base);
@@ -617,6 +596,32 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
        le32_to_cpus(&r->head);
 }
 
+static int wil_get_bl_info(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct RGF_BL bl;
+
+       wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
+       le32_to_cpus(&bl.ready);
+       le32_to_cpus(&bl.version);
+       le32_to_cpus(&bl.rf_type);
+       le32_to_cpus(&bl.baseband_type);
+
+       if (!is_valid_ether_addr(bl.mac_address)) {
+               wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+               return -EINVAL;
+       }
+
+       ether_addr_copy(ndev->perm_addr, bl.mac_address);
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               ether_addr_copy(ndev->dev_addr, bl.mac_address);
+       wil_info(wil,
+                "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+                bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+
+       return 0;
+}
+
 static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 {
        ulong to = msecs_to_jiffies(1000);
@@ -637,7 +642,7 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
  * After calling this routine, you're expected to reload
  * the firmware.
  */
-int wil_reset(struct wil6210_priv *wil)
+int wil_reset(struct wil6210_priv *wil, bool load_fw)
 {
        int rc;
 
@@ -675,30 +680,36 @@ int wil_reset(struct wil6210_priv *wil)
        if (rc)
                return rc;
 
-       if (!no_fw_load) {
-               wil_info(wil, "Use firmware <%s>\n", WIL_FW_NAME);
+       rc = wil_get_bl_info(wil);
+       if (rc)
+               return rc;
+
+       if (load_fw) {
+               wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
+                        WIL_FW2_NAME);
+
                wil_halt_cpu(wil);
                /* Loading f/w from the file */
                rc = wil_request_firmware(wil, WIL_FW_NAME);
+               if (rc)
+                       return rc;
+               rc = wil_request_firmware(wil, WIL_FW2_NAME);
                if (rc)
                        return rc;
 
-               /* clear any interrupts which on-card-firmware may have set */
+               /* Mark FW as loaded from host */
+               S(RGF_USER_USAGE_6, 1);
+
+               /* clear any interrupts which on-card-firmware
+                * may have set
+                */
                wil6210_clear_irq(wil);
-               { /* CAF_ICR - clear and mask */
-                       u32 a = HOSTADDR(RGF_CAF_ICR) +
-                               offsetof(struct RGF_ICR, ICR);
-                       u32 m = HOSTADDR(RGF_CAF_ICR) +
-                               offsetof(struct RGF_ICR, IMV);
-                       u32 icr = ioread32(wil->csr + a);
-
-                       iowrite32(icr, wil->csr + a); /* W1C */
-                       iowrite32(~0, wil->csr + m);
-                       wmb(); /* wait for completion */
-               }
+               /* CAF_ICR - clear and mask */
+               /* it is W1C, clear by writing back same value */
+               S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+               W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+
                wil_release_cpu(wil);
-       } else {
-               wil_info(wil, "Use firmware from on-card flash\n");
        }
 
        /* init after reset */
@@ -706,15 +717,22 @@ int wil_reset(struct wil6210_priv *wil)
        reinit_completion(&wil->wmi_ready);
        reinit_completion(&wil->wmi_call);
 
-       wil_configure_interrupt_moderation(wil);
-       wil_unmask_irq(wil);
+       if (load_fw) {
+               wil_configure_interrupt_moderation(wil);
+               wil_unmask_irq(wil);
 
-       /* we just started MAC, wait for FW ready */
-       rc = wil_wait_for_fw_ready(wil);
+               /* we just started MAC, wait for FW ready */
+               rc = wil_wait_for_fw_ready(wil);
+       }
 
        return rc;
 }
 
+#undef R
+#undef W
+#undef S
+#undef C
+
 void wil_fw_error_recovery(struct wil6210_priv *wil)
 {
        wil_dbg_misc(wil, "starting fw error recovery\n");
@@ -730,7 +748,7 @@ int __wil_up(struct wil6210_priv *wil)
 
        WARN_ON(!mutex_is_locked(&wil->mutex));
 
-       rc = wil_reset(wil);
+       rc = wil_reset(wil, true);
        if (rc)
                return rc;
 
@@ -837,7 +855,7 @@ int __wil_down(struct wil6210_priv *wil)
        if (!iter)
                wil_err(wil, "timeout waiting for idle FW/HW\n");
 
-       wil_rx_fini(wil);
+       wil_reset(wil, false);
 
        return 0;
 }
index 3dd26709ccb29a075ac6f21c6a4626fc22718af6..25343cffe229e08fc45f0f582202f3f9ff6bce07 100644 (file)
@@ -39,18 +39,6 @@ void wil_set_capabilities(struct wil6210_priv *wil)
        bitmap_zero(wil->hw_capabilities, hw_capability_last);
 
        switch (rev_id) {
-       case JTAG_DEV_ID_MARLON_B0:
-               wil->hw_name = "Marlon B0";
-               wil->hw_version = HW_VER_MARLON_B0;
-               break;
-       case JTAG_DEV_ID_SPARROW_A0:
-               wil->hw_name = "Sparrow A0";
-               wil->hw_version = HW_VER_SPARROW_A0;
-               break;
-       case JTAG_DEV_ID_SPARROW_A1:
-               wil->hw_name = "Sparrow A1";
-               wil->hw_version = HW_VER_SPARROW_A1;
-               break;
        case JTAG_DEV_ID_SPARROW_B0:
                wil->hw_name = "Sparrow B0";
                wil->hw_version = HW_VER_SPARROW_B0;
@@ -62,13 +50,6 @@ void wil_set_capabilities(struct wil6210_priv *wil)
        }
 
        wil_info(wil, "Board hardware is %s\n", wil->hw_name);
-
-       if (wil->hw_version >= HW_VER_SPARROW_A0)
-               set_bit(hw_capability_reset_v2, wil->hw_capabilities);
-
-       if (wil->hw_version >= HW_VER_SPARROW_B0)
-               set_bit(hw_capability_advanced_itr_moderation,
-                       wil->hw_capabilities);
 }
 
 void wil_disable_irq(struct wil6210_priv *wil)
@@ -150,7 +131,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
 
        /* need reset here to obtain MAC */
        mutex_lock(&wil->mutex);
-       rc = wil_reset(wil);
+       rc = wil_reset(wil, false);
        mutex_unlock(&wil->mutex);
        if (debug_fw)
                rc = 0;
@@ -305,7 +286,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 }
 
 static const struct pci_device_id wil6210_pcie_ids[] = {
-       { PCI_DEVICE(0x1ae9, 0x0301) },
        { PCI_DEVICE(0x1ae9, 0x0310) },
        { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
        { /* end: all zeroes */ },
index 8439f65db259728b35586769d82a428b51d3aeae..7f2f560b86382827276cc66215aa6d83116a7fc7 100644 (file)
@@ -53,34 +53,38 @@ static inline int wil_vring_is_full(struct vring *vring)
        return wil_vring_next_tail(vring) == vring->swhead;
 }
 
-/*
- * Available space in Tx Vring
- */
-static inline int wil_vring_avail_tx(struct vring *vring)
+/* Used space in Tx Vring */
+static inline int wil_vring_used_tx(struct vring *vring)
 {
        u32 swhead = vring->swhead;
        u32 swtail = vring->swtail;
-       int used = (vring->size + swhead - swtail) % vring->size;
+       return (vring->size + swhead - swtail) % vring->size;
+}
 
-       return vring->size - used - 1;
+/* Available space in Tx Vring */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+       return vring->size - wil_vring_used_tx(vring) - 1;
 }
 
-/**
- * wil_vring_wmark_low - low watermark for available descriptor space
- */
+/* wil_vring_wmark_low - low watermark for available descriptor space */
 static inline int wil_vring_wmark_low(struct vring *vring)
 {
        return vring->size/8;
 }
 
-/**
- * wil_vring_wmark_high - high watermark for available descriptor space
- */
+/* wil_vring_wmark_high - high watermark for available descriptor space */
 static inline int wil_vring_wmark_high(struct vring *vring)
 {
        return vring->size/4;
 }
 
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+       return val >= min && val < max;
+}
+
 static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 {
        struct device *dev = wil_to_dev(wil);
@@ -98,8 +102,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
                vring->va = NULL;
                return -ENOMEM;
        }
-       /*
-        * vring->va should be aligned on its size rounded up to power of 2
+       /* vring->va should be aligned on its size rounded up to power of 2
         * This is granted by the dma_alloc_coherent
         */
        vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -346,27 +349,6 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
        }
 }
 
-/*
- * Fast swap in place between 2 registers
- */
-static void wil_swap_u16(u16 *a, u16 *b)
-{
-       *a ^= *b;
-       *b ^= *a;
-       *a ^= *b;
-}
-
-static void wil_swap_ethaddr(void *data)
-{
-       struct ethhdr *eth = data;
-       u16 *s = (u16 *)eth->h_source;
-       u16 *d = (u16 *)eth->h_dest;
-
-       wil_swap_u16(s++, d++);
-       wil_swap_u16(s++, d++);
-       wil_swap_u16(s, d);
-}
-
 /**
  * reap 1 frame from @swhead
  *
@@ -386,17 +368,16 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        unsigned int sz = mtu_max + ETH_HLEN;
        u16 dmalen;
        u8 ftype;
-       u8 ds_bits;
        int cid;
        struct wil_net_stats *stats;
 
        BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
 
-       if (wil_vring_is_empty(vring))
+       if (unlikely(wil_vring_is_empty(vring)))
                return NULL;
 
        _d = &vring->va[vring->swhead].rx;
-       if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
+       if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
                /* it is not error, we just reached end of Rx done area */
                return NULL;
        }
@@ -416,7 +397,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
 
-       if (dmalen > sz) {
+       if (unlikely(dmalen > sz)) {
                wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
                kfree_skb(skb);
                return NULL;
@@ -445,14 +426,14 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
         * in Rx descriptor. If type is not data, it is 802.11 frame as is
         */
        ftype = wil_rxdesc_ftype(d) << 2;
-       if (ftype != IEEE80211_FTYPE_DATA) {
+       if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
                wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
                /* TODO: process it */
                kfree_skb(skb);
                return NULL;
        }
 
-       if (skb->len < ETH_HLEN) {
+       if (unlikely(skb->len < ETH_HLEN)) {
                wil_err(wil, "Short frame, len = %d\n", skb->len);
                /* TODO: process it (i.e. BAR) */
                kfree_skb(skb);
@@ -463,9 +444,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
         * and in case of error drop the packet
         * higher stack layers will handle retransmission (if required)
         */
-       if (d->dma.status & RX_DMA_STATUS_L4I) {
+       if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
                /* L4 protocol identified, csum calculated */
-               if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
+               if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                /* If HW reports bad checksum, let IP stack re-check it
                 * For example, HW don't understand Microsoft IP stack that
@@ -474,15 +455,6 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                 */
        }
 
-       ds_bits = wil_rxdesc_ds_bits(d);
-       if (ds_bits == 1) {
-               /*
-                * HW bug - in ToDS mode, i.e. Rx on AP side,
-                * addresses get swapped
-                */
-               wil_swap_ethaddr(skb->data);
-       }
-
        return skb;
 }
 
@@ -503,7 +475,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
                        (next_tail != v->swhead) && (count-- > 0);
                        v->swtail = next_tail) {
                rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
-               if (rc) {
+               if (unlikely(rc)) {
                        wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
                                rc, v->swtail);
                        break;
@@ -565,7 +537,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
        struct vring *v = &wil->vring_rx;
        struct sk_buff *skb;
 
-       if (!v->va) {
+       if (unlikely(!v->va)) {
                wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
                return;
        }
@@ -952,13 +924,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
        uint i = swhead;
        dma_addr_t pa;
+       int used;
 
        wil_dbg_txrx(wil, "%s()\n", __func__);
 
        if (unlikely(!txdata->enabled))
                return -EINVAL;
 
-       if (avail < 1 + nr_frags) {
+       if (unlikely(avail < 1 + nr_frags)) {
                wil_err_ratelimited(wil,
                                    "Tx ring[%2d] full. No space for %d fragments\n",
                                    vring_index, 1 + nr_frags);
@@ -979,7 +952,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        /* 1-st segment */
        wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
        /* Process TCP/UDP checksum offloading */
-       if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
+       if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
                wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
                        vring_index);
                goto dma_error;
@@ -1027,8 +1000,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
         */
        vring->ctx[i].skb = skb_get(skb);
 
-       if (wil_vring_is_empty(vring)) /* performance monitoring */
+       /* performance monitoring */
+       used = wil_vring_used_tx(vring);
+       if (wil_val_in_range(vring_idle_trsh,
+                            used, used + nr_frags + 1)) {
                txdata->idle += get_cycles() - txdata->last_idle;
+               wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
+                            vring_index, used, used + nr_frags + 1);
+       }
 
        /* advance swhead */
        wil_vring_advance_head(vring, nr_frags + 1);
@@ -1082,18 +1061,18 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        int rc;
 
        wil_dbg_txrx(wil, "%s()\n", __func__);
-       if (!test_bit(wil_status_fwready, wil->status)) {
+       if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
                if (!pr_once_fw) {
                        wil_err(wil, "FW not ready\n");
                        pr_once_fw = true;
                }
                goto drop;
        }
-       if (!test_bit(wil_status_fwconnected, wil->status)) {
+       if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
                wil_err(wil, "FW not connected\n");
                goto drop;
        }
-       if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+       if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
                wil_err(wil, "Xmit in monitor mode not supported\n");
                goto drop;
        }
@@ -1109,7 +1088,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                else
                        vring = wil_tx_bcast(wil, skb);
        }
-       if (!vring) {
+       if (unlikely(!vring)) {
                wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
                goto drop;
        }
@@ -1117,7 +1096,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        rc = wil_tx_vring(wil, vring, skb);
 
        /* do we still have enough room in the vring? */
-       if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) {
+       if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
                netif_tx_stop_all_queues(wil_to_ndev(wil));
                wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
        }
@@ -1172,19 +1151,23 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
        int cid = wil->vring2cid_tid[ringid][0];
        struct wil_net_stats *stats = &wil->sta[cid].stats;
        volatile struct vring_tx_desc *_d;
+       int used_before_complete;
+       int used_new;
 
-       if (!vring->va) {
+       if (unlikely(!vring->va)) {
                wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
                return 0;
        }
 
-       if (!txdata->enabled) {
+       if (unlikely(!txdata->enabled)) {
                wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
                return 0;
        }
 
        wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
 
+       used_before_complete = wil_vring_used_tx(vring);
+
        while (!wil_vring_is_empty(vring)) {
                int new_swtail;
                struct wil_ctx *ctx = &vring->ctx[vring->swtail];
@@ -1196,7 +1179,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                /* TODO: check we are not past head */
 
                _d = &vring->va[lf].tx;
-               if (!(_d->dma.status & TX_DMA_STATUS_DU))
+               if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
                        break;
 
                new_swtail = (lf + 1) % vring->size;
@@ -1224,7 +1207,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                        wil_txdesc_unmap(dev, d, ctx);
 
                        if (skb) {
-                               if (d->dma.error == 0) {
+                               if (likely(d->dma.error == 0)) {
                                        ndev->stats.tx_packets++;
                                        stats->tx_packets++;
                                        ndev->stats.tx_bytes += skb->len;
@@ -1246,8 +1229,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                }
        }
 
-       if (wil_vring_is_empty(vring)) { /* performance monitoring */
-               wil_dbg_txrx(wil, "Ring[%2d] empty\n", ringid);
+       /* performance monitoring */
+       used_new = wil_vring_used_tx(vring);
+       if (wil_val_in_range(vring_idle_trsh,
+                            used_new, used_before_complete)) {
+               wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+                            ringid, used_before_complete, used_new);
                txdata->last_idle = get_cycles();
        }
 
index 94611568fc9ab3384128ebbe77d066ba16502985..b6e65c37d410eccfb93ed51e56b52b3fdc192b8b 100644 (file)
@@ -27,9 +27,11 @@ extern bool no_fw_recovery;
 extern unsigned int mtu_max;
 extern unsigned short rx_ring_overflow_thrsh;
 extern int agg_wsize;
+extern u32 vring_idle_trsh;
 
 #define WIL_NAME "wil6210"
-#define WIL_FW_NAME "wil6210.fw"
+#define WIL_FW_NAME "wil6210.fw" /* code */
+#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
 
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
 
@@ -120,6 +122,16 @@ struct RGF_ICR {
        u32 IMC; /* Mask Clear, write 1 to clear */
 } __packed;
 
+struct RGF_BL {
+       u32 ready;              /* 0x880A3C bit [0] */
+#define BIT_BL_READY   BIT(0)
+       u32 version;            /* 0x880A40 version of the BL struct */
+       u32 rf_type;            /* 0x880A44 ID of the connected RF */
+       u32 baseband_type;      /* 0x880A48 ID of the baseband */
+       u8  mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
+       u8 pad[2];
+} __packed;
+
 /* registers - FW addresses */
 #define RGF_USER_USAGE_1               (0x880004)
 #define RGF_USER_USAGE_6               (0x880018)
@@ -130,6 +142,7 @@ struct RGF_ICR {
 #define RGF_USER_MAC_CPU_0             (0x8801fc)
        #define BIT_USER_MAC_CPU_MAN_RST        BIT(1) /* mac_cpu_man_rst */
 #define RGF_USER_USER_SCRATCH_PAD      (0x8802bc)
+#define RGF_USER_BL                    (0x880A3C) /* Boot Loader */
 #define RGF_USER_FW_REV_ID             (0x880a8c) /* chip revision */
 #define RGF_USER_CLKS_CTL_0            (0x880abc)
        #define BIT_USER_CLKS_CAR_AHB_SW_SEL    BIT(1) /* ref clk/PLL */
@@ -169,6 +182,13 @@ struct RGF_ICR {
        #define BIT_DMA_ITR_CNT_CRL_CLR         BIT(3)
        #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH  BIT(4)
 
+/* Offload control (Sparrow B0+) */
+#define RGF_DMA_OFUL_NID_0             (0x881cd4)
+       #define BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN         BIT(0)
+       #define BIT_DMA_OFUL_NID_0_TX_EXT_TR_EN         BIT(1)
+       #define BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC        BIT(2)
+       #define BIT_DMA_OFUL_NID_0_TX_EXT_A3_SRC        BIT(3)
+
 /* New (sparrow v2+) interrupt moderation control */
 #define RGF_DMA_ITR_TX_DESQ_NO_MOD             (0x881d40)
 #define RGF_DMA_ITR_TX_CNT_TRSH                        (0x881d34)
@@ -229,16 +249,10 @@ struct RGF_ICR {
        #define BIT_CAF_OSC_DIG_XTAL_STABLE     BIT(0)
 
 #define RGF_USER_JTAG_DEV_ID   (0x880b34) /* device ID */
-       #define JTAG_DEV_ID_MARLON_B0   (0x0612072f)
-       #define JTAG_DEV_ID_SPARROW_A0  (0x0632072f)
-       #define JTAG_DEV_ID_SPARROW_A1  (0x1632072f)
        #define JTAG_DEV_ID_SPARROW_B0  (0x2632072f)
 
 enum {
        HW_VER_UNKNOWN,
-       HW_VER_MARLON_B0,  /* JTAG_DEV_ID_MARLON_B0  */
-       HW_VER_SPARROW_A0, /* JTAG_DEV_ID_SPARROW_A0 */
-       HW_VER_SPARROW_A1, /* JTAG_DEV_ID_SPARROW_A1 */
        HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
 };
 
@@ -482,8 +496,6 @@ enum {
 };
 
 enum {
-       hw_capability_reset_v2 = 0,
-       hw_capability_advanced_itr_moderation = 1,
        hw_capability_last
 };
 
@@ -528,7 +540,7 @@ struct wil6210_priv {
        wait_queue_head_t wq; /* for all wait_event() use */
        /* profile */
        u32 monitor_flags;
-       u32 secure_pcp; /* create secure PCP? */
+       u32 privacy; /* secure connection? */
        int sinfo_gen;
        /* interrupt moderation */
        u32 tx_max_burst_duration;
@@ -658,7 +670,7 @@ int wil_if_add(struct wil6210_priv *wil);
 void wil_if_remove(struct wil6210_priv *wil);
 int wil_priv_init(struct wil6210_priv *wil);
 void wil_priv_deinit(struct wil6210_priv *wil);
-int wil_reset(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil, bool no_fw);
 void wil_fw_error_recovery(struct wil6210_priv *wil);
 void wil_set_recovery_state(struct wil6210_priv *wil, int state);
 int wil_up(struct wil6210_priv *wil);
index 0f3e4334c8e3e6858cfa17dcefab7c87dd45e45c..0213135249137d873627c35c3bd102c954bdfd62 100644 (file)
@@ -281,7 +281,6 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 /*=== Event handlers ===*/
 static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 {
-       struct net_device *ndev = wil_to_ndev(wil);
        struct wireless_dev *wdev = wil->wdev;
        struct wmi_ready_event *evt = d;
 
@@ -290,11 +289,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 
        wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
                 evt->mac, wil->n_mids);
-
-       if (!is_valid_ether_addr(ndev->dev_addr)) {
-               memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
-               memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
-       }
+       /* ignore MAC address, we already have it from the boot loader */
        snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
                 "%d", wil->fw_version);
 }
@@ -879,7 +874,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
                struct wmi_pcp_started_event evt;
        } __packed reply;
 
-       if (!wil->secure_pcp)
+       if (!wil->privacy)
                cmd.disable_sec = 1;
 
        if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||
index 55db9f03eb2a3f25d702f88b7bde5b7d16b64441..6a1f03c271c1c04074ec76fcad031f5bce954382 100644 (file)
@@ -1004,7 +1004,7 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, ETH_ALEN);
+                               eth_broadcast_addr(priv->frag_source);
                        }
                }
 
@@ -1022,7 +1022,7 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, ETH_ALEN);
+                               eth_broadcast_addr(priv->frag_source);
                                more_frags = 1; /* don't send broken assembly */
                        }
                }
@@ -1031,7 +1031,7 @@ static void frag_rx_path(struct atmel_private *priv,
                priv->frag_no++;
 
                if (!more_frags) { /* last one */
-                       memset(priv->frag_source, 0xff, ETH_ALEN);
+                       eth_broadcast_addr(priv->frag_source);
                        if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
                                priv->dev->stats.rx_dropped++;
                        } else {
@@ -1127,7 +1127,7 @@ static void rx_done_irq(struct atmel_private *priv)
                        atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
 
                        /* we use the same buffer for frag reassembly and control packets */
-                       memset(priv->frag_source, 0xff, ETH_ALEN);
+                       eth_broadcast_addr(priv->frag_source);
 
                        if (priv->do_rx_crc) {
                                /* last 4 octets is crc */
@@ -1379,7 +1379,7 @@ static int atmel_close(struct net_device *dev)
                wrqu.data.length = 0;
                wrqu.data.flags = 0;
                wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
                wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
        }
 
@@ -1555,7 +1555,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
        priv->last_qual = jiffies;
        priv->last_beacon_timestamp = 0;
        memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
-       memset(priv->BSSID, 0, ETH_ALEN);
+       eth_zero_addr(priv->BSSID);
        priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
        priv->station_was_associated = 0;
 
@@ -2760,7 +2760,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
                u8 SSID_size;
        } cmd;
 
-       memset(cmd.BSSID, 0xff, ETH_ALEN);
+       eth_broadcast_addr(cmd.BSSID);
 
        if (priv->fast_scan) {
                cmd.SSID_size = priv->SSID_size;
@@ -4049,7 +4049,7 @@ static int reset_atmel_card(struct net_device *dev)
                wrqu.data.length = 0;
                wrqu.data.flags = 0;
                wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
                wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
        }
 
index ccbdb05b28cd7e2dc457afe9443e35ac2ca3fc21..ac99798570e8d8dfdfcf3260312c48376f74903e 100644 (file)
@@ -4132,7 +4132,7 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
                if (conf->bssid)
                        memcpy(wl->bssid, conf->bssid, ETH_ALEN);
                else
-                       memset(wl->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(wl->bssid);
        }
 
        if (b43_status(dev) >= B43_STAT_INITIALIZED) {
@@ -4819,7 +4819,7 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
        switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
        case B43_BUS_BCMA:
-               bcma_core_pci_down(dev->dev->bdev->bus);
+               bcma_host_pci_down(dev->dev->bdev->bus);
                break;
 #endif
 #ifdef CONFIG_B43_SSB
@@ -4866,9 +4866,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
        switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
        case B43_BUS_BCMA:
-               bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
+               bcma_core_pci_irq_ctl(dev->dev->bdev->bus,
                                      dev->dev->bdev, true);
-               bcma_core_pci_up(dev->dev->bdev->bus);
+               bcma_host_pci_up(dev->dev->bdev->bus);
                break;
 #endif
 #ifdef CONFIG_B43_SSB
@@ -5051,7 +5051,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
        wl->operating = false;
 
        b43_adjust_opmode(dev);
-       memset(wl->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->mac_addr);
        b43_upload_card_macaddress(dev);
 
        mutex_unlock(&wl->mutex);
@@ -5067,8 +5067,8 @@ static int b43_op_start(struct ieee80211_hw *hw)
        /* Kill all old instance specific information to make sure
         * the card won't use it in the short timeframe between start
         * and mac80211 reconfiguring it. */
-       memset(wl->bssid, 0, ETH_ALEN);
-       memset(wl->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->bssid);
+       eth_zero_addr(wl->mac_addr);
        wl->filter_flags = 0;
        wl->radiotap_enabled = false;
        b43_qos_clear(wl);
index 4e58c0069830b698b0689203172a9a8f4d458e4b..c77b7f59505cc2eb95c13219e04b18862e42ae72 100644 (file)
@@ -2866,7 +2866,7 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
                if (conf->bssid)
                        memcpy(wl->bssid, conf->bssid, ETH_ALEN);
                else
-                       memset(wl->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(wl->bssid);
        }
 
        if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
@@ -3470,7 +3470,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
 
        spin_lock_irqsave(&wl->irq_lock, flags);
        b43legacy_adjust_opmode(dev);
-       memset(wl->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->mac_addr);
        b43legacy_upload_card_macaddress(dev);
        spin_unlock_irqrestore(&wl->irq_lock, flags);
 
@@ -3487,8 +3487,8 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
        /* Kill all old instance specific information to make sure
         * the card won't use it in the short timeframe between start
         * and mac80211 reconfiguring it. */
-       memset(wl->bssid, 0, ETH_ALEN);
-       memset(wl->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->bssid);
+       eth_zero_addr(wl->mac_addr);
        wl->filter_flags = 0;
        wl->beacon0_uploaded = false;
        wl->beacon1_uploaded = false;
index 7944224e3fc90140deb61ae061f4296a555ac526..c438ccdb6ed8215ef0c1470adde8522c1e355944 100644 (file)
 #define BRCMF_DEFAULT_TXGLOM_SIZE      32  /* max tx frames in glom chain */
 #define BRCMF_DEFAULT_RXGLOM_SIZE      32  /* max rx frames in glom chain */
 
+struct brcmf_sdiod_freezer {
+       atomic_t freezing;
+       atomic_t thread_count;
+       u32 frozen_count;
+       wait_queue_head_t thread_freeze;
+       struct completion resumed;
+};
+
 static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
 module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
 MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
@@ -197,6 +205,30 @@ int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
        return 0;
 }
 
+void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
+                             enum brcmf_sdiod_state state)
+{
+       if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
+           state == sdiodev->state)
+               return;
+
+       brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
+       switch (sdiodev->state) {
+       case BRCMF_SDIOD_DATA:
+               /* any other state means bus interface is down */
+               brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
+               break;
+       case BRCMF_SDIOD_DOWN:
+               /* transition from DOWN to DATA means bus interface is up */
+               if (state == BRCMF_SDIOD_DATA)
+                       brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
+               break;
+       default:
+               break;
+       }
+       sdiodev->state = state;
+}
+
 static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
                                        uint regaddr, u8 byte)
 {
@@ -269,12 +301,6 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
        return ret;
 }
 
-static void brcmf_sdiod_nomedium_state(struct brcmf_sdio_dev *sdiodev)
-{
-       sdiodev->state = BRCMF_STATE_NOMEDIUM;
-       brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
-}
-
 static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                                   u8 regsz, void *data, bool write)
 {
@@ -282,7 +308,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
        s32 retry = 0;
        int ret;
 
-       if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
+       if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
                return -ENOMEDIUM;
 
        /*
@@ -308,7 +334,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
 
        if (ret == -ENOMEDIUM)
-               brcmf_sdiod_nomedium_state(sdiodev);
+               brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
        else if (ret != 0) {
                /*
                 * SleepCSR register access can fail when
@@ -331,7 +357,7 @@ brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
        int err = 0, i;
        u8 addr[3];
 
-       if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
+       if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
                return -ENOMEDIUM;
 
        addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
@@ -460,7 +486,7 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
                err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
                                  req_sz);
        if (err == -ENOMEDIUM)
-               brcmf_sdiod_nomedium_state(sdiodev);
+               brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
        return err;
 }
 
@@ -595,7 +621,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
 
                ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
                if (ret == -ENOMEDIUM) {
-                       brcmf_sdiod_nomedium_state(sdiodev);
+                       brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
                        break;
                } else if (ret != 0) {
                        brcmf_err("CMD53 sg block %s failed %d\n",
@@ -877,6 +903,87 @@ static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
        sdiodev->txglomsz = brcmf_sdiod_txglomsz;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
+{
+       sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
+       if (!sdiodev->freezer)
+               return -ENOMEM;
+       atomic_set(&sdiodev->freezer->thread_count, 0);
+       atomic_set(&sdiodev->freezer->freezing, 0);
+       init_waitqueue_head(&sdiodev->freezer->thread_freeze);
+       init_completion(&sdiodev->freezer->resumed);
+       return 0;
+}
+
+static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
+{
+       if (sdiodev->freezer) {
+               WARN_ON(atomic_read(&sdiodev->freezer->freezing));
+               kfree(sdiodev->freezer);
+       }
+}
+
+static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
+{
+       atomic_t *expect = &sdiodev->freezer->thread_count;
+       int res = 0;
+
+       sdiodev->freezer->frozen_count = 0;
+       reinit_completion(&sdiodev->freezer->resumed);
+       atomic_set(&sdiodev->freezer->freezing, 1);
+       brcmf_sdio_trigger_dpc(sdiodev->bus);
+       wait_event(sdiodev->freezer->thread_freeze,
+                  atomic_read(expect) == sdiodev->freezer->frozen_count);
+       sdio_claim_host(sdiodev->func[1]);
+       res = brcmf_sdio_sleep(sdiodev->bus, true);
+       sdio_release_host(sdiodev->func[1]);
+       return res;
+}
+
+static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
+{
+       sdio_claim_host(sdiodev->func[1]);
+       brcmf_sdio_sleep(sdiodev->bus, false);
+       sdio_release_host(sdiodev->func[1]);
+       atomic_set(&sdiodev->freezer->freezing, 0);
+       complete_all(&sdiodev->freezer->resumed);
+}
+
+bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
+{
+       return atomic_read(&sdiodev->freezer->freezing);
+}
+
+void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
+{
+       if (!brcmf_sdiod_freezing(sdiodev))
+               return;
+       sdiodev->freezer->frozen_count++;
+       wake_up(&sdiodev->freezer->thread_freeze);
+       wait_for_completion(&sdiodev->freezer->resumed);
+}
+
+void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
+{
+       atomic_inc(&sdiodev->freezer->thread_count);
+}
+
+void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
+{
+       atomic_dec(&sdiodev->freezer->thread_count);
+}
+#else
+static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
+{
+       return 0;
+}
+
+static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
+
 static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 {
        if (sdiodev->bus) {
@@ -884,6 +991,8 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
                sdiodev->bus = NULL;
        }
 
+       brcmf_sdiod_freezer_detach(sdiodev);
+
        /* Disable Function 2 */
        sdio_claim_host(sdiodev->func[2]);
        sdio_disable_func(sdiodev->func[2]);
@@ -955,6 +1064,10 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
         */
        brcmf_sdiod_sgtable_alloc(sdiodev);
 
+       ret = brcmf_sdiod_freezer_attach(sdiodev);
+       if (ret)
+               goto out;
+
        /* try to attach to the target device */
        sdiodev->bus = brcmf_sdio_probe(sdiodev);
        if (!sdiodev->bus) {
@@ -1050,9 +1163,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
                bus_if->wowl_supported = true;
 #endif
 
-       sdiodev->sleeping = false;
-       atomic_set(&sdiodev->suspend, false);
-       init_waitqueue_head(&sdiodev->idle_wait);
+       brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
 
        brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
        err = brcmf_sdiod_probe(sdiodev);
@@ -1114,24 +1225,22 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
 #ifdef CONFIG_PM_SLEEP
 static int brcmf_ops_sdio_suspend(struct device *dev)
 {
+       struct sdio_func *func;
        struct brcmf_bus *bus_if;
        struct brcmf_sdio_dev *sdiodev;
        mmc_pm_flag_t sdio_flags;
 
-       brcmf_dbg(SDIO, "Enter\n");
+       func = container_of(dev, struct sdio_func, dev);
+       brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+       if (func->num != SDIO_FUNC_1)
+               return 0;
+
 
        bus_if = dev_get_drvdata(dev);
        sdiodev = bus_if->bus_priv.sdio;
 
-       /* wait for watchdog to go idle */
-       if (wait_event_timeout(sdiodev->idle_wait, sdiodev->sleeping,
-                              msecs_to_jiffies(3 * BRCMF_WD_POLL_MS)) == 0) {
-               brcmf_err("bus still active\n");
-               return -EBUSY;
-       }
-       /* disable watchdog */
+       brcmf_sdiod_freezer_on(sdiodev);
        brcmf_sdio_wd_timer(sdiodev->bus, 0);
-       atomic_set(&sdiodev->suspend, true);
 
        if (sdiodev->wowl_enabled) {
                sdio_flags = MMC_PM_KEEP_POWER;
@@ -1149,12 +1258,13 @@ static int brcmf_ops_sdio_resume(struct device *dev)
 {
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+       struct sdio_func *func = container_of(dev, struct sdio_func, dev);
 
-       brcmf_dbg(SDIO, "Enter\n");
-       if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
-               disable_irq_wake(sdiodev->pdata->oob_irq_nr);
-       brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
-       atomic_set(&sdiodev->suspend, false);
+       brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+       if (func->num != SDIO_FUNC_2)
+               return 0;
+
+       brcmf_sdiod_freezer_off(sdiodev);
        return 0;
 }
 
index b59b8c6c42abeb1e17834b1dcd9af4a51b354b4e..9b805c9fd51eb3b474d2ebdac942ba1b8c27645b 100644 (file)
@@ -700,7 +700,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
                /* Do a scan abort to stop the driver's scan engine */
                brcmf_dbg(SCAN, "ABORT scan in firmware\n");
                memset(&params_le, 0, sizeof(params_le));
-               memset(params_le.bssid, 0xFF, ETH_ALEN);
+               eth_broadcast_addr(params_le.bssid);
                params_le.bss_type = DOT11_BSSTYPE_ANY;
                params_le.scan_type = 0;
                params_le.channel_num = cpu_to_le32(1);
@@ -866,7 +866,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
        char *ptr;
        struct brcmf_ssid_le ssid_le;
 
-       memset(params_le->bssid, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(params_le->bssid);
        params_le->bss_type = DOT11_BSSTYPE_ANY;
        params_le->scan_type = 0;
        params_le->channel_num = 0;
@@ -1050,10 +1050,6 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
        if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
                vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
 
-       /* Arm scan timeout timer */
-       mod_timer(&cfg->escan_timeout, jiffies +
-                       WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
-
        escan_req = false;
        if (request) {
                /* scan bss */
@@ -1112,12 +1108,14 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
                }
        }
 
+       /* Arm scan timeout timer */
+       mod_timer(&cfg->escan_timeout, jiffies +
+                       WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+
        return 0;
 
 scan_out:
        clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
-       if (timer_pending(&cfg->escan_timeout))
-               del_timer_sync(&cfg->escan_timeout);
        cfg->scan_request = NULL;
        return err;
 }
@@ -1375,8 +1373,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                                   BRCMF_ASSOC_PARAMS_FIXED_SIZE;
                memcpy(profile->bssid, params->bssid, ETH_ALEN);
        } else {
-               memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
-               memset(profile->bssid, 0, ETH_ALEN);
+               eth_broadcast_addr(join_params.params_le.bssid);
+               eth_zero_addr(profile->bssid);
        }
 
        /* Channel */
@@ -1850,7 +1848,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
        if (sme->bssid)
                memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
        else
-               memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN);
+               eth_broadcast_addr(ext_join_params->assoc_le.bssid);
 
        if (cfg->channel) {
                ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
@@ -1895,7 +1893,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
        if (sme->bssid)
                memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
        else
-               memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
+               eth_broadcast_addr(join_params.params_le.bssid);
 
        if (cfg->channel) {
                join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
@@ -2252,7 +2250,6 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
 
        if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
                /* we ignore this key index in this case */
-               brcmf_err("invalid key index (%d)\n", key_idx);
                return -EINVAL;
        }
 
@@ -4272,7 +4269,7 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
                return -EIO;
 
        memcpy(&scbval.ea, params->mac, ETH_ALEN);
-       scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
+       scbval.val = cpu_to_le32(params->reason_code);
        err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
                                     &scbval, sizeof(scbval));
        if (err)
index 2d6e2cc1b12ce98c6fd8a3039260d56c50070636..f8f47dcfa886278caf5d694bc9d7a0eadadb8ccd 100644 (file)
@@ -944,6 +944,34 @@ fail:
        return ret;
 }
 
+static int brcmf_revinfo_read(struct seq_file *s, void *data)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
+       struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
+       char drev[BRCMU_DOTREV_LEN];
+       char brev[BRCMU_BOARDREV_LEN];
+
+       seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
+       seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
+       seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
+       seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
+       seq_printf(s, "chiprev: %u\n", ri->chiprev);
+       seq_printf(s, "chippkg: %u\n", ri->chippkg);
+       seq_printf(s, "corerev: %u\n", ri->corerev);
+       seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
+       seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
+       seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
+       seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
+       seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
+       seq_printf(s, "bus: %u\n", ri->bus);
+       seq_printf(s, "phytype: %u\n", ri->phytype);
+       seq_printf(s, "phyrev: %u\n", ri->phyrev);
+       seq_printf(s, "anarev: %u\n", ri->anarev);
+       seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);
+
+       return 0;
+}
+
 int brcmf_bus_start(struct device *dev)
 {
        int ret = -1;
@@ -974,6 +1002,8 @@ int brcmf_bus_start(struct device *dev)
        if (ret < 0)
                goto fail;
 
+       brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
+
        /* assure we have chipid before feature attach */
        if (!bus_if->chip) {
                bus_if->chip = drvr->revinfo.chipnum;
index 910fbb561469e80b46147f8c18cbb14f3cd85e14..eb1325371d3a3aa9eba03e5ba83e65135f218f4e 100644 (file)
@@ -236,7 +236,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
        brcmf_flowring_block(flow, flowid, false);
        hash_idx = ring->hash_id;
        flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
-       memset(flow->hash[hash_idx].mac, 0, ETH_ALEN);
+       eth_zero_addr(flow->hash[hash_idx].mac);
        flow->rings[flowid] = NULL;
 
        skb = skb_dequeue(&ring->skblist);
index effb48ebd86450c41d7a3a4d46b85df4f049b946..98d82ec52de1d571af8f5645e7f2d266c6d09dc8 100644 (file)
@@ -697,7 +697,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
        else
                sparams->scan_type = 1;
 
-       memset(&sparams->bssid, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(sparams->bssid);
        if (ssid.SSID_len)
                memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len);
        sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
index faec35c899ec1fb50b67041dd1539d39319902eb..257ee70feb5b143d8e9b5acbf59115f917bc072f 100644 (file)
@@ -515,6 +515,7 @@ struct brcmf_sdio {
        bool txoff;             /* Transmit flow-controlled */
        struct brcmf_sdio_count sdcnt;
        bool sr_enabled; /* SaveRestore enabled */
+       bool sleeping;
 
        u8 tx_hdrlen;           /* sdio bus header length for tx packet */
        bool txglom;            /* host tx glomming enable flag */
@@ -1013,12 +1014,12 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 
        brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
                  (sleep ? "SLEEP" : "WAKE"),
-                 (bus->sdiodev->sleeping ? "SLEEP" : "WAKE"));
+                 (bus->sleeping ? "SLEEP" : "WAKE"));
 
        /* If SR is enabled control bus state with KSO */
        if (bus->sr_enabled) {
                /* Done if we're already in the requested state */
-               if (sleep == bus->sdiodev->sleeping)
+               if (sleep == bus->sleeping)
                        goto end;
 
                /* Going to sleep */
@@ -1026,6 +1027,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
                        /* Don't sleep if something is pending */
                        if (atomic_read(&bus->intstatus) ||
                            atomic_read(&bus->ipend) > 0 ||
+                           bus->ctrl_frame_stat ||
                            (!atomic_read(&bus->fcstate) &&
                            brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
                            data_ok(bus))) {
@@ -1065,9 +1067,7 @@ end:
        } else {
                brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
        }
-       bus->sdiodev->sleeping = sleep;
-       if (sleep)
-               wake_up(&bus->sdiodev->idle_wait);
+       bus->sleeping = sleep;
        brcmf_dbg(SDIO, "new state %s\n",
                  (sleep ? "SLEEP" : "WAKE"));
 done:
@@ -1909,7 +1909,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
        bus->rxpending = true;
 
        for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
-            !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_STATE_DATA;
+            !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA;
             rd->seq_num++, rxleft--) {
 
                /* Handle glomming separately */
@@ -2415,7 +2415,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        }
 
        /* Deflow-control stack if needed */
-       if ((bus->sdiodev->state == BRCMF_STATE_DATA) &&
+       if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
            bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
                bus->txoff = false;
                brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2503,7 +2503,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
                bus->watchdog_tsk = NULL;
        }
 
-       if (sdiodev->state != BRCMF_STATE_NOMEDIUM) {
+       if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
                sdio_claim_host(sdiodev->func[1]);
 
                /* Enable clock for device interrupts */
@@ -2603,21 +2603,6 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
        return ret;
 }
 
-static int brcmf_sdio_pm_resume_wait(struct brcmf_sdio_dev *sdiodev)
-{
-#ifdef CONFIG_PM_SLEEP
-       int retry;
-
-       /* Wait for possible resume to complete */
-       retry = 0;
-       while ((atomic_read(&sdiodev->suspend)) && (retry++ != 50))
-               msleep(20);
-       if (atomic_read(&sdiodev->suspend))
-               return -EIO;
-#endif
-       return 0;
-}
-
 static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 {
        u32 newstatus = 0;
@@ -2628,9 +2613,6 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       if (brcmf_sdio_pm_resume_wait(bus->sdiodev))
-               return;
-
        sdio_claim_host(bus->sdiodev->func[1]);
 
        /* If waiting for HTAVAIL, check status */
@@ -2755,7 +2737,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
                brcmf_sdio_sendfromq(bus, framecnt);
        }
 
-       if ((bus->sdiodev->state != BRCMF_STATE_DATA) || (err != 0)) {
+       if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
                brcmf_err("failed backplane access over SDIO, halting operation\n");
                atomic_set(&bus->intstatus, 0);
        } else if (atomic_read(&bus->intstatus) ||
@@ -2862,11 +2844,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
                qcount[prec] = pktq_plen(&bus->txq, prec);
 #endif
 
-       if (atomic_read(&bus->dpc_tskcnt) == 0) {
-               atomic_inc(&bus->dpc_tskcnt);
-               queue_work(bus->brcmf_wq, &bus->datawork);
-       }
-
+       brcmf_sdio_trigger_dpc(bus);
        return ret;
 }
 
@@ -2964,11 +2942,8 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        bus->ctrl_frame_buf = msg;
        bus->ctrl_frame_len = msglen;
        bus->ctrl_frame_stat = true;
-       if (atomic_read(&bus->dpc_tskcnt) == 0) {
-               atomic_inc(&bus->dpc_tskcnt);
-               queue_work(bus->brcmf_wq, &bus->datawork);
-       }
 
+       brcmf_sdio_trigger_dpc(bus);
        wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
                                         msecs_to_jiffies(CTL_DONE_TIMEOUT));
 
@@ -3411,7 +3386,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
        }
 
        /* Allow full data communication using DPC from now on. */
-       bus->sdiodev->state = BRCMF_STATE_DATA;
+       brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
        bcmerror = 0;
 
 err:
@@ -3548,6 +3523,14 @@ done:
        return err;
 }
 
+void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
+{
+       if (atomic_read(&bus->dpc_tskcnt) == 0) {
+               atomic_inc(&bus->dpc_tskcnt);
+               queue_work(bus->brcmf_wq, &bus->datawork);
+       }
+}
+
 void brcmf_sdio_isr(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
@@ -3557,7 +3540,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
                return;
        }
 
-       if (bus->sdiodev->state != BRCMF_STATE_DATA) {
+       if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
                brcmf_err("bus is down. we have nothing to do\n");
                return;
        }
@@ -3602,9 +3585,8 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
                                                            SDIO_CCCR_INTx,
                                                            NULL);
                                sdio_release_host(bus->sdiodev->func[1]);
-                               intstatus =
-                                   devpend & (INTR_STATUS_FUNC1 |
-                                              INTR_STATUS_FUNC2);
+                               intstatus = devpend & (INTR_STATUS_FUNC1 |
+                                                      INTR_STATUS_FUNC2);
                        }
 
                        /* If there is something, make like the ISR and
@@ -3623,7 +3605,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
        }
 #ifdef DEBUG
        /* Poll for console output periodically */
-       if (bus->sdiodev->state == BRCMF_STATE_DATA &&
+       if (bus->sdiodev->state == BRCMF_SDIOD_DATA &&
            bus->console_interval != 0) {
                bus->console.count += BRCMF_WD_POLL_MS;
                if (bus->console.count >= bus->console_interval) {
@@ -3667,6 +3649,11 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
                atomic_set(&bus->dpc_tskcnt, 0);
                brcmf_sdio_dpc(bus);
        }
+       if (brcmf_sdiod_freezing(bus->sdiodev)) {
+               brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
+               brcmf_sdiod_try_freeze(bus->sdiodev);
+               brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
+       }
 }
 
 static void
@@ -3944,13 +3931,19 @@ static int
 brcmf_sdio_watchdog_thread(void *data)
 {
        struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+       int wait;
 
        allow_signal(SIGTERM);
        /* Run until signal received */
+       brcmf_sdiod_freezer_count(bus->sdiodev);
        while (1) {
                if (kthread_should_stop())
                        break;
-               if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
+               brcmf_sdiod_freezer_uncount(bus->sdiodev);
+               wait = wait_for_completion_interruptible(&bus->watchdog_wait);
+               brcmf_sdiod_freezer_count(bus->sdiodev);
+               brcmf_sdiod_try_freeze(bus->sdiodev);
+               if (!wait) {
                        brcmf_sdio_bus_watchdog(bus);
                        /* Count the tick for reference */
                        bus->sdcnt.tickcnt++;
@@ -3971,7 +3964,7 @@ brcmf_sdio_watchdog(unsigned long data)
                /* Reschedule the watchdog */
                if (bus->wd_timer_valid)
                        mod_timer(&bus->timer,
-                                 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+                                 jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
        }
 }
 
@@ -4089,6 +4082,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 {
        int ret;
        struct brcmf_sdio *bus;
+       struct workqueue_struct *wq;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -4117,12 +4111,16 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
                        bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
        }
 
-       INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
-       bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
-       if (bus->brcmf_wq == NULL) {
+       /* single-threaded workqueue */
+       wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
+                                    dev_name(&sdiodev->func[1]->dev));
+       if (!wq) {
                brcmf_err("insufficient memory to create txworkqueue\n");
                goto fail;
        }
+       brcmf_sdiod_freezer_count(sdiodev);
+       INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+       bus->brcmf_wq = wq;
 
        /* attempt to attach to the dongle */
        if (!(brcmf_sdio_probe_attach(bus))) {
@@ -4143,7 +4141,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        /* Initialize watchdog thread */
        init_completion(&bus->watchdog_wait);
        bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
-                                       bus, "brcmf_watchdog");
+                                       bus, "brcmf_wdog/%s",
+                                       dev_name(&sdiodev->func[1]->dev));
        if (IS_ERR(bus->watchdog_tsk)) {
                pr_warn("brcmf_watchdog thread failed to start\n");
                bus->watchdog_tsk = NULL;
@@ -4242,7 +4241,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
                        destroy_workqueue(bus->brcmf_wq);
 
                if (bus->ci) {
-                       if (bus->sdiodev->state != BRCMF_STATE_NOMEDIUM) {
+                       if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
                                sdio_claim_host(bus->sdiodev->func[1]);
                                brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
                                /* Leave the device in state where it is
@@ -4277,7 +4276,7 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
        }
 
        /* don't start the wd until fw is loaded */
-       if (bus->sdiodev->state != BRCMF_STATE_DATA)
+       if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
                return;
 
        if (wdtick) {
@@ -4290,16 +4289,28 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
                           dynamically changed or in the first instance
                         */
                        bus->timer.expires =
-                               jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
+                               jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS);
                        add_timer(&bus->timer);
 
                } else {
                        /* Re arm the timer, at last watchdog period */
                        mod_timer(&bus->timer,
-                               jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+                               jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
                }
 
                bus->wd_timer_valid = true;
                bus->save_ms = wdtick;
        }
 }
+
+int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
+{
+       int ret;
+
+       sdio_claim_host(bus->sdiodev->func[1]);
+       ret = brcmf_sdio_bus_sleep(bus, sleep, false);
+       sdio_release_host(bus->sdiodev->func[1]);
+
+       return ret;
+}
+
index ec2586a8425cf31d4f84bc053afb0e4a2c3ab5ea..7328478b2d7bf5b67c88428e999baf6d69c3f3bd 100644 (file)
 /* watchdog polling interval in ms */
 #define BRCMF_WD_POLL_MS       10
 
-/* The state of the bus */
-enum brcmf_sdio_state {
-       BRCMF_STATE_DOWN,       /* Device available, still initialising */
-       BRCMF_STATE_DATA,       /* Ready for data transfers, DPC enabled */
-       BRCMF_STATE_NOMEDIUM    /* No medium access to dongle possible */
+/**
+ * enum brcmf_sdiod_state - the state of the bus.
+ *
+ * @BRCMF_SDIOD_DOWN: Device can be accessed, no DPC.
+ * @BRCMF_SDIOD_DATA: Ready for data transfers, DPC enabled.
+ * @BRCMF_SDIOD_NOMEDIUM: No medium access to dongle possible.
+ */
+enum brcmf_sdiod_state {
+       BRCMF_SDIOD_DOWN,
+       BRCMF_SDIOD_DATA,
+       BRCMF_SDIOD_NOMEDIUM
 };
 
 struct brcmf_sdreg {
@@ -169,15 +175,13 @@ struct brcmf_sdreg {
 };
 
 struct brcmf_sdio;
+struct brcmf_sdiod_freezer;
 
 struct brcmf_sdio_dev {
        struct sdio_func *func[SDIO_MAX_FUNCS];
        u8 num_funcs;                   /* Supported funcs on client */
        u32 sbwad;                      /* Save backplane window address */
        struct brcmf_sdio *bus;
-       atomic_t suspend;               /* suspend flag */
-       bool sleeping;
-       wait_queue_head_t idle_wait;
        struct device *dev;
        struct brcmf_bus *bus_if;
        struct brcmfmac_sdio_platform_data *pdata;
@@ -194,7 +198,8 @@ struct brcmf_sdio_dev {
        char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
        char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
        bool wowl_enabled;
-       enum brcmf_sdio_state state;
+       enum brcmf_sdiod_state state;
+       struct brcmf_sdiod_freezer *freezer;
 };
 
 /* sdio core registers */
@@ -337,6 +342,28 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 
 /* Issue an abort to the specified function */
 int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
+                             enum brcmf_sdiod_state state);
+#ifdef CONFIG_PM_SLEEP
+bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
+#else
+static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
+{
+       return false;
+}
+static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
+{
+}
+static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
+{
+}
+static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdio_remove(struct brcmf_sdio *bus);
@@ -344,5 +371,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus);
 
 void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
+int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep);
+void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus);
 
 #endif /* BRCMFMAC_SDIO_H */
index eb8584a9c49a84d28a5ec592ce943a6243e3029f..c84af1dfc88fdde21df4c384a82e849ba827464a 100644 (file)
@@ -4668,7 +4668,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
        brcms_c_coredisable(wlc_hw);
 
        /* Match driver "down" state */
-       bcma_core_pci_down(wlc_hw->d11core->bus);
+       bcma_host_pci_down(wlc_hw->d11core->bus);
 
        /* turn off pll and xtal to match driver "down" state */
        brcms_b_xtal(wlc_hw, OFF);
@@ -4959,7 +4959,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
         * Configure pci/pcmcia here instead of in brcms_c_attach()
         * to allow mfg hotswap:  down, hotswap (chip power cycle), up.
         */
-       bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core,
+       bcma_core_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
                              true);
 
        /*
@@ -4969,12 +4969,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
         */
        if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
                /* put SB PCI in down state again */
-               bcma_core_pci_down(wlc_hw->d11core->bus);
+               bcma_host_pci_down(wlc_hw->d11core->bus);
                brcms_b_xtal(wlc_hw, OFF);
                return -ENOMEDIUM;
        }
 
-       bcma_core_pci_up(wlc_hw->d11core->bus);
+       bcma_host_pci_up(wlc_hw->d11core->bus);
 
        /* reset the d11 core */
        brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -5171,7 +5171,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
 
                /* turn off primary xtal and pll */
                if (!wlc_hw->noreset) {
-                       bcma_core_pci_down(wlc_hw->d11core->bus);
+                       bcma_host_pci_down(wlc_hw->d11core->bus);
                        brcms_b_xtal(wlc_hw, OFF);
                }
        }
index 084f18f4f95039c921b1c82c548c904fcb6ccf67..99dac9b8a082c0bfcccf2769ef30585ea3af4e2d 100644 (file)
@@ -23041,10 +23041,7 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
        else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G1_SEL)
                wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
                                     NPHY_RSSI_SEL_W1);
-       else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G2_SEL)
-               wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
-                                    NPHY_RSSI_SEL_W2);
-       else
+       else /* RADIO_2055_WBRSSI_G2_SEL */
                wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
                                     NPHY_RSSI_SEL_W2);
        if (rssi_ctrl_state[1] == RADIO_2055_NBRSSI_SEL)
@@ -23053,13 +23050,9 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
        else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G1_SEL)
                wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
                                     NPHY_RSSI_SEL_W1);
-       else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G2_SEL)
+       else /* RADIO_2055_WBRSSI_G1_SEL */
                wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
                                     NPHY_RSSI_SEL_W2);
-       else
-               wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
-                                    NPHY_RSSI_SEL_W2);
-
        wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, rssi_type);
 
        write_phy_reg(pi, 0x91, rfctrlintc_state[0]);
index 4a47c7f8a246dac5c2fd3770163341adbeeec73b..89bc18cd6700176406a090ea4ff1047546470e82 100644 (file)
@@ -293,7 +293,7 @@ void cw1200_remove_interface(struct ieee80211_hw *dev,
        }
        priv->vif = NULL;
        priv->mode = NL80211_IFTYPE_MONITOR;
-       memset(priv->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(priv->mac_addr);
        memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo));
        cw1200_free_keys(priv);
        cw1200_setup_mac(priv);
index 0bd541175ecda7d9277e4abd33b6b6b4148bdd8c..d28bd49cb5fd16132623630542788b448b2e8542 100644 (file)
@@ -1429,7 +1429,7 @@ void cw1200_link_id_gc_work(struct work_struct *work)
                                priv->link_id_map &= ~mask;
                                priv->sta_asleep_mask &= ~mask;
                                priv->pspoll_mask &= ~mask;
-                               memset(map_link.mac_addr, 0, ETH_ALEN);
+                               eth_zero_addr(map_link.mac_addr);
                                spin_unlock_bh(&priv->ps_state_lock);
                                reset.link_id = i + 1;
                                wsm_reset(priv, &reset);
index 8bde776894695effab540ae5ee7e0f2e3abee691..055e11d353caf4c679688f45c35e8f02de4bce42 100644 (file)
@@ -174,8 +174,8 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
                /* send broadcast and multicast frames to broadcast RA, if
                 * configured; otherwise, use unicast RA of the WDS link */
                if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
-                   skb->data[0] & 0x01)
-                       memset(&hdr.addr1, 0xff, ETH_ALEN);
+                   is_multicast_ether_addr(skb->data))
+                       eth_broadcast_addr(hdr.addr1);
                else if (iface->type == HOSTAP_INTERFACE_WDS)
                        memcpy(&hdr.addr1, iface->u.wds.remote_addr,
                               ETH_ALEN);
index fd8d83dd4f62ab09b22126c4b330af1ef9b571d9..c995ace153ee6ecd53dff68145543889f6bbb424 100644 (file)
@@ -309,7 +309,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
        int i;
 
        PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name);
-       memset(addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(addr);
 
        resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
 
@@ -1015,8 +1015,8 @@ static void prism2_send_mgmt(struct net_device *dev,
                memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */
        } else if (ieee80211_is_ctl(hdr->frame_control)) {
                /* control:ACK does not have addr2 or addr3 */
-               memset(hdr->addr2, 0, ETH_ALEN);
-               memset(hdr->addr3, 0, ETH_ALEN);
+               eth_zero_addr(hdr->addr2);
+               eth_zero_addr(hdr->addr3);
        } else {
                memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */
                memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */
@@ -1601,7 +1601,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
                memcpy(prev_ap, pos, ETH_ALEN);
                pos++; pos++; pos++; left -= 6;
        } else
-               memset(prev_ap, 0, ETH_ALEN);
+               eth_zero_addr(prev_ap);
 
        if (left >= 2) {
                unsigned int ileft;
index de7c4ffec3096b07ccaece961666b0bdde51ea27..7635ac4f6679625962d3003b6996c93c481b070f 100644 (file)
@@ -442,7 +442,7 @@ static void handle_info_queue_linkstatus(local_info_t *local)
        } else {
                netif_carrier_off(local->dev);
                netif_carrier_off(local->ddev);
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
        }
        wrqu.ap_addr.sa_family = ARPHRD_ETHER;
 
index 8f9f3e9fbfce1bdb818b9d4ea5b1c1fa6edc0969..01de1a3bf94ef0965d03dfd9c6a23d5fd2f744b6 100644 (file)
@@ -224,7 +224,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr,
 
        if (selected) {
                if (do_not_remove)
-                       memset(selected->u.wds.remote_addr, 0, ETH_ALEN);
+                       eth_zero_addr(selected->u.wds.remote_addr);
                else {
                        hostap_remove_interface(selected->dev, rtnl_locked, 0);
                        local->wds_connections--;
@@ -1087,7 +1087,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
 
        ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
                                   (u8 *) &val, 2);
-       memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+       eth_zero_addr(wrqu.ap_addr.sa_data);
        wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
        return ret;
 }
index 57904015380f05f28f5b555a8322caef59ab699d..ca25283e1c9201b566b09fcbe02a8aaa6c9c1811 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/interrupt.h>
 #include <linux/wireless.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/mutex.h>
 #include <net/iw_handler.h>
 #include <net/ieee80211_radiotap.h>
@@ -85,16 +86,16 @@ struct hfa384x_rx_frame {
        /* 802.11 */
        __le16 frame_control;
        __le16 duration_id;
-       u8 addr1[6];
-       u8 addr2[6];
-       u8 addr3[6];
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
        __le16 seq_ctrl;
-       u8 addr4[6];
+       u8 addr4[ETH_ALEN];
        __le16 data_len;
 
        /* 802.3 */
-       u8 dst_addr[6];
-       u8 src_addr[6];
+       u8 dst_addr[ETH_ALEN];
+       u8 src_addr[ETH_ALEN];
        __be16 len;
 
        /* followed by frame data; max 2304 bytes */
@@ -114,16 +115,16 @@ struct hfa384x_tx_frame {
        /* 802.11 */
        __le16 frame_control; /* parts not used */
        __le16 duration_id;
-       u8 addr1[6];
-       u8 addr2[6]; /* filled by firmware */
-       u8 addr3[6];
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN]; /* filled by firmware */
+       u8 addr3[ETH_ALEN];
        __le16 seq_ctrl; /* filled by firmware */
-       u8 addr4[6];
+       u8 addr4[ETH_ALEN];
        __le16 data_len;
 
        /* 802.3 */
-       u8 dst_addr[6];
-       u8 src_addr[6];
+       u8 dst_addr[ETH_ALEN];
+       u8 src_addr[ETH_ALEN];
        __be16 len;
 
        /* followed by frame data; max 2304 bytes */
@@ -156,7 +157,7 @@ struct hfa384x_hostscan_request {
 } __packed;
 
 struct hfa384x_join_request {
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        __le16 channel;
 } __packed;
 
@@ -228,7 +229,7 @@ struct hfa384x_scan_result {
        __le16 chid;
        __le16 anl;
        __le16 sl;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        __le16 beacon_interval;
        __le16 capability;
        __le16 ssid_len;
@@ -241,7 +242,7 @@ struct hfa384x_hostscan_result {
        __le16 chid;
        __le16 anl;
        __le16 sl;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        __le16 beacon_interval;
        __le16 capability;
        __le16 ssid_len;
@@ -824,7 +825,7 @@ struct local_info {
 #define PRISM2_INFO_PENDING_SCANRESULTS 1
        int prev_link_status; /* previous received LinkStatus info */
        int prev_linkstatus_connected;
-       u8 preferred_ap[6]; /* use this AP if possible */
+       u8 preferred_ap[ETH_ALEN]; /* use this AP if possible */
 
 #ifdef PRISM2_CALLBACK
        void *callback_data; /* Can be used in callbacks; e.g., allocate
index 6fabea0309dd9a208d82be0451e07d24af45ab16..08eb229e7816010f11e702d679cb178b213362d2 100644 (file)
@@ -2147,8 +2147,8 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
                return;
        }
 
-       memset(priv->bssid, 0, ETH_ALEN);
-       memset(priv->ieee->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->bssid);
+       eth_zero_addr(priv->ieee->bssid);
 
        netif_carrier_off(priv->net_dev);
        netif_stop_queue(priv->net_dev);
@@ -6956,7 +6956,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev,
                wrqu->ap_addr.sa_family = ARPHRD_ETHER;
                memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
        } else
-               memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu->ap_addr.sa_data);
 
        IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data);
        return 0;
@@ -8300,7 +8300,7 @@ static void ipw2100_wx_event_work(struct work_struct *work)
            priv->status & STATUS_RF_KILL_MASK ||
            ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
                                &priv->bssid, &len)) {
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
        } else {
                /* We now have the BSSID, so can finish setting to the full
                 * associated state */
index 67cad9b05ad821fc720da095aced2a9fe72202b8..39f3e6f5cbcd230a49145d0bf0f589cc85d4abb7 100644 (file)
@@ -1964,7 +1964,7 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
        if (priv->status & STATUS_ASSOCIATED)
                memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
        else
-               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu.ap_addr.sa_data);
        wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
 }
 
@@ -7400,7 +7400,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
        memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
 
        if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
-               memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
+               eth_broadcast_addr(priv->assoc_request.dest);
                priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
        } else {
                memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
@@ -8986,7 +8986,7 @@ static int ipw_wx_get_wap(struct net_device *dev,
                wrqu->ap_addr.sa_family = ARPHRD_ETHER;
                memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
        } else
-               memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+               eth_zero_addr(wrqu->ap_addr.sa_data);
 
        IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
                     wrqu->ap_addr.sa_data);
index eaaeea19d8c5bcc99d887ee7b3fd9c592b897045..bac60b2bc3f014a53668841e934f5062207f4080 100644 (file)
@@ -1678,7 +1678,7 @@ il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
                    lq_sta->total_success > lq_sta->max_success_limit ||
                    (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
                     flush_interval_passed)) {
-                       D_RATE("LQ: stay is expired %d %d %d\n:",
+                       D_RATE("LQ: stay is expired %d %d %d\n",
                               lq_sta->total_failed, lq_sta->total_success,
                               flush_interval_passed);
 
index 2c4fa49686ef1fdfad904b6450efff7151b18f18..887114582583b2e477cdad704b99f3a6e64bc742 100644 (file)
@@ -4634,7 +4634,7 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        il->vif = NULL;
        il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
        il_teardown_interface(il, vif);
-       memset(il->bssid, 0, ETH_ALEN);
+       eth_zero_addr(il->bssid);
 
        D_MAC80211("leave\n");
        mutex_unlock(&il->mutex);
index c4d6dd7402d9066dd1fb171103f8cc22dc438eae..234e30f498b2dde18f3f355d5cfdcf13db6dc185 100644 (file)
@@ -1549,7 +1549,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
                                      table.blink1, table.blink2, table.ilink1,
                                      table.ilink2, table.bcon_time, table.gp1,
                                      table.gp2, table.gp3, table.ucode_ver,
-                                     table.hw_ver, table.brd_ver);
+                                     table.hw_ver, 0, table.brd_ver);
        IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
        IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
index 97e38d2e2983b757717be0dcac1aa3b5be262336..0597a9cfd2f60c26f319afa6954322b86d0476b7 100644 (file)
@@ -77,8 +77,8 @@
 #define IWL3160_UCODE_API_OK   10
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  9
-#define IWL3160_UCODE_API_MIN  9
+#define IWL7260_UCODE_API_MIN  10
+#define IWL3160_UCODE_API_MIN  10
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
index 2f7fe8167dc963259dfaea5469de47dc2bbad6f3..d8dfa6da63072650dcf668d1e16d54ffddff0345 100644 (file)
@@ -75,7 +75,7 @@
 #define IWL8000_UCODE_API_OK   10
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  9
+#define IWL8000_UCODE_API_MIN  10
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION            0x0a1d
index 78bd41bf34b0f04dac4d056d470ac30a80b212c8..53555a0fce56093071c1bbfb40d8cb5f5d300dd8 100644 (file)
@@ -431,11 +431,11 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
        TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
                 u32 data1, u32 data2, u32 line, u32 blink1,
                 u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
-                u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
+                u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver,
                 u32 brd_ver),
        TP_ARGS(dev, desc, tsf_low, data1, data2, line,
                blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
-               gp3, ucode_ver, hw_ver, brd_ver),
+               gp3, major, minor, hw_ver, brd_ver),
        TP_STRUCT__entry(
                DEV_ENTRY
                __field(u32, desc)
@@ -451,7 +451,8 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
                __field(u32, gp1)
                __field(u32, gp2)
                __field(u32, gp3)
-               __field(u32, ucode_ver)
+               __field(u32, major)
+               __field(u32, minor)
                __field(u32, hw_ver)
                __field(u32, brd_ver)
        ),
@@ -470,21 +471,22 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
                __entry->gp1 = gp1;
                __entry->gp2 = gp2;
                __entry->gp3 = gp3;
-               __entry->ucode_ver = ucode_ver;
+               __entry->major = major;
+               __entry->minor = minor;
                __entry->hw_ver = hw_ver;
                __entry->brd_ver = brd_ver;
        ),
        TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
                  "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
-                 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X "
-                 "hw 0x%08X brd 0x%08X",
+                 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X "
+                 "minor 0x%08X hw 0x%08X brd 0x%08X",
                  __get_str(dev), __entry->desc, __entry->tsf_low,
                  __entry->data1,
                  __entry->data2, __entry->line, __entry->blink1,
                  __entry->blink2, __entry->ilink1, __entry->ilink2,
                  __entry->bcon_time, __entry->gp1, __entry->gp2,
-                 __entry->gp3, __entry->ucode_ver, __entry->hw_ver,
-                 __entry->brd_ver)
+                 __entry->gp3, __entry->major, __entry->minor,
+                 __entry->hw_ver, __entry->brd_ver)
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_event,
index 996e7f16adf9feafc50cb5d56596a2b80e0cafb3..141331d41abf28c59059b2e2ce50051a67b5491d 100644 (file)
@@ -175,6 +175,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
        kfree(drv->fw.dbg_dest_tlv);
        for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
                kfree(drv->fw.dbg_conf_tlv[i]);
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
+               kfree(drv->fw.dbg_trigger_tlv[i]);
 
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
                iwl_free_fw_img(drv, drv->fw.img + i);
@@ -293,8 +295,10 @@ struct iwl_firmware_pieces {
 
        /* FW debug data parsed for driver usage */
        struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
-       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
-       size_t dbg_conf_tlv_len[FW_DBG_MAX];
+       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+       size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
+       struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+       size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 };
 
 /*
@@ -842,6 +846,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        capa->n_scan_channels =
                                le32_to_cpup((__le32 *)tlv_data);
                        break;
+               case IWL_UCODE_TLV_FW_VERSION: {
+                       __le32 *ptr = (void *)tlv_data;
+                       u32 major, minor;
+                       u8 local_comp;
+
+                       if (tlv_len != sizeof(u32) * 3)
+                               goto invalid_tlv_len;
+
+                       major = le32_to_cpup(ptr++);
+                       minor = le32_to_cpup(ptr++);
+                       local_comp = le32_to_cpup(ptr);
+
+                       snprintf(drv->fw.fw_version,
+                                sizeof(drv->fw.fw_version), "%u.%u.%u",
+                                major, minor, local_comp);
+                       break;
+                       }
                case IWL_UCODE_TLV_FW_DBG_DEST: {
                        struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
 
@@ -897,6 +918,31 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
                        break;
                        }
+               case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
+                       struct iwl_fw_dbg_trigger_tlv *trigger =
+                               (void *)tlv_data;
+                       u32 trigger_id = le32_to_cpu(trigger->id);
+
+                       if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
+                               IWL_ERR(drv,
+                                       "Skip unknown trigger: %u\n",
+                                       trigger->id);
+                               break;
+                       }
+
+                       if (pieces->dbg_trigger_tlv[trigger_id]) {
+                               IWL_ERR(drv,
+                                       "Ignore duplicate dbg trigger %u\n",
+                                       trigger->id);
+                               break;
+                       }
+
+                       IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id);
+
+                       pieces->dbg_trigger_tlv[trigger_id] = trigger;
+                       pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
+                       break;
+                       }
                case IWL_UCODE_TLV_SEC_RT_USNIFFER:
                        usniffer_images = true;
                        iwl_store_ucode_sec(pieces, tlv_data,
@@ -1107,7 +1153,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        if (err)
                goto try_again;
 
-       api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+       if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
+               api_ver = drv->fw.ucode_ver;
+       else
+               api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
 
        /*
         * api_ver should match the api version forming part of the
@@ -1178,6 +1227,19 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                }
        }
 
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
+               if (pieces->dbg_trigger_tlv[i]) {
+                       drv->fw.dbg_trigger_tlv_len[i] =
+                               pieces->dbg_trigger_tlv_len[i];
+                       drv->fw.dbg_trigger_tlv[i] =
+                               kmemdup(pieces->dbg_trigger_tlv[i],
+                                       drv->fw.dbg_trigger_tlv_len[i],
+                                       GFP_KERNEL);
+                       if (!drv->fw.dbg_trigger_tlv[i])
+                               goto out_free_fw;
+               }
+       }
+
        /* Now that we can no longer fail, copy information */
 
        /*
index 919a2548a92c5dca064258c437cdb8e59ade42db..37b38a585dd182f8fa1c44256f4a16ecc008d3df 100644 (file)
@@ -82,6 +82,8 @@
  *     sections like this in a single file.
  * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
  * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
+ * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
+ *     Structured as &struct iwl_fw_error_dump_trigger_desc.
  */
 enum iwl_fw_error_dump_type {
        /* 0 is deprecated */
@@ -94,6 +96,7 @@ enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_TXF = 7,
        IWL_FW_ERROR_DUMP_FH_REGS = 8,
        IWL_FW_ERROR_DUMP_MEM = 9,
+       IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
@@ -230,4 +233,47 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
        return (void *)(data->data + le32_to_cpu(data->len));
 }
 
+/**
+ * enum iwl_fw_dbg_trigger - triggers available
+ *
+ * @FW_DBG_TRIGGER_USER: trigger log collection by user
+ *     This should not be defined as a trigger to the driver, but a value the
+ *     driver should set to indicate that the trigger was initiated by the
+ *     user.
+ * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts
+ * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
+ *     missed.
+ * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
+ * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
+ *     command response or a notification.
+ * @FW_DB_TRIGGER_RESERVED: reserved
+ * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
+ * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
+ *     goes below a threshold.
+ */
+enum iwl_fw_dbg_trigger {
+       FW_DBG_TRIGGER_INVALID = 0,
+       FW_DBG_TRIGGER_USER,
+       FW_DBG_TRIGGER_FW_ASSERT,
+       FW_DBG_TRIGGER_MISSED_BEACONS,
+       FW_DBG_TRIGGER_CHANNEL_SWITCH,
+       FW_DBG_TRIGGER_FW_NOTIF,
+       FW_DB_TRIGGER_RESERVED,
+       FW_DBG_TRIGGER_STATS,
+       FW_DBG_TRIGGER_RSSI,
+
+       /* must be last */
+       FW_DBG_TRIGGER_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
+ * @type: %enum iwl_fw_dbg_trigger
+ * @data: raw data about what happened
+ */
+struct iwl_fw_error_dump_trigger_desc {
+       __le32 type;
+       u8 data[];
+};
+
 #endif /* __fw_error_dump_h__ */
index 016d913846818d2ea3731c1b4c0fae4b088f5363..5ea381861d5d2c46c1dce922c40967681dc60185 100644 (file)
@@ -66,6 +66,7 @@
 #define __iwl_fw_file_h__
 
 #include <linux/netdevice.h>
+#include <linux/nl80211.h>
 
 /* v1/v2 uCode file layout */
 struct iwl_ucode_header {
@@ -133,8 +134,10 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_N_SCAN_CHANNELS           = 31,
        IWL_UCODE_TLV_SEC_RT_USNIFFER   = 34,
        IWL_UCODE_TLV_SDIO_ADMA_ADDR    = 35,
+       IWL_UCODE_TLV_FW_VERSION        = 36,
        IWL_UCODE_TLV_FW_DBG_DEST       = 38,
        IWL_UCODE_TLV_FW_DBG_CONF       = 39,
+       IWL_UCODE_TLV_FW_DBG_TRIGGER    = 40,
 };
 
 struct iwl_ucode_tlv {
@@ -156,7 +159,8 @@ struct iwl_tlv_ucode_header {
        __le32 zero;
        __le32 magic;
        u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
-       __le32 ver;             /* major/minor/API/serial */
+       /* major/minor/API/serial or major in new format */
+       __le32 ver;
        __le32 build;
        __le64 ignore;
        /*
@@ -237,7 +241,6 @@ enum iwl_ucode_tlv_flag {
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
  * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
- * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
  * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *     longer than the passive one, which is essential for fragmented scan.
@@ -250,11 +253,12 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
  * @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
  * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
+ * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_BT_COEX_SPLIT         = BIT(3),
        IWL_UCODE_TLV_API_DISABLE_STA_TX        = BIT(5),
-       IWL_UCODE_TLV_API_LMAC_SCAN             = BIT(6),
        IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF     = BIT(7),
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
        IWL_UCODE_TLV_API_HDC_PHASE_0           = BIT(10),
@@ -263,6 +267,8 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = BIT(16),
        IWL_UCODE_TLV_API_ASYNC_DTM             = BIT(17),
        IWL_UCODE_TLV_API_LQ_SS_PARAMS          = BIT(18),
+       IWL_UCODE_TLV_API_STATS_V10             = BIT(19),
+       IWL_UCODE_TLV_API_NEW_VERSION           = BIT(20),
 };
 
 /**
@@ -284,6 +290,8 @@ enum iwl_ucode_tlv_api {
  *     which also implies support for the scheduler configuration command
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
@@ -298,6 +306,8 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = BIT(12),
        IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = BIT(13),
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = BIT(18),
+       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = BIT(22),
+       IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = BIT(28),
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -450,44 +460,129 @@ struct iwl_fw_dbg_conf_hcmd {
 } __packed;
 
 /**
- * struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration
+ * enum iwl_fw_dbg_trigger_mode - triggers functionalities
  *
- * @enabled: is this trigger enabled
- * @reserved:
- * @len: length, in bytes, of the %trigger field
- * @trigger: pointer to a trigger struct
+ * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
+ * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
  */
-struct iwl_fw_dbg_trigger {
-       u8 enabled;
-       u8 reserved;
-       u8 len;
-       u8 trigger[0];
+enum iwl_fw_dbg_trigger_mode {
+       IWL_FW_DBG_TRIGGER_START = BIT(0),
+       IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+};
+
+/**
+ * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger
+ * @IWL_FW_DBG_CONF_VIF_ANY: any vif type
+ * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode
+ * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode
+ * @IWL_FW_DBG_CONF_VIF_AP: AP mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
+ */
+enum iwl_fw_dbg_trigger_vif_type {
+       IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
+       IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
+       IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
+       IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
+       IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
+       IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
+       IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
+};
+
+/**
+ * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
+ * @id: %enum iwl_fw_dbg_trigger
+ * @vif_type: %enum iwl_fw_dbg_trigger_vif_type
+ * @stop_conf_ids: bitmap of configurations this trigger relates to.
+ *     if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding
+ *     to the currently running configuration is set, the data should be
+ *     collected.
+ * @stop_delay: how many milliseconds to wait before collecting the data
+ *     after the STOP trigger fires.
+ * @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both
+ * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
+ *     configuration should be applied when the triggers kicks in.
+ * @occurrences: number of occurrences. 0 means the trigger will never fire.
+ */
+struct iwl_fw_dbg_trigger_tlv {
+       __le32 id;
+       __le32 vif_type;
+       __le32 stop_conf_ids;
+       __le32 stop_delay;
+       u8 mode;
+       u8 start_conf_id;
+       __le16 occurrences;
+       __le32 reserved[2];
+
+       u8 data[0];
 } __packed;
 
+#define FW_DBG_START_FROM_ALIVE        0
+#define FW_DBG_CONF_MAX                32
+#define FW_DBG_INVALID         0xff
+
 /**
- * enum iwl_fw_dbg_conf - configurations available
- *
- * @FW_DBG_CUSTOM: take this configuration from alive
- *     Note that the trigger is NO-OP for this configuration
+ * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
+ * @stop_consec_missed_bcon: stop recording if threshold is crossed.
+ * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
+ * @start_consec_missed_bcon: start recording if threshold is crossed.
+ * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ */
+struct iwl_fw_dbg_trigger_missed_bcon {
+       __le32 stop_consec_missed_bcon;
+       __le32 stop_consec_missed_bcon_since_rx;
+       __le32 reserved2[2];
+       __le32 start_consec_missed_bcon;
+       __le32 start_consec_missed_bcon_since_rx;
+       __le32 reserved1[2];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
+ * cmds: the list of commands to trigger the collection on
  */
-enum iwl_fw_dbg_conf {
-       FW_DBG_CUSTOM = 0,
+struct iwl_fw_dbg_trigger_cmd {
+       struct cmd {
+               u8 cmd_id;
+               u8 group_id;
+       } __packed cmds[16];
+} __packed;
 
-       /* must be last */
-       FW_DBG_MAX,
-       FW_DBG_INVALID = 0xff,
-};
+/**
+ * iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * @stop_offset: the offset of the value to be monitored
+ * @stop_threshold: the threshold above which to collect
+ * @start_offset: the offset of the value to be monitored
+ * @start_threshold: the threshold above which to start recording
+ */
+struct iwl_fw_dbg_trigger_stats {
+       __le32 stop_offset;
+       __le32 stop_threshold;
+       __le32 start_offset;
+       __le32 start_threshold;
+} __packed;
 
 /**
- * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration
- *
- * @id: %enum iwl_fw_dbg_conf
+ * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
+ * @rssi: RSSI value to trigger at
+ */
+struct iwl_fw_dbg_trigger_low_rssi {
+       __le32 rssi;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
+ * @id: conf id
  * @usniffer: should the uSniffer image be used
  * @num_of_hcmds: how many HCMDs to send are present here
  * @hcmd: a variable length host command to be sent to apply the configuration.
  *     If there is more than one HCMD to send, they will appear one after the
  *     other and be sent in the order that they appear in.
- * This parses IWL_UCODE_TLV_FW_DBG_CONF
+ * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up-to
+ * %FW_DBG_CONF_MAX configuration per run.
  */
 struct iwl_fw_dbg_conf_tlv {
        u8 id;
@@ -495,8 +590,6 @@ struct iwl_fw_dbg_conf_tlv {
        u8 reserved;
        u8 num_of_hcmds;
        struct iwl_fw_dbg_conf_hcmd hcmd;
-
-       /* struct iwl_fw_dbg_trigger sits after all variable length hcmds */
 } __packed;
 
 #endif  /* __iwl_fw_file_h__ */
index ffd785cc67d6703a395b1809322de6a9f83f5f3d..cf75bafae51da0f60255d9a7f98f7d2f5e18bcac 100644 (file)
@@ -68,6 +68,7 @@
 #include <net/mac80211.h>
 
 #include "iwl-fw-file.h"
+#include "iwl-fw-error-dump.h"
 
 /**
  * enum iwl_ucode_type
@@ -157,6 +158,8 @@ struct iwl_fw_cscheme_list {
  * @dbg_dest_tlv: points to the destination TLV for debug
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
+ * @dbg_trigger_tlv: array of pointers to triggers TLVs
+ * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
  */
 struct iwl_fw {
@@ -186,9 +189,10 @@ struct iwl_fw {
        u32 sdio_adma_addr;
 
        struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
-       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
-       size_t dbg_conf_tlv_len[FW_DBG_MAX];
-
+       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+       size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
+       struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+       size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        u8 dbg_dest_reg_num;
 };
 
@@ -206,46 +210,29 @@ static inline const char *get_fw_dbg_mode_string(int mode)
        }
 }
 
-static inline const struct iwl_fw_dbg_trigger *
-iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id)
+static inline bool
+iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
 {
        const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
-       u8 *ptr;
-       int i;
 
        if (!conf_tlv)
-               return NULL;
-
-       ptr = (void *)&conf_tlv->hcmd;
-       for (i = 0; i < conf_tlv->num_of_hcmds; i++) {
-               ptr += sizeof(conf_tlv->hcmd);
-               ptr += le16_to_cpu(conf_tlv->hcmd.len);
-       }
-
-       return (const struct iwl_fw_dbg_trigger *)ptr;
-}
-
-static inline bool
-iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id)
-{
-       const struct iwl_fw_dbg_trigger *trigger =
-               iwl_fw_dbg_conf_get_trigger(fw, id);
-
-       if (!trigger)
                return false;
 
-       return trigger->enabled;
+       return conf_tlv->usniffer;
 }
 
-static inline bool
-iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
-{
-       const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+#define iwl_fw_dbg_trigger_enabled(fw, id) ({                  \
+       void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)];      \
+       unlikely(__dbg_trigger);                                \
+})
 
-       if (!conf_tlv)
-               return false;
+static inline struct iwl_fw_dbg_trigger_tlv*
+iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id)
+{
+       if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv)))
+               return NULL;
 
-       return conf_tlv->usniffer;
+       return fw->dbg_trigger_tlv[id];
 }
 
 #endif  /* __iwl_fw_h__ */
index d4fb5cad07ea1d36c508c25ec83450a2e80e3807..e893c6eb260cd4866b7c7d03c1437b9d479ad9de 100644 (file)
@@ -72,7 +72,7 @@
 #include "iwl-trans.h"
 
 #define CHANNEL_NUM_SIZE       4       /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 7
+#define IWL_NUM_PAPD_CH_GROUPS 9
 #define IWL_NUM_TXP_CH_GROUPS  9
 
 struct iwl_phy_db_entry {
index 6221e4dfc64fcc0ee907d1dcec4e29394bb4933f..6095088b88d91c0fb9c58b82e74cc7e969e589a3 100644 (file)
@@ -370,7 +370,6 @@ enum secure_load_status_reg {
 #define MON_BUFF_CYCLE_CNT             (0xa03c48)
 
 #define DBGC_IN_SAMPLE                 (0xa03c00)
-#define DBGC_OUT_CTRL                  (0xa03c0c)
 
 /* FW chicken bits */
 #define LMPM_CHICK                     0xA01FF8
index a96bd8db6ceb67e7932ebf4c9a8b75a1f43094de..542a6810c81cba704b37a28235678a685ecf47ab 100644 (file)
@@ -595,6 +595,7 @@ enum iwl_d0i3_mode {
  * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
  * @dbg_dest_tlv: points to the destination TLV for debug
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
  */
 struct iwl_trans {
@@ -628,7 +629,8 @@ struct iwl_trans {
        u64 dflt_pwr_limit;
 
        const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
-       const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+       const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+       struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
        u8 dbg_dest_reg_num;
 
        enum iwl_d0i3_mode d0i3_mode;
index 1ec4d55155f7d72fecdbe19db62dd36546bb05e0..ce99572a982d66382e012f47792f7624c4b9ae26 100644 (file)
@@ -611,7 +611,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                bt_cmd->enabled_modules |=
                        cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
 
-       if (IWL_MVM_BT_COEX_CORUNNING)
+       if (iwl_mvm_bt_is_plcr_supported(mvm))
                bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
 
        if (IWL_MVM_BT_COEX_MPLUT) {
@@ -1234,7 +1234,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
 
-       if (!IWL_MVM_BT_COEX_CORUNNING)
+       if (!iwl_mvm_bt_is_plcr_supported(mvm))
                return 0;
 
        lockdep_assert_held(&mvm->mutex);
index d530ef3da1071e5a421db3f4c072504b3571d8e3..9717ee61928cccdf78eefaee7c1a6f91ac736243 100644 (file)
@@ -619,7 +619,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
        if (IWL_MVM_BT_COEX_SYNC2SCO)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
-       if (IWL_MVM_BT_COEX_CORUNNING) {
+       if (iwl_mvm_bt_is_plcr_supported(mvm)) {
                bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
                                                     BT_VALID_CORUN_LUT_40);
                bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
@@ -1167,16 +1167,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
        return lut_type != BT_COEX_LOOSE_LUT;
 }
 
-bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant)
-{
-       u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-       return ag < BT_HIGH_TRAFFIC;
-}
-
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
 {
        u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-       return ag == BT_OFF;
+       return ag < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
@@ -1213,7 +1207,7 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
        };
 
-       if (!IWL_MVM_BT_COEX_CORUNNING)
+       if (!iwl_mvm_bt_is_plcr_supported(mvm))
                return 0;
 
        lockdep_assert_held(&mvm->mutex);
index 14e8fd6618897adbdd7e18adb849b51032736839..9bdfa95d6ce7325f54b2aefde320879297783a19 100644 (file)
@@ -1876,25 +1876,28 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 
        if (mvm->net_detect) {
                iwl_mvm_query_netdetect_reasons(mvm, vif);
+               /* has unlocked the mutex, so skip that */
+               goto out;
        } else {
                keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
                if (keep)
                        mvm->keep_vif = vif;
+               /* has unlocked the mutex, so skip that */
+               goto out_iterate;
 #endif
        }
-       /* has unlocked the mutex, so skip that */
-       goto out;
 
  out_unlock:
        mutex_unlock(&mvm->mutex);
 
- out:
+out_iterate:
        if (!test)
                ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
                        IEEE80211_IFACE_ITER_NORMAL,
                        iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
+out:
        /* return 1 to reconfigure the device */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
index 5fe14591e1c41f06d0f9c4893290165b3ea7674e..5f37eab5008d7a9ebe1719204f024c9d70e01434 100644 (file)
@@ -545,6 +545,57 @@ static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
        return ret ? count : -EINVAL;
 }
 
+static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct iwl_mvm_phy_ctxt *phy_ctxt;
+       u16 value;
+       int ret;
+
+       ret = kstrtou16(buf, 0, &value);
+       if (ret)
+               return ret;
+
+       mutex_lock(&mvm->mutex);
+       rcu_read_lock();
+
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       /* make sure the channel context is assigned */
+       if (!chanctx_conf) {
+               rcu_read_unlock();
+               mutex_unlock(&mvm->mutex);
+               return -EINVAL;
+       }
+
+       phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
+       rcu_read_unlock();
+
+       mvm->dbgfs_rx_phyinfo = value;
+
+       ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
+                                      chanctx_conf->rx_chains_static,
+                                      chanctx_conf->rx_chains_dynamic);
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       char buf[8];
+
+       snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -560,6 +611,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -575,7 +627,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
 
        mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
-       mvmvif->mvm = mvm;
 
        if (!mvmvif->dbgfs_dir) {
                IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
@@ -595,6 +646,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                 S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
+                                S_IRUSR | S_IWUSR);
 
        if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
            mvmvif == mvm->bf_allowed_vif)
index 82c09d86af8c055d5fdda608756d094905898a8f..8cbe77dc1dbb991b6cad15ed10ae828590682a20 100644 (file)
@@ -942,7 +942,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
                                          size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
-       enum iwl_fw_dbg_conf conf;
+       int conf;
        char buf[8];
        const size_t bufsz = sizeof(buf);
        int pos = 0;
@@ -966,7 +966,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       if (WARN_ON(conf_id >= FW_DBG_MAX))
+       if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
                return -EINVAL;
 
        mutex_lock(&mvm->mutex);
@@ -985,7 +985,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       iwl_mvm_fw_dbg_collect(mvm);
+       iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
 
        iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
index c405cda1025fa4b745b541d65b679bca1dd01253..aabaedd3b3ee1c6e3deeadc1ec7a9a9fe6059b83 100644 (file)
@@ -70,6 +70,7 @@
 #define MAC_INDEX_AUX          4
 #define MAC_INDEX_MIN_DRIVER   0
 #define NUM_MAC_INDEX_DRIVER   MAC_INDEX_AUX
+#define NUM_MAC_INDEX          (MAC_INDEX_AUX + 1)
 
 enum iwl_ac {
        AC_BK,
index cfc0e65b34a5e14494d83af1b560d0a33493aa66..a5fbbd637070795b97fafde922aabb50d7bcaf4c 100644 (file)
 
 /* Scan Commands, Responses, Notifications */
 
-/* Masks for iwl_scan_channel.type flags */
-#define SCAN_CHANNEL_TYPE_ACTIVE       BIT(0)
-#define SCAN_CHANNEL_NARROW_BAND       BIT(22)
-
 /* Max number of IEs for direct SSID scans in a command */
 #define PROBE_OPTION_MAX               20
 
-/**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
- * @channel: band is selected by iwl_scan_cmd "flags" field
- * @tx_gain: gain for analog radio
- * @dsp_atten: gain for DSP
- * @active_dwell: dwell time for active scan in TU, typically 5-50
- * @passive_dwell: dwell time for passive scan in TU, typically 20-500
- * @type: type is broken down to these bits:
- *     bit 0: 0 = passive, 1 = active
- *     bits 1-20: SSID direct bit map. If any of these bits is set then
- *             the corresponding SSID IE is transmitted in probe request
- *             (bit i adds IE in position i to the probe request)
- *     bit 22: channel width, 0 = regular, 1 = TGj narrow channel
- *
- * @iteration_count:
- * @iteration_interval:
- * This struct is used once for each channel in the scan list.
- * Each channel can independently select:
- * 1)  SSID for directed active scans
- * 2)  Txpower setting (for rate specified within Tx command)
- * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
- *     quiet_plcp_th, good_CRC_th)
- *
- * To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
- * 1)  If using passive_dwell (i.e. passive_dwell != 0):
- *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
- * 2)  quiet_time <= active_dwell
- * 3)  If restricting off-channel time (i.e. max_out_time !=0):
- *     passive_dwell < max_out_time
- *     active_dwell < max_out_time
- */
-struct iwl_scan_channel {
-       __le32 type;
-       __le16 channel;
-       __le16 iteration_count;
-       __le32 iteration_interval;
-       __le16 active_dwell;
-       __le16 passive_dwell;
-} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */
-
 /**
  * struct iwl_ssid_ie - directed scan network information element
  *
@@ -132,152 +87,6 @@ struct iwl_ssid_ie {
        u8 ssid[IEEE80211_MAX_SSID_LEN];
 } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
 
-/**
- * iwl_scan_flags - masks for scan command flags
- *@SCAN_FLAGS_PERIODIC_SCAN:
- *@SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX:
- *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
- *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
- *@SCAN_FLAGS_FRAGMENTED_SCAN:
- *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that was active
- *     in the past hour, even if they are marked as passive.
- */
-enum iwl_scan_flags {
-       SCAN_FLAGS_PERIODIC_SCAN                = BIT(0),
-       SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX   = BIT(1),
-       SCAN_FLAGS_DELAYED_SCAN_LOWBAND         = BIT(2),
-       SCAN_FLAGS_DELAYED_SCAN_HIGHBAND        = BIT(3),
-       SCAN_FLAGS_FRAGMENTED_SCAN              = BIT(4),
-       SCAN_FLAGS_PASSIVE2ACTIVE               = BIT(5),
-};
-
-/**
- * enum iwl_scan_type - Scan types for scan command
- * @SCAN_TYPE_FORCED:
- * @SCAN_TYPE_BACKGROUND:
- * @SCAN_TYPE_OS:
- * @SCAN_TYPE_ROAMING:
- * @SCAN_TYPE_ACTION:
- * @SCAN_TYPE_DISCOVERY:
- * @SCAN_TYPE_DISCOVERY_FORCED:
- */
-enum iwl_scan_type {
-       SCAN_TYPE_FORCED                = 0,
-       SCAN_TYPE_BACKGROUND            = 1,
-       SCAN_TYPE_OS                    = 2,
-       SCAN_TYPE_ROAMING               = 3,
-       SCAN_TYPE_ACTION                = 4,
-       SCAN_TYPE_DISCOVERY             = 5,
-       SCAN_TYPE_DISCOVERY_FORCED      = 6,
-}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
-
-/**
- * struct iwl_scan_cmd - scan request command
- * ( SCAN_REQUEST_CMD = 0x80 )
- * @len: command length in bytes
- * @scan_flags: scan flags from SCAN_FLAGS_*
- * @channel_count: num of channels in channel list
- *     (1 - ucode_capa.n_scan_channels)
- * @quiet_time: in msecs, dwell this time for active scan on quiet channels
- * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
- *     this number of packets were received (typically 1)
- * @passive2active: is auto switching from passive to active during scan allowed
- * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in TUs, max out of serving channel time
- * @suspend_time: how long to pause scan when returning to service channel:
- *     bits 0-19: beacon interal in TUs (suspend before executing)
- *     bits 20-23: reserved
- *     bits 24-31: number of beacons (suspend between channels)
- * @rxon_flags: RXON_FLG_*
- * @filter_flags: RXON_FILTER_*
- * @tx_cmd: for active scans (zero for passive), w/o payload,
- *     no RS so specify TX rate
- * @direct_scan: direct scan SSIDs
- * @type: one of SCAN_TYPE_*
- * @repeats: how many time to repeat the scan
- */
-struct iwl_scan_cmd {
-       __le16 len;
-       u8 scan_flags;
-       u8 channel_count;
-       __le16 quiet_time;
-       __le16 quiet_plcp_th;
-       __le16 passive2active;
-       __le16 rxchain_sel_flags;
-       __le32 max_out_time;
-       __le32 suspend_time;
-       /* RX_ON_FLAGS_API_S_VER_1 */
-       __le32 rxon_flags;
-       __le32 filter_flags;
-       struct iwl_tx_cmd tx_cmd;
-       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-       __le32 type;
-       __le32 repeats;
-
-       /*
-        * Probe request frame, followed by channel list.
-        *
-        * Size of probe request frame is specified by byte count in tx_cmd.
-        * Channel list follows immediately after probe request frame.
-        * Number of channels in list is specified by channel_count.
-        * Each channel in list is of type:
-        *
-        * struct iwl_scan_channel channels[0];
-        *
-        * NOTE:  Only one band of channels can be scanned per pass.  You
-        * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
-        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
-        * before requesting another scan.
-        */
-       u8 data[0];
-} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
-
-/* Response to scan request contains only status with one of these values */
-#define SCAN_RESPONSE_OK       0x1
-#define SCAN_RESPONSE_ERROR    0x2
-
-/*
- * SCAN_ABORT_CMD = 0x81
- * When scan abort is requested, the command has no fields except the common
- * header. The response contains only a status with one of these values.
- */
-#define SCAN_ABORT_POSSIBLE    0x1
-#define SCAN_ABORT_IGNORED     0x2 /* no pending scans */
-
-/* TODO: complete documentation */
-#define  SCAN_OWNER_STATUS 0x1
-#define  MEASURE_OWNER_STATUS 0x2
-
-/**
- * struct iwl_scan_start_notif - notifies start of scan in the device
- * ( SCAN_START_NOTIFICATION = 0x82 )
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
- * @beacon_timer: structured as follows:
- *     bits 0:19 - beacon interval in usecs
- *     bits 20:23 - reserved (0)
- *     bits 24:31 - number of beacons
- * @channel: which channel is scanned
- * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
- * @status: one of *_OWNER_STATUS
- */
-struct iwl_scan_start_notif {
-       __le32 tsf_low;
-       __le32 tsf_high;
-       __le32 beacon_timer;
-       u8 channel;
-       u8 band;
-       u8 reserved[2];
-       __le32 status;
-} __packed; /* SCAN_START_NTF_API_S_VER_1 */
-
-/* scan results probe_status first bit indicates success */
-#define SCAN_PROBE_STATUS_OK           0
-#define SCAN_PROBE_STATUS_TX_FAILED    BIT(0)
-/* error statuses combined with TX_FAILED */
-#define SCAN_PROBE_STATUS_FAIL_TTL     BIT(1)
-#define SCAN_PROBE_STATUS_FAIL_BT      BIT(2)
-
 /* How many statistics are gathered for each channel */
 #define SCAN_RESULTS_STATISTICS 1
 
index 928168b183467177a5b7b32b17133eb75f5d89c4..709e28d8b1b09634aa1e427a2544ae0ea669df59 100644 (file)
@@ -65,6 +65,7 @@
 
 #ifndef __fw_api_stats_h__
 #define __fw_api_stats_h__
+#include "fw-api-mac.h"
 
 struct mvm_statistics_dbg {
        __le32 burst_check;
@@ -218,7 +219,7 @@ struct mvm_statistics_bt_activity {
        __le32 lo_priority_rx_denied_cnt;
 } __packed;  /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
 
-struct mvm_statistics_general {
+struct mvm_statistics_general_v5 {
        __le32 radio_temperature;
        __le32 radio_voltage;
        struct mvm_statistics_dbg dbg;
@@ -244,6 +245,39 @@ struct mvm_statistics_general {
        struct mvm_statistics_bt_activity bt_activity;
 } __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
 
+struct mvm_statistics_general_v8 {
+       __le32 radio_temperature;
+       __le32 radio_voltage;
+       struct mvm_statistics_dbg dbg;
+       __le32 sleep_time;
+       __le32 slots_out;
+       __le32 slots_idle;
+       __le32 ttl_timestamp;
+       struct mvm_statistics_div slow_div;
+       __le32 rx_enable_counter;
+       /*
+        * num_of_sos_states:
+        *  count the number of times we have to re-tune
+        *  in order to get out of bad PHY status
+        */
+       __le32 num_of_sos_states;
+       __le32 beacon_filtered;
+       __le32 missed_beacons;
+       __s8 beacon_filter_average_energy;
+       __s8 beacon_filter_reason;
+       __s8 beacon_filter_current_energy;
+       __s8 beacon_filter_reserved;
+       __le32 beacon_filter_delta_time;
+       struct mvm_statistics_bt_activity bt_activity;
+       __le64 rx_time;
+       __le64 on_time_rf;
+       __le64 on_time_scan;
+       __le64 tx_time;
+       __le32 beacon_counter[NUM_MAC_INDEX];
+       u8 beacon_average_energy[NUM_MAC_INDEX];
+       u8 reserved[4 - (NUM_MAC_INDEX % 4)];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
+
 struct mvm_statistics_rx {
        struct mvm_statistics_rx_phy ofdm;
        struct mvm_statistics_rx_phy cck;
@@ -256,22 +290,28 @@ struct mvm_statistics_rx {
  *
  * By default, uCode issues this notification after receiving a beacon
  * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
- *
- * Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
- * 0x9c with CLEAR_STATS bit set (see above).
- *
- * uCode also issues this notification during scans.  uCode clears statistics
- * appropriately so that each notification contains statistics for only the
- * one channel that has just been scanned.
+ * STATISTICS_CMD (0x9c), below.
  */
 
-struct iwl_notif_statistics {
+struct iwl_notif_statistics_v8 {
        __le32 flag;
        struct mvm_statistics_rx rx;
        struct mvm_statistics_tx tx;
-       struct mvm_statistics_general general;
+       struct mvm_statistics_general_v5 general;
 } __packed; /* STATISTICS_NTFY_API_S_VER_8 */
 
+struct iwl_notif_statistics_v10 {
+       __le32 flag;
+       struct mvm_statistics_rx rx;
+       struct mvm_statistics_tx tx;
+       struct mvm_statistics_general_v8 general;
+} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
+
+#define IWL_STATISTICS_FLG_CLEAR               0x1
+#define IWL_STATISTICS_FLG_DISABLE_NOTIF       0x2
+
+struct iwl_statistics_cmd {
+       __le32 flags;
+} __packed; /* STATISTICS_CMD_API_S_VER_1 */
+
 #endif /* __fw_api_stats_h__ */
index b56154fe8ec59cbb1db6b65aea1867e16f8504f9..d95b472137318863e0cac5109d383b83182d5cbe 100644 (file)
@@ -192,6 +192,7 @@ enum {
        BEACON_NOTIFICATION = 0x90,
        BEACON_TEMPLATE_CMD = 0x91,
        TX_ANT_CONFIGURATION_CMD = 0x98,
+       STATISTICS_CMD = 0x9c,
        STATISTICS_NOTIFICATION = 0x9d,
        EOSP_NOTIFICATION = 0x9e,
        REDUCE_TX_POWER_CMD = 0x9f,
@@ -431,7 +432,7 @@ enum {
 
 #define IWL_ALIVE_FLG_RFKILL   BIT(0)
 
-struct mvm_alive_resp {
+struct mvm_alive_resp_ver1 {
        __le16 status;
        __le16 flags;
        u8 ucode_minor;
@@ -482,6 +483,30 @@ struct mvm_alive_resp_ver2 {
        __le32 dbg_print_buff_addr;
 } __packed; /* ALIVE_RES_API_S_VER_2 */
 
+struct mvm_alive_resp {
+       __le16 status;
+       __le16 flags;
+       __le32 ucode_minor;
+       __le32 ucode_major;
+       u8 ver_subtype;
+       u8 ver_type;
+       u8 mac;
+       u8 opt;
+       __le32 timestamp;
+       __le32 error_event_table_ptr;   /* SRAM address for error log */
+       __le32 log_event_table_ptr;     /* SRAM address for LMAC event log */
+       __le32 cpu_register_ptr;
+       __le32 dbgm_config_ptr;
+       __le32 alive_counter_ptr;
+       __le32 scd_base_ptr;            /* SRAM address for SCD */
+       __le32 st_fwrd_addr;            /* pointer to Store and forward */
+       __le32 st_fwrd_size;
+       __le32 umac_minor;              /* UMAC version: minor */
+       __le32 umac_major;              /* UMAC version: major */
+       __le32 error_info_addr;         /* SRAM address for UMAC error log */
+       __le32 dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_3 */
+
 /* Error response/notification */
 enum {
        FW_ERR_UNKNOWN_CMD = 0x0,
index ca38e9817374cc6f4a462048c2d59a5244e33b1c..a81da4cde643a5e38659a3b7098a01bac8a49a29 100644 (file)
@@ -112,25 +112,27 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_alive_data *alive_data = data;
-       struct mvm_alive_resp *palive;
+       struct mvm_alive_resp_ver1 *palive1;
        struct mvm_alive_resp_ver2 *palive2;
+       struct mvm_alive_resp *palive;
 
-       if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
-               palive = (void *)pkt->data;
+       if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
+               palive1 = (void *)pkt->data;
 
                mvm->support_umac_log = false;
                mvm->error_event_table =
-                       le32_to_cpu(palive->error_event_table_ptr);
-               mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
-               alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+                       le32_to_cpu(palive1->error_event_table_ptr);
+               mvm->log_event_table =
+                       le32_to_cpu(palive1->log_event_table_ptr);
+               alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
 
-               alive_data->valid = le16_to_cpu(palive->status) ==
+               alive_data->valid = le16_to_cpu(palive1->status) ==
                                    IWL_ALIVE_STATUS_OK;
                IWL_DEBUG_FW(mvm,
                             "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
-                            le16_to_cpu(palive->status), palive->ver_type,
-                            palive->ver_subtype, palive->flags);
-       } else {
+                            le16_to_cpu(palive1->status), palive1->ver_type,
+                            palive1->ver_subtype, palive1->flags);
+       } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
                palive2 = (void *)pkt->data;
 
                mvm->error_event_table =
@@ -156,6 +158,33 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                IWL_DEBUG_FW(mvm,
                             "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                             palive2->umac_major, palive2->umac_minor);
+       } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+               palive = (void *)pkt->data;
+
+               mvm->error_event_table =
+                       le32_to_cpu(palive->error_event_table_ptr);
+               mvm->log_event_table =
+                       le32_to_cpu(palive->log_event_table_ptr);
+               alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+               mvm->umac_error_event_table =
+                       le32_to_cpu(palive->error_info_addr);
+               mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
+               mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
+
+               alive_data->valid = le16_to_cpu(palive->status) ==
+                                   IWL_ALIVE_STATUS_OK;
+               if (mvm->umac_error_event_table)
+                       mvm->support_umac_log = true;
+
+               IWL_DEBUG_FW(mvm,
+                            "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+                            le16_to_cpu(palive->status), palive->ver_type,
+                            palive->ver_subtype, palive->flags);
+
+               IWL_DEBUG_FW(mvm,
+                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+                            le32_to_cpu(palive->umac_major),
+                            le32_to_cpu(palive->umac_minor));
        }
 
        return true;
@@ -188,8 +217,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        struct iwl_sf_region st_fwrd_space;
 
        if (ucode_type == IWL_UCODE_REGULAR &&
-           iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) &&
-           iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM))
+           iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
                fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
        else
                fw = iwl_get_ucode_image(mvm, ucode_type);
@@ -451,20 +479,80 @@ exit:
        iwl_free_resp(&cmd);
 }
 
-void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
+int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
+                               struct iwl_mvm_dump_desc *desc,
+                               unsigned int delay)
 {
+       if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
+               return -EBUSY;
+
+       if (WARN_ON(mvm->fw_dump_desc))
+               iwl_mvm_free_fw_dump_desc(mvm);
+
+       IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
+                le32_to_cpu(desc->trig_desc.type));
+
+       mvm->fw_dump_desc = desc;
+
        /* stop recording */
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
                iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
        } else {
                iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-               iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+               /* wait before we collect the data till the DBGC stop */
+               udelay(100);
        }
 
-       schedule_work(&mvm->fw_error_dump_wk);
+       queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
+
+       return 0;
 }
 
-int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
+int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
+                          const char *str, size_t len, unsigned int delay)
+{
+       struct iwl_mvm_dump_desc *desc;
+
+       desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+       if (!desc)
+               return -ENOMEM;
+
+       desc->len = len;
+       desc->trig_desc.type = cpu_to_le32(trig);
+       memcpy(desc->trig_desc.data, str, len);
+
+       return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+}
+
+int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
+                               struct iwl_fw_dbg_trigger_tlv *trigger,
+                               const char *str, size_t len)
+{
+       unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+       u16 occurrences = le16_to_cpu(trigger->occurrences);
+       int ret;
+
+       if (!occurrences)
+               return 0;
+
+       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), str,
+                                    len, delay);
+       if (ret)
+               return ret;
+
+       trigger->occurrences = cpu_to_le16(occurrences - 1);
+       return 0;
+}
+
+static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
+{
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+               iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+       else
+               iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
+}
+
+int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
        u8 *ptr;
        int ret;
@@ -474,6 +562,14 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
                      "Invalid configuration %d\n", conf_id))
                return -EINVAL;
 
+       /* EARLY START - firmware's configuration is hard coded */
+       if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
+            !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
+           conf_id == FW_DBG_START_FROM_ALIVE) {
+               iwl_mvm_restart_early_start(mvm);
+               return 0;
+       }
+
        if (!mvm->fw->dbg_conf_tlv[conf_id])
                return -EINVAL;
 
@@ -583,7 +679,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
 
        mvm->fw_dbg_conf = FW_DBG_INVALID;
-       iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
+       /* if we have a destination, assume EARLY START */
+       if (mvm->fw->dbg_dest_tlv)
+               mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
+       iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
 
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
index 7bdc6220743f405c3eb1e8de88bfa6c725bf0891..581b3b8f29f9b6d7460b98eeb9ee54e3b9612c35 100644 (file)
@@ -244,6 +244,7 @@ static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *exclude_vif)
 {
+       u8 sta_id;
        struct iwl_mvm_hw_queues_iface_iterator_data data = {
                .exclude_vif = exclude_vif,
                .used_hw_queues =
@@ -264,6 +265,13 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                                          iwl_mvm_mac_sta_hw_queues_iter,
                                          &data);
 
+       /*
+        * Some TDLS stations may be removed but are in the process of being
+        * drained. Don't touch their queues.
+        */
+       for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT)
+               data.used_hw_queues |= mvm->tfd_drained[sta_id];
+
        return data.used_hw_queues;
 }
 
@@ -1367,10 +1375,18 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
 {
        struct iwl_missed_beacons_notif *missed_beacons = _data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
+       struct iwl_fw_dbg_trigger_tlv *trigger;
+       u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
+       u32 rx_missed_bcon, rx_missed_bcon_since_rx;
 
        if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
                return;
 
+       rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
+       rx_missed_bcon_since_rx =
+               le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
        /*
         * TODO: the threshold should be adjusted based on latency conditions,
         * and/or in case of a CS flow on one of the other AP vifs.
@@ -1378,6 +1394,26 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
        if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
             IWL_MVM_MISSED_BEACONS_THRESHOLD)
                ieee80211_beacon_loss(vif);
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
+                                       FW_DBG_TRIGGER_MISSED_BEACONS))
+               return;
+
+       trigger = iwl_fw_dbg_get_trigger(mvm->fw,
+                                        FW_DBG_TRIGGER_MISSED_BEACONS);
+       bcon_trig = (void *)trigger->data;
+       stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
+       stop_trig_missed_bcon_since_rx =
+               le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
+
+       /* TODO: implement start trigger */
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
+               return;
+
+       if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
+           rx_missed_bcon >= stop_trig_missed_bcon)
+               iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL, 0);
 }
 
 int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
index 1ff7ec08532d113aa5be85cb59eed8100e3f7eed..204255423d99499c54a4811ebb74504150fcd9f4 100644 (file)
@@ -339,13 +339,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
            !iwlwifi_mod_params.sw_crypto)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
-           mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
-               hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
-               hw->wiphy->features |=
-                       NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
-                       NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
-       }
+       hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+       hw->wiphy->features |=
+               NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+               NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
 
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -889,12 +886,23 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
        iwl_trans_release_nic_access(mvm->trans, &flags);
 }
 
+void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
+{
+       if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
+           !mvm->fw_dump_desc)
+               return;
+
+       kfree(mvm->fw_dump_desc);
+       mvm->fw_dump_desc = NULL;
+}
+
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 {
        struct iwl_fw_error_dump_file *dump_file;
        struct iwl_fw_error_dump_data *dump_data;
        struct iwl_fw_error_dump_info *dump_info;
        struct iwl_fw_error_dump_mem *dump_mem;
+       struct iwl_fw_error_dump_trigger_desc *dump_trig;
        struct iwl_mvm_dump_ptrs *fw_error_dump;
        u32 sram_len, sram_ofs;
        u32 file_len, fifo_data_len = 0;
@@ -964,6 +972,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                   fifo_data_len +
                   sizeof(*dump_info);
 
+       if (mvm->fw_dump_desc)
+               file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
+                           mvm->fw_dump_desc->len;
+
        /* Make room for the SMEM, if it exists */
        if (smem_len)
                file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
@@ -975,6 +987,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        dump_file = vzalloc(file_len);
        if (!dump_file) {
                kfree(fw_error_dump);
+               iwl_mvm_free_fw_dump_desc(mvm);
                return;
        }
 
@@ -1003,6 +1016,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
                iwl_mvm_dump_fifos(mvm, &dump_data);
 
+       if (mvm->fw_dump_desc) {
+               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
+               dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
+                                            mvm->fw_dump_desc->len);
+               dump_trig = (void *)dump_data->data;
+               memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
+                      sizeof(*dump_trig) + mvm->fw_dump_desc->len);
+
+               /* now we can free this copy */
+               iwl_mvm_free_fw_dump_desc(mvm);
+               dump_data = iwl_fw_error_next_data(dump_data);
+       }
+
        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
        dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
        dump_mem = (void *)dump_data->data;
@@ -1041,16 +1067,26 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
        dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
                      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
+
+       clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 
+struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
+       .trig_desc = {
+               .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
+       },
+};
+
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 {
        /* clear the D3 reconfig, we only need it to avoid dumping a
         * firmware coredump on reconfiguration, we shouldn't do that
         * on D3->D0 transition
         */
-       if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
+       if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
+               mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
                iwl_mvm_fw_error_dump(mvm);
+       }
 
        /* cleanup all stale references (scan, roc), but keep the
         * ucode_down ref until reconfig is complete
@@ -1091,6 +1127,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
        mvm->vif_count = 0;
        mvm->rx_ba_sessions = 0;
+       mvm->fw_dbg_conf = FW_DBG_INVALID;
+
+       /* keep statistics ticking */
+       iwl_mvm_accu_radio_stats(mvm);
 }
 
 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
@@ -1213,6 +1253,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 {
        lockdep_assert_held(&mvm->mutex);
 
+       /* firmware counters are obviously reset now, but we shouldn't
+        * partially track so also clear the fw_reset_accu counters.
+        */
+       memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
+
        /*
         * Disallow low power states when the FW is down by taking
         * the UCODE_DOWN ref. in case of ongoing hw restart the
@@ -1252,7 +1297,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 
        flush_work(&mvm->d0i3_exit_work);
        flush_work(&mvm->async_handlers_wk);
-       flush_work(&mvm->fw_error_dump_wk);
+       cancel_delayed_work_sync(&mvm->fw_dump_wk);
+       iwl_mvm_free_fw_dump_desc(mvm);
 
        mutex_lock(&mvm->mutex);
        __iwl_mvm_mac_stop(mvm);
@@ -1300,6 +1346,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
+       mvmvif->mvm = mvm;
+
        /*
         * make sure D0i3 exit is completed, otherwise a target access
         * during tx queue configuration could be done when still in
@@ -1317,6 +1365,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       /* make sure that beacon statistics don't go backwards with FW reset */
+       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               mvmvif->beacon_stats.accu_num_beacons +=
+                       mvmvif->beacon_stats.num_beacons;
+
        /* Allocate resources for the MAC context, and add it to the fw  */
        ret = iwl_mvm_mac_ctxt_init(mvm, vif);
        if (ret)
@@ -1810,6 +1863,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 
        if (changes & BSS_CHANGED_ASSOC) {
                if (bss_conf->assoc) {
+                       /* clear statistics to get clean beacon counter */
+                       iwl_mvm_request_statistics(mvm, true);
+                       memset(&mvmvif->beacon_stats, 0,
+                              sizeof(mvmvif->beacon_stats));
+
                        /* add quota for this interface */
                        ret = iwl_mvm_update_quotas(mvm, NULL);
                        if (ret) {
@@ -2196,10 +2254,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 
        if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
                ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
-       else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
-               ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
        else
-               ret = iwl_mvm_scan_request(mvm, vif, req);
+               ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
 
        if (ret)
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
@@ -2527,13 +2583,7 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
-       /* Newest FW fixes sched scan while connected on another interface */
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
-               if (!vif->bss_conf.idle) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-       } else if (!iwl_mvm_is_idle(mvm)) {
+       if (!vif->bss_conf.idle) {
                ret = -EBUSY;
                goto out;
        }
@@ -3433,6 +3483,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
                           chsw->chandef.center_freq1);
 
+       iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH,
+                                      NULL, 0);
+
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
                csa_vif =
@@ -3581,6 +3634,95 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
        }
 }
 
+static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
+                                 struct survey_info *survey)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       memset(survey, 0, sizeof(*survey));
+
+       /* only support global statistics right now */
+       if (idx != 0)
+               return -ENOENT;
+
+       if (!(mvm->fw->ucode_capa.capa[0] &
+                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+               return -ENOENT;
+
+       mutex_lock(&mvm->mutex);
+
+       if (mvm->ucode_loaded) {
+               ret = iwl_mvm_request_statistics(mvm, false);
+               if (ret)
+                       goto out;
+       }
+
+       survey->filled = SURVEY_INFO_TIME |
+                        SURVEY_INFO_TIME_RX |
+                        SURVEY_INFO_TIME_TX |
+                        SURVEY_INFO_TIME_SCAN;
+       survey->time = mvm->accu_radio_stats.on_time_rf +
+                      mvm->radio_stats.on_time_rf;
+       do_div(survey->time, USEC_PER_MSEC);
+
+       survey->time_rx = mvm->accu_radio_stats.rx_time +
+                         mvm->radio_stats.rx_time;
+       do_div(survey->time_rx, USEC_PER_MSEC);
+
+       survey->time_tx = mvm->accu_radio_stats.tx_time +
+                         mvm->radio_stats.tx_time;
+       do_div(survey->time_tx, USEC_PER_MSEC);
+
+       survey->time_scan = mvm->accu_radio_stats.on_time_scan +
+                           mvm->radio_stats.on_time_scan;
+       do_div(survey->time_scan, USEC_PER_MSEC);
+
+ out:
+       mutex_unlock(&mvm->mutex);
+       return ret;
+}
+
+static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_sta *sta,
+                                      struct station_info *sinfo)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+       if (!(mvm->fw->ucode_capa.capa[0] &
+                               IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+               return;
+
+       /* if beacon filtering isn't on mac80211 does it anyway */
+       if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+               return;
+
+       if (!vif->bss_conf.assoc)
+               return;
+
+       mutex_lock(&mvm->mutex);
+
+       if (mvmvif->ap_sta_id != mvmsta->sta_id)
+               goto unlock;
+
+       if (iwl_mvm_request_statistics(mvm, false))
+               goto unlock;
+
+       sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
+                          mvmvif->beacon_stats.accu_num_beacons;
+       sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+       if (mvmvif->beacon_stats.avg_signal) {
+               /* firmware only reports a value after RXing a few beacons */
+               sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
+               sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+       }
+ unlock:
+       mutex_unlock(&mvm->mutex);
+}
+
 const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
        .ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -3647,4 +3789,6 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 #endif
        .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
 #endif
+       .get_survey = iwl_mvm_mac_get_survey,
+       .sta_statistics = iwl_mvm_mac_sta_statistics,
 };
index 6c69d0584f6c880b917337d3ae011ebbac94a9e2..e10172d69eaa16be03e4a5b4bf7eeda15821dfbb 100644 (file)
@@ -75,6 +75,7 @@
 #include "iwl-trans.h"
 #include "iwl-notif-wait.h"
 #include "iwl-eeprom-parse.h"
+#include "iwl-fw-file.h"
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
@@ -145,6 +146,19 @@ struct iwl_mvm_dump_ptrs {
        u32 op_mode_len;
 };
 
+/**
+ * struct iwl_mvm_dump_desc - describes the dump
+ * @len: length of trig_desc->data
+ * @trig_desc: the description of the dump
+ */
+struct iwl_mvm_dump_desc {
+       size_t len;
+       /* must be last */
+       struct iwl_fw_error_dump_trigger_desc trig_desc;
+};
+
+extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert;
+
 struct iwl_mvm_phy_ctxt {
        u16 id;
        u16 color;
@@ -337,8 +351,12 @@ struct iwl_mvm_vif_bf_data {
  * @beacon_skb: the skb used to hold the AP/GO beacon template
  * @smps_requests: the SMPS requests of differents parts of the driver,
  *     combined on update to yield the overall request to mac80211.
+ * @beacon_stats: beacon statistics, containing the # of received beacons,
+ *     # of received beacons accumulated over FW restart, and the current
+ *     average signal of beacons retrieved from the firmware
  */
 struct iwl_mvm_vif {
+       struct iwl_mvm *mvm;
        u16 id;
        u16 color;
        u8 ap_sta_id;
@@ -354,6 +372,11 @@ struct iwl_mvm_vif {
        bool ps_disabled;
        struct iwl_mvm_vif_bf_data bf_data;
 
+       struct {
+               u32 num_beacons, accu_num_beacons;
+               u8 avg_signal;
+       } beacon_stats;
+
        u32 ap_beacon_time;
 
        enum iwl_tsf_id tsf_id;
@@ -396,7 +419,6 @@ struct iwl_mvm_vif {
 #endif
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       struct iwl_mvm *mvm;
        struct dentry *dbgfs_dir;
        struct dentry *dbgfs_slink;
        struct iwl_dbgfs_pm dbgfs_pm;
@@ -593,6 +615,13 @@ struct iwl_mvm {
 
        struct mvm_statistics_rx rx_stats;
 
+       struct {
+               u64 rx_time;
+               u64 tx_time;
+               u64 on_time_rf;
+               u64 on_time_scan;
+       } radio_stats, accu_radio_stats;
+
        u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
        atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
@@ -666,6 +695,7 @@ struct iwl_mvm {
 
        struct iwl_mvm_frame_stats drv_rx_stats;
        spinlock_t drv_stats_lock;
+       u16 dbgfs_rx_phyinfo;
 #endif
 
        struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -687,8 +717,9 @@ struct iwl_mvm {
 
        /* -1 for always, 0 for never, >0 for that many times */
        s8 restart_fw;
-       struct work_struct fw_error_dump_wk;
-       enum iwl_fw_dbg_conf fw_dbg_conf;
+       u8 fw_dbg_conf;
+       struct delayed_work fw_dump_wk;
+       struct iwl_mvm_dump_desc *fw_dump_desc;
 
 #ifdef CONFIG_IWLWIFI_LEDS
        struct led_classdev led;
@@ -824,6 +855,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_IN_D0I3,
        IWL_MVM_STATUS_ROC_AUX_RUNNING,
        IWL_MVM_STATUS_D3_RECONFIG,
+       IWL_MVM_STATUS_DUMPING_FW_LOG,
 };
 
 static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -883,6 +915,12 @@ static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
        return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
 }
 
+static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
+{
+       return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
+               IWL_MVM_BT_COEX_CORUNNING;
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -951,12 +989,13 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 }
 
 /* Statistics */
-int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+                                 struct iwl_rx_packet *pkt);
 int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
                          struct iwl_rx_cmd_buffer *rxb,
                          struct iwl_device_cmd *cmd);
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
 
 /* NVM */
 int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
@@ -1072,13 +1111,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
 
 /* Scanning */
 int iwl_mvm_scan_size(struct iwl_mvm *mvm);
-int iwl_mvm_scan_request(struct iwl_mvm *mvm,
-                        struct ieee80211_vif *vif,
-                        struct cfg80211_scan_request *req);
-int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *cmd);
 int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
 
@@ -1089,14 +1121,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
 int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
                                                struct iwl_rx_cmd_buffer *rxb,
                                                struct iwl_device_cmd *cmd);
-int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
-                             struct ieee80211_vif *vif,
-                             struct cfg80211_sched_scan_request *req,
-                             struct ieee80211_scan_ies *ies);
 int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
                                       struct cfg80211_sched_scan_request *req);
-int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
-                            struct cfg80211_sched_scan_request *req);
 int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif,
                               struct cfg80211_sched_scan_request *req,
@@ -1238,7 +1264,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
 
-bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant);
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
 void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
@@ -1352,9 +1377,6 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
        iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
 }
 
-/* Assoc status */
-bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
-
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
@@ -1405,7 +1427,62 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
 
-int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf id);
-void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm);
+int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
+                          const char *str, size_t len, unsigned int delay);
+int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
+                               struct iwl_mvm_dump_desc *desc,
+                               unsigned int delay);
+void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
+int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
+                               struct iwl_fw_dbg_trigger_tlv *trigger,
+                               const char *str, size_t len);
+
+static inline bool
+iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
+                            struct ieee80211_vif *vif)
+{
+       u32 trig_vif = le32_to_cpu(trig->vif_type);
+
+       return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
+}
+
+static inline bool
+iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm,
+                                  struct iwl_fw_dbg_trigger_tlv *trig)
+{
+       return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
+               (mvm->fw_dbg_conf == FW_DBG_INVALID ||
+               (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids))));
+}
+
+static inline bool
+iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif,
+                             struct iwl_fw_dbg_trigger_tlv *trig)
+{
+       if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif))
+               return false;
+
+       return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig);
+}
+
+static inline void
+iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
+                              struct ieee80211_vif *vif,
+                              enum iwl_fw_dbg_trigger trig,
+                              const char *str, size_t len)
+{
+       struct iwl_fw_dbg_trigger_tlv *trigger;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig))
+               return;
+
+       trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig);
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trigger, str, len);
+}
 
 #endif /* __IWL_MVM_H__ */
index 2dffc3600ed3faac6cf2777cc7a1f862c29e7b24..fe40922a6b0d467c86da151e2c786a265e46a1e2 100644 (file)
@@ -237,8 +237,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 
        RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
 
-       RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
-       RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
        RX_HANDLER(SCAN_ITERATION_COMPLETE,
                   iwl_mvm_rx_scan_offload_iter_complete_notif, false),
        RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
@@ -311,6 +309,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(REPLY_RX_MPDU_CMD),
        CMD(BEACON_NOTIFICATION),
        CMD(BEACON_TEMPLATE_CMD),
+       CMD(STATISTICS_CMD),
        CMD(STATISTICS_NOTIFICATION),
        CMD(EOSP_NOTIFICATION),
        CMD(REDUCE_TX_POWER_CMD),
@@ -456,7 +455,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
        INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
        INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
-       INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk);
+       INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
        INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
 
        spin_lock_init(&mvm->d0i3_tx_lock);
@@ -504,6 +503,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
        memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
               sizeof(trans->dbg_conf_tlv));
+       trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
 
        /* set up notification wait support */
        iwl_notification_wait_init(&mvm->notif_wait);
@@ -685,6 +685,38 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
        mutex_unlock(&mvm->mutex);
 }
 
+static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
+                                           struct iwl_rx_packet *pkt)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_cmd *cmds_trig;
+       char buf[32];
+       int i;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
+       cmds_trig = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
+               /* don't collect on CMD 0 */
+               if (!cmds_trig->cmds[i].cmd_id)
+                       break;
+
+               if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+                       continue;
+
+               memset(buf, 0, sizeof(buf));
+               snprintf(buf, sizeof(buf), "CMD 0x%02x received", pkt->hdr.cmd);
+               iwl_mvm_fw_dbg_collect_trig(mvm, trig, buf, sizeof(buf));
+               break;
+       }
+}
+
 static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                               struct iwl_rx_cmd_buffer *rxb,
                               struct iwl_device_cmd *cmd)
@@ -693,6 +725,8 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u8 i;
 
+       iwl_mvm_rx_check_trigger(mvm, pkt);
+
        /*
         * Do the notification wait before RX handlers so
         * even if the RX handler consumes the RXB we have
@@ -827,7 +861,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
 static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 {
        struct iwl_mvm *mvm =
-               container_of(work, struct iwl_mvm, fw_error_dump_wk);
+               container_of(work, struct iwl_mvm, fw_dump_wk.work);
 
        if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
                return;
@@ -879,7 +913,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         * can't recover this since we're already half suspended.
         */
        if (!mvm->restart_fw && fw_error) {
-               schedule_work(&mvm->fw_error_dump_wk);
+               iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
        } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
                                    &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
index 5b43616eeb06332dedd68299650583ec292b201f..192b74bc8cf67270a7db805f846f16abf7a23081 100644 (file)
@@ -175,6 +175,10 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
        cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
        cmd->rxchain_info |= cpu_to_le32(active_cnt <<
                                         PHY_RX_CHAIN_MIMO_CNT_POS);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (unlikely(mvm->dbgfs_rx_phyinfo))
+               cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+#endif
 
        cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
 }
index 2620dd0c45f9638c949fd34b7482533b13c33126..33bbdde0046fa29575f7fd72c9e2d1a1af144bd1 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/etherdevice.h>
 
 #include <net/mac80211.h>
 
@@ -491,7 +492,7 @@ void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
                   ETH_ALEN))
-               memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN);
+               eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
 }
 
 static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
index 194bd1f939ca3c6a42e9c4a9d6cc7eb843c2d256..6578498dd5afde7ee13915f6e954cbfb84a3239d 100644 (file)
@@ -134,9 +134,12 @@ enum rs_column_mode {
 #define MAX_NEXT_COLUMNS 7
 #define MAX_COLUMN_CHECKS 3
 
+struct rs_tx_column;
+
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta,
-                                    struct iwl_scale_tbl_info *tbl);
+                                    struct iwl_scale_tbl_info *tbl,
+                                    const struct rs_tx_column *next_col);
 
 struct rs_tx_column {
        enum rs_column_mode mode;
@@ -147,14 +150,19 @@ struct rs_tx_column {
 };
 
 static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                        struct iwl_scale_tbl_info *tbl)
+                        struct iwl_scale_tbl_info *tbl,
+                        const struct rs_tx_column *next_col)
 {
-       return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
+       return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
 }
 
 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         struct iwl_scale_tbl_info *tbl)
+                         struct iwl_scale_tbl_info *tbl,
+                         const struct rs_tx_column *next_col)
 {
+       struct iwl_mvm_sta *mvmsta;
+       struct iwl_mvm_vif *mvmvif;
+
        if (!sta->ht_cap.ht_supported)
                return false;
 
@@ -167,11 +175,17 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
                return false;
 
+       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+       if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+               return false;
+
        return true;
 }
 
 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         struct iwl_scale_tbl_info *tbl)
+                         struct iwl_scale_tbl_info *tbl,
+                         const struct rs_tx_column *next_col)
 {
        if (!sta->ht_cap.ht_supported)
                return false;
@@ -180,7 +194,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                        struct iwl_scale_tbl_info *tbl)
+                        struct iwl_scale_tbl_info *tbl,
+                        const struct rs_tx_column *next_col)
 {
        struct rs_rate *rate = &tbl->rate;
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -800,6 +815,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
                rate->ldpc = true;
        if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
                rate->stbc = true;
+       if (ucode_rate & RATE_MCS_BF_MSK)
+               rate->bfer = true;
 
        rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
 
@@ -809,7 +826,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
 
                if (nss == 1) {
                        rate->type = LQ_HT_SISO;
-                       WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
+                       WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+                                 "stbc %d bfer %d",
+                                 rate->stbc, rate->bfer);
                } else if (nss == 2) {
                        rate->type = LQ_HT_MIMO2;
                        WARN_ON_ONCE(num_of_ant != 2);
@@ -822,7 +841,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
 
                if (nss == 1) {
                        rate->type = LQ_VHT_SISO;
-                       WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
+                       WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+                                 "stbc %d bfer %d",
+                                 rate->stbc, rate->bfer);
                } else if (nss == 2) {
                        rate->type = LQ_VHT_MIMO2;
                        WARN_ON_ONCE(num_of_ant != 2);
@@ -1001,13 +1022,41 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
                rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/* Simple function to compare two rate scale table types */
-static inline bool rs_rate_match(struct rs_rate *a,
-                                struct rs_rate *b)
+/* Check if both rates are identical
+ * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
+ * with a rate indicating STBC/BFER and ANT_AB.
+ */
+static inline bool rs_rate_equal(struct rs_rate *a,
+                                struct rs_rate *b,
+                                bool allow_ant_mismatch)
+
+{
+       bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
+               (a->bfer == b->bfer);
+
+       if (allow_ant_mismatch) {
+               if (a->stbc || a->bfer) {
+                       WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
+                                 a->stbc, a->bfer, a->ant);
+                       ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
+               } else if (b->stbc || b->bfer) {
+                       WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
+                                 b->stbc, b->bfer, b->ant);
+                       ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
+               }
+       }
+
+       return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
+               (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
+}
+
+/* Check if both rates share the same column */
+static inline bool rs_rate_column_match(struct rs_rate *a,
+                                       struct rs_rate *b)
 {
        bool ant_match;
 
-       if (a->stbc)
+       if (a->stbc || a->bfer)
                ant_match = (b->ant == ANT_A || b->ant == ANT_B);
        else
                ant_match = (a->ant == b->ant);
@@ -1016,18 +1065,6 @@ static inline bool rs_rate_match(struct rs_rate *a,
                && ant_match;
 }
 
-static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
-{
-       if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-               return RATE_MCS_CHAN_WIDTH_40;
-       else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
-               return RATE_MCS_CHAN_WIDTH_80;
-       else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
-               return RATE_MCS_CHAN_WIDTH_160;
-
-       return RATE_MCS_CHAN_WIDTH_20;
-}
-
 static u8 rs_get_tid(struct ieee80211_hdr *hdr)
 {
        u8 tid = IWL_MAX_TID_COUNT;
@@ -1048,15 +1085,17 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 {
        int legacy_success;
        int retries;
-       int mac_index, i;
+       int i;
        struct iwl_lq_cmd *table;
-       enum mac80211_rate_control_flags mac_flags;
-       u32 ucode_rate;
-       struct rs_rate rate;
+       u32 lq_hwrate;
+       struct rs_rate lq_rate, tx_resp_rate;
        struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
        u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+       u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+       bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
+               IWL_UCODE_TLV_API_LQ_SS_PARAMS;
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1079,39 +1118,6 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
            !(info->flags & IEEE80211_TX_STAT_AMPDU))
                return;
 
-       /*
-        * Ignore this Tx frame response if its initial rate doesn't match
-        * that of latest Link Quality command.  There may be stragglers
-        * from a previous Link Quality command, but we're no longer interested
-        * in those; they're either from the "active" mode while we're trying
-        * to check "search" mode, or a prior "search" mode after we've moved
-        * to a new "search" mode (which might become the new "active" mode).
-        */
-       table = &lq_sta->lq;
-       ucode_rate = le32_to_cpu(table->rs_table[0]);
-       rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
-       if (info->band == IEEE80211_BAND_5GHZ)
-               rate.index -= IWL_FIRST_OFDM_RATE;
-       mac_flags = info->status.rates[0].flags;
-       mac_index = info->status.rates[0].idx;
-       /* For HT packets, map MCS to PLCP */
-       if (mac_flags & IEEE80211_TX_RC_MCS) {
-               /* Remove # of streams */
-               mac_index &= RATE_HT_MCS_RATE_CODE_MSK;
-               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
-                       mac_index++;
-               /*
-                * mac80211 HT index is always zero-indexed; we need to move
-                * HT OFDM rates after CCK rates in 2.4 GHz band
-                */
-               if (info->band == IEEE80211_BAND_2GHZ)
-                       mac_index += IWL_FIRST_OFDM_RATE;
-       } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
-               mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
-               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
-                       mac_index++;
-       }
-
        if (time_after(jiffies,
                       (unsigned long)(lq_sta->last_tx +
                                       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
@@ -1126,21 +1132,24 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        }
        lq_sta->last_tx = jiffies;
 
+       /* Ignore this Tx frame response if its initial rate doesn't match
+        * that of latest Link Quality command.  There may be stragglers
+        * from a previous Link Quality command, but we're no longer interested
+        * in those; they're either from the "active" mode while we're trying
+        * to check "search" mode, or a prior "search" mode after we've moved
+        * to a new "search" mode (which might become the new "active" mode).
+        */
+       table = &lq_sta->lq;
+       lq_hwrate = le32_to_cpu(table->rs_table[0]);
+       rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
+       rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
+
        /* Here we actually compare this rate to the latest LQ command */
-       if ((mac_index < 0) ||
-           (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
-           (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
-           (rate.ant != info->status.antenna) ||
-           (!!(ucode_rate & RATE_MCS_HT_MSK) !=
-            !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
-           (!!(ucode_rate & RATE_MCS_VHT_MSK) !=
-            !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
-           (!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
-            !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
-           (rate.index != mac_index)) {
+       if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
                IWL_DEBUG_RATE(mvm,
-                              "initial rate %d does not match %d (0x%x)\n",
-                              mac_index, rate.index, ucode_rate);
+                              "initial tx resp rate 0x%x does not match 0x%x\n",
+                              tx_resp_hwrate, lq_hwrate);
+
                /*
                 * Since rates mis-match, the last LQ command may have failed.
                 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
@@ -1168,14 +1177,14 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        }
 
-       if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) {
+       if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
                IWL_DEBUG_RATE(mvm,
                               "Neither active nor search matches tx rate\n");
                tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
                rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
                tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
                rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
-               rs_dump_rate(mvm, &rate, "ACTUAL");
+               rs_dump_rate(mvm, &lq_rate, "ACTUAL");
 
                /*
                 * no matching table found, let's by-pass the data collection
@@ -1200,9 +1209,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                if (info->status.ampdu_ack_len == 0)
                        info->status.ampdu_len = 1;
 
-               ucode_rate = le32_to_cpu(table->rs_table[0]);
-               rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
-               rs_collect_tx_data(mvm, lq_sta, curr_tbl, rate.index,
+               rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
                                   info->status.ampdu_len,
                                   info->status.ampdu_ack_len,
                                   reduced_txp);
@@ -1225,21 +1232,23 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
                /* Collect data for each rate used during failed TX attempts */
                for (i = 0; i <= retries; ++i) {
-                       ucode_rate = le32_to_cpu(table->rs_table[i]);
-                       rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
+                       lq_hwrate = le32_to_cpu(table->rs_table[i]);
+                       rs_rate_from_ucode_rate(lq_hwrate, info->band,
+                                               &lq_rate);
                        /*
                         * Only collect stats if retried rate is in the same RS
                         * table as active/search.
                         */
-                       if (rs_rate_match(&rate, &curr_tbl->rate))
+                       if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
                                tmp_tbl = curr_tbl;
-                       else if (rs_rate_match(&rate, &other_tbl->rate))
+                       else if (rs_rate_column_match(&lq_rate,
+                                                     &other_tbl->rate))
                                tmp_tbl = other_tbl;
                        else
                                continue;
 
-                       rs_collect_tx_data(mvm, lq_sta, tmp_tbl, rate.index, 1,
-                                          i < retries ? 0 : legacy_success,
+                       rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
+                                          1, i < retries ? 0 : legacy_success,
                                           reduced_txp);
                }
 
@@ -1250,7 +1259,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                }
        }
        /* The last TX rate is cached in lq_sta; it's set in if/else above */
-       lq_sta->last_rate_n_flags = ucode_rate;
+       lq_sta->last_rate_n_flags = lq_hwrate;
        IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
 done:
        /* See if there's a better rate or modulation mode to try. */
@@ -1590,7 +1599,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 
                for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
                        allow_func = next_col->checks[j];
-                       if (allow_func && !allow_func(mvm, sta, tbl))
+                       if (allow_func && !allow_func(mvm, sta, tbl, next_col))
                                break;
                }
 
@@ -2536,6 +2545,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
 #ifdef CONFIG_MAC80211_DEBUGFS
        lq_sta->pers.dbg_fixed_rate = 0;
        lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
+       lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
 #endif
        lq_sta->pers.chains = 0;
        memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
@@ -3058,19 +3068,21 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
        if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
                goto out;
 
+#ifdef CONFIG_MAC80211_DEBUGFS
        /* Check if forcing the decision is configured.
         * Note that SISO is forced by not allowing STBC or BFER
         */
-       if (lq_sta->ss_force == RS_SS_FORCE_STBC)
+       if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
                ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
-       else if (lq_sta->ss_force == RS_SS_FORCE_BFER)
+       else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
                ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
 
-       if (lq_sta->ss_force != RS_SS_FORCE_NONE) {
+       if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
                IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
-                              lq_sta->ss_force);
+                              lq_sta->pers.ss_force);
                goto out;
        }
+#endif
 
        if (lq_sta->stbc_capable)
                ss_params |= LQ_SS_STBC_1SS_ALLOWED;
@@ -3311,6 +3323,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        struct iwl_mvm *mvm;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct rs_rate *rate = &tbl->rate;
+       u32 ss_params;
        mvm = lq_sta->pers.drv;
        buff = kmalloc(2048, GFP_KERNEL);
        if (!buff)
@@ -3357,6 +3370,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                        lq_sta->lq.agg_frame_cnt_limit);
 
        desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
+       ss_params = le32_to_cpu(lq_sta->lq.ss_params);
+       desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
+                       (ss_params & LQ_SS_PARAMS_VALID) ?
+                       "VALID," : "INVALID",
+                       (ss_params & LQ_SS_BFER_ALLOWED) ?
+                       "BFER," : "",
+                       (ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
+                       "STBC," : "",
+                       (ss_params & LQ_SS_FORCE) ?
+                       "FORCE" : "");
        desc += sprintf(buff+desc,
                        "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
                        lq_sta->lq.initial_rate_index[0],
@@ -3533,7 +3556,7 @@ static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
        };
 
        pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
-                        ss_force_name[lq_sta->ss_force]);
+                        ss_force_name[lq_sta->pers.ss_force]);
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
@@ -3544,12 +3567,12 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
        int ret = 0;
 
        if (!strncmp("none", buf, 4)) {
-               lq_sta->ss_force = RS_SS_FORCE_NONE;
+               lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
        } else if (!strncmp("siso", buf, 4)) {
-               lq_sta->ss_force = RS_SS_FORCE_SISO;
+               lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
        } else if (!strncmp("stbc", buf, 4)) {
                if (lq_sta->stbc_capable) {
-                       lq_sta->ss_force = RS_SS_FORCE_STBC;
+                       lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
                } else {
                        IWL_ERR(mvm,
                                "can't force STBC. peer doesn't support\n");
@@ -3557,7 +3580,7 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
                }
        } else if (!strncmp("bfer", buf, 4)) {
                if (lq_sta->bfer_capable) {
-                       lq_sta->ss_force = RS_SS_FORCE_BFER;
+                       lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
                } else {
                        IWL_ERR(mvm,
                                "can't force BFER. peer doesn't support\n");
index dc4ef3dfafe192880bf6f42a9c6ab4901b75d861..e4aa9346a23103f4eb660d087d0f7b8542334e42 100644 (file)
@@ -170,6 +170,7 @@ struct rs_rate {
        bool sgi;
        bool ldpc;
        bool stbc;
+       bool bfer;
 };
 
 
@@ -331,14 +332,14 @@ struct iwl_lq_sta {
        /* tx power reduce for this sta */
        int tpc_reduce;
 
-       /* force STBC/BFER/SISO for testing */
-       enum rs_ss_force_opt ss_force;
-
        /* persistent fields - initialized only once - keep last! */
        struct lq_sta_pers {
 #ifdef CONFIG_MAC80211_DEBUGFS
                u32 dbg_fixed_rate;
                u8 dbg_fixed_txp_reduction;
+
+               /* force STBC/BFER/SISO for testing */
+               enum rs_ss_force_opt ss_force;
 #endif
                u8 chains;
                s8 chain_signal[IEEE80211_MAX_CHAINS];
index f922131b4eaba7a66ab8d2e14f749f5d8cb56206..6177e24f4c016d09c8496186d65c394d05bb5eb3 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -345,6 +345,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                struct iwl_mvm_sta *mvmsta;
                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+
+               if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
+                   ieee80211_is_beacon(hdr->frame_control)) {
+                       struct iwl_fw_dbg_trigger_tlv *trig;
+                       struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+                       bool trig_check;
+                       s32 rssi;
+
+                       trig = iwl_fw_dbg_get_trigger(mvm->fw,
+                                                     FW_DBG_TRIGGER_RSSI);
+                       rssi_trig = (void *)trig->data;
+                       rssi = le32_to_cpu(rssi_trig->rssi);
+
+                       trig_check =
+                               iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
+                                                             trig);
+                       if (trig_check && rx_status->signal < rssi)
+                               iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
+               }
        }
 
        rcu_read_unlock();
@@ -416,35 +435,43 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 }
 
 static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
-                                        struct iwl_notif_statistics *stats)
+                                        struct mvm_statistics_rx *rx_stats)
 {
-       /*
-        * NOTE FW aggregates the statistics - BUT the statistics are cleared
-        * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
-        * bit set.
-        */
        lockdep_assert_held(&mvm->mutex);
-       memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
+
+       mvm->rx_stats = *rx_stats;
 }
 
 struct iwl_mvm_stat_data {
-       struct iwl_notif_statistics *stats;
        struct iwl_mvm *mvm;
+       __le32 mac_id;
+       __s8 beacon_filter_average_energy;
+       struct mvm_statistics_general_v8 *general;
 };
 
 static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
                                  struct ieee80211_vif *vif)
 {
        struct iwl_mvm_stat_data *data = _data;
-       struct iwl_notif_statistics *stats = data->stats;
        struct iwl_mvm *mvm = data->mvm;
-       int sig = -stats->general.beacon_filter_average_energy;
+       int sig = -data->beacon_filter_average_energy;
        int last_event;
        int thold = vif->bss_conf.cqm_rssi_thold;
        int hyst = vif->bss_conf.cqm_rssi_hyst;
-       u16 id = le32_to_cpu(stats->rx.general.mac_id);
+       u16 id = le32_to_cpu(data->mac_id);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
+       /* This doesn't need the MAC ID check since it's not taking the
+        * data copied into the "data" struct, but rather the data from
+        * the notification directly.
+        */
+       if (data->general) {
+               mvmvif->beacon_stats.num_beacons =
+                       le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
+               mvmvif->beacon_stats.avg_signal =
+                       -data->general->beacon_average_energy[mvmvif->id];
+       }
+
        if (mvmvif->id != id)
                return;
 
@@ -500,34 +527,101 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
        }
 }
 
-/*
- * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
- *
- * TODO: This handler is implemented partially.
- */
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+static inline void
+iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
 {
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_notif_statistics *stats = (void *)&pkt->data;
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_stats *trig_stats;
+       u32 trig_offset, trig_thold;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
+       trig_stats = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+               return;
+
+       trig_offset = le32_to_cpu(trig_stats->stop_offset);
+       trig_thold = le32_to_cpu(trig_stats->stop_threshold);
+
+       if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
+               return;
+
+       if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
+}
+
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+                                 struct iwl_rx_packet *pkt)
+{
+       size_t v8_len = sizeof(struct iwl_notif_statistics_v8);
+       size_t v10_len = sizeof(struct iwl_notif_statistics_v10);
        struct iwl_mvm_stat_data data = {
-               .stats = stats,
                .mvm = mvm,
        };
+       u32 temperature;
+
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
+               struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
+
+               if (iwl_rx_packet_payload_len(pkt) != v10_len)
+                       goto invalid;
+
+               temperature = le32_to_cpu(stats->general.radio_temperature);
+               data.mac_id = stats->rx.general.mac_id;
+               data.beacon_filter_average_energy =
+                       stats->general.beacon_filter_average_energy;
+
+               iwl_mvm_update_rx_statistics(mvm, &stats->rx);
+
+               mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
+               mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
+               mvm->radio_stats.on_time_rf =
+                       le64_to_cpu(stats->general.on_time_rf);
+               mvm->radio_stats.on_time_scan =
+                       le64_to_cpu(stats->general.on_time_scan);
+
+               data.general = &stats->general;
+       } else {
+               struct iwl_notif_statistics_v8 *stats = (void *)&pkt->data;
+
+               if (iwl_rx_packet_payload_len(pkt) != v8_len)
+                       goto invalid;
+
+               temperature = le32_to_cpu(stats->general.radio_temperature);
+               data.mac_id = stats->rx.general.mac_id;
+               data.beacon_filter_average_energy =
+                       stats->general.beacon_filter_average_energy;
+
+               iwl_mvm_update_rx_statistics(mvm, &stats->rx);
+       }
+
+       iwl_mvm_rx_stats_check_trigger(mvm, pkt);
 
        /* Only handle rx statistics temperature changes if async temp
         * notifications are not supported
         */
        if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
-               iwl_mvm_tt_temp_changed(mvm,
-                               le32_to_cpu(stats->general.radio_temperature));
-
-       iwl_mvm_update_rx_statistics(mvm, stats);
+               iwl_mvm_tt_temp_changed(mvm, temperature);
 
        ieee80211_iterate_active_interfaces(mvm->hw,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_stat_iterator,
                                            &data);
+       return;
+ invalid:
+       IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
+               iwl_rx_packet_payload_len(pkt));
+}
+
+int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+                         struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd)
+{
+       iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
        return 0;
 }
index 7e9aa3cb325401fcf4105e278294b11657a90df1..f0946b5dd7c88b2b833d59a60c0dd753e8399352 100644 (file)
@@ -82,6 +82,7 @@ struct iwl_mvm_scan_params {
        struct _dwell {
                u16 passive;
                u16 active;
+               u16 fragmented;
        } dwell[IEEE80211_NUM_BANDS];
 };
 
@@ -191,101 +192,6 @@ static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
        return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
 
-static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
-                                      struct cfg80211_scan_request *req,
-                                      bool basic_ssid,
-                                      struct iwl_mvm_scan_params *params)
-{
-       struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
-               (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
-       int i;
-       int type = BIT(req->n_ssids) - 1;
-       enum ieee80211_band band = req->channels[0]->band;
-
-       if (!basic_ssid)
-               type |= BIT(req->n_ssids);
-
-       for (i = 0; i < cmd->channel_count; i++) {
-               chan->channel = cpu_to_le16(req->channels[i]->hw_value);
-               chan->type = cpu_to_le32(type);
-               if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
-                       chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
-               chan->active_dwell = cpu_to_le16(params->dwell[band].active);
-               chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
-               chan->iteration_count = cpu_to_le16(1);
-               chan++;
-       }
-}
-
-/*
- * Fill in probe request with the following parameters:
- * TA is our vif HW address, which mac80211 ensures we have.
- * Packet is broadcasted, so this is both SA and DA.
- * The probe request IE is made out of two: first comes the most prioritized
- * SSID if a directed scan is requested. Second comes whatever extra
- * information was given to us as the scan request IE.
- */
-static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
-                                 int n_ssids, const u8 *ssid, int ssid_len,
-                                 const u8 *band_ie, int band_ie_len,
-                                 const u8 *common_ie, int common_ie_len,
-                                 int left)
-{
-       int len = 0;
-       u8 *pos = NULL;
-
-       /* Make sure there is enough space for the probe request,
-        * two mandatory IEs and the data */
-       left -= 24;
-       if (left < 0)
-               return 0;
-
-       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-       eth_broadcast_addr(frame->da);
-       memcpy(frame->sa, ta, ETH_ALEN);
-       eth_broadcast_addr(frame->bssid);
-       frame->seq_ctrl = 0;
-
-       len += 24;
-
-       /* for passive scans, no need to fill anything */
-       if (n_ssids == 0)
-               return (u16)len;
-
-       /* points to the payload of the request */
-       pos = &frame->u.probe_req.variable[0];
-
-       /* fill in our SSID IE */
-       left -= ssid_len + 2;
-       if (left < 0)
-               return 0;
-       *pos++ = WLAN_EID_SSID;
-       *pos++ = ssid_len;
-       if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
-               memcpy(pos, ssid, ssid_len);
-               pos += ssid_len;
-       }
-
-       len += ssid_len + 2;
-
-       if (WARN_ON(left < band_ie_len + common_ie_len))
-               return len;
-
-       if (band_ie && band_ie_len) {
-               memcpy(pos, band_ie, band_ie_len);
-               pos += band_ie_len;
-               len += band_ie_len;
-       }
-
-       if (common_ie && common_ie_len) {
-               memcpy(pos, common_ie, common_ie_len);
-               pos += common_ie_len;
-               len += common_ie_len;
-       }
-
-       return (u16)len;
-}
-
 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
 {
@@ -325,7 +231,7 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                         * If there is more than one active interface make
                         * passive scan more fragmented.
                         */
-                       frag_passive_dwell = (global_cnt < 2) ? 40 : 20;
+                       frag_passive_dwell = 40;
                        params->max_out_time = frag_passive_dwell;
                } else {
                        params->suspend_time = 120;
@@ -358,10 +264,10 @@ not_bound:
 
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
                if (params->passive_fragmented)
-                       params->dwell[band].passive = frag_passive_dwell;
-               else
-                       params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(mvm, band);
+                       params->dwell[band].fragmented = frag_passive_dwell;
+
+               params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
+                                                                       band);
                params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
                                                                      n_ssids);
        }
@@ -379,20 +285,11 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
 {
        int max_probe_len;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
-               max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
-       else
-               max_probe_len = mvm->fw->ucode_capa.max_probe_length;
+       max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
 
        /* we create the 802.11 header and SSID element */
        max_probe_len -= 24 + 2;
 
-       /* basic ssid is added only for hw_scan with and old api */
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) &&
-           !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) &&
-           !is_sched_scan)
-               max_probe_len -= 32;
-
        /* DS parameter set element is added on 2.4GHZ band if required */
        if (iwl_mvm_rrm_scan_needed(mvm))
                max_probe_len -= 3;
@@ -404,9 +301,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
 {
        int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN))
-               return max_ie_len;
-
        /* TODO: [BUG] This function should return the maximum allowed size of
         * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
         * in the same command. So the correct implementation of this function
@@ -420,129 +314,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
        return max_ie_len;
 }
 
-int iwl_mvm_scan_request(struct iwl_mvm *mvm,
-                        struct ieee80211_vif *vif,
-                        struct cfg80211_scan_request *req)
-{
-       struct iwl_host_cmd hcmd = {
-               .id = SCAN_REQUEST_CMD,
-               .len = { 0, },
-               .data = { mvm->scan_cmd, },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       struct iwl_scan_cmd *cmd = mvm->scan_cmd;
-       int ret;
-       u32 status;
-       int ssid_len = 0;
-       u8 *ssid = NULL;
-       bool basic_ssid = !(mvm->fw->ucode_capa.flags &
-                          IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
-       struct iwl_mvm_scan_params params = {};
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /* we should have failed registration if scan_cmd was NULL */
-       if (WARN_ON(mvm->scan_cmd == NULL))
-               return -ENOMEM;
-
-       IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
-       mvm->scan_status = IWL_MVM_SCAN_OS;
-       memset(cmd, 0, ksize(cmd));
-
-       cmd->channel_count = (u8)req->n_channels;
-       cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
-       cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
-       cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
-
-       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, &params);
-       cmd->max_out_time = cpu_to_le32(params.max_out_time);
-       cmd->suspend_time = cpu_to_le32(params.suspend_time);
-       if (params.passive_fragmented)
-               cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
-
-       cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
-       cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
-                                       MAC_FILTER_IN_BEACON);
-
-       if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
-               cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
-       else
-               cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
-
-       cmd->repeats = cpu_to_le32(1);
-
-       /*
-        * If the user asked for passive scan, don't change to active scan if
-        * you see any activity on the channel - remain passive.
-        */
-       if (req->n_ssids > 0) {
-               cmd->passive2active = cpu_to_le16(1);
-               cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
-               if (basic_ssid) {
-                       ssid = req->ssids[0].ssid;
-                       ssid_len = req->ssids[0].ssid_len;
-               }
-       } else {
-               cmd->passive2active = 0;
-               cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
-       }
-
-       iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids,
-                               basic_ssid ? 1 : 0);
-
-       cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
-                                          3 << TX_CMD_FLG_BT_PRIO_POS);
-
-       cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
-       cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
-       cmd->tx_cmd.rate_n_flags =
-                       iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
-                                                 req->no_cck);
-
-       cmd->tx_cmd.len =
-               cpu_to_le16(iwl_mvm_fill_probe_req(
-                           (struct ieee80211_mgmt *)cmd->data,
-                           vif->addr,
-                           req->n_ssids, ssid, ssid_len,
-                           req->ie, req->ie_len, NULL, 0,
-                           mvm->fw->ucode_capa.max_probe_length));
-
-       iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
-
-       cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
-               le16_to_cpu(cmd->tx_cmd.len) +
-               (cmd->channel_count * sizeof(struct iwl_scan_channel)));
-       hcmd.len[0] = le16_to_cpu(cmd->len);
-
-       status = SCAN_RESPONSE_OK;
-       ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
-       if (!ret && status == SCAN_RESPONSE_OK) {
-               IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
-       } else {
-               /*
-                * If the scan failed, it usually means that the FW was unable
-                * to allocate the time events. Warn on it, but maybe we
-                * should try to send the command again with different params.
-                */
-               IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
-                       status, ret);
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
-               ret = -EIO;
-       }
-       return ret;
-}
-
-int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_cmd_response *resp = (void *)pkt->data;
-
-       IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
-                      le32_to_cpu(resp->status));
-       return 0;
-}
-
 int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
                                                struct iwl_rx_cmd_buffer *rxb,
                                                struct iwl_device_cmd *cmd)
@@ -556,130 +327,25 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
        return 0;
 }
 
-int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_scan_complete_notif *notif = (void *)pkt->data;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
-                      notif->status, notif->scanned_channels);
-
-       if (mvm->scan_status == IWL_MVM_SCAN_OS)
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
-       ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
-
-       iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-
-       return 0;
-}
-
 int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
                                    struct iwl_rx_cmd_buffer *rxb,
                                    struct iwl_device_cmd *cmd)
 {
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
-           !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
-               struct iwl_sched_scan_results *notif = (void *)pkt->data;
-
-               if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
-                       return 0;
-       }
-
        IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
        ieee80211_sched_scan_results(mvm->hw);
 
        return 0;
 }
 
-static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
-                                    struct iwl_rx_packet *pkt, void *data)
-{
-       struct iwl_mvm *mvm =
-               container_of(notif_wait, struct iwl_mvm, notif_wait);
-       struct iwl_scan_complete_notif *notif;
-       u32 *resp;
-
-       switch (pkt->hdr.cmd) {
-       case SCAN_ABORT_CMD:
-               resp = (void *)pkt->data;
-               if (*resp == CAN_ABORT_STATUS) {
-                       IWL_DEBUG_SCAN(mvm,
-                                      "Scan can be aborted, wait until completion\n");
-                       return false;
-               }
-
-               /*
-                * If scan cannot be aborted, it means that we had a
-                * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
-                * ieee80211_scan_completed already.
-                */
-               IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
-                              *resp);
-               return true;
-
-       case SCAN_COMPLETE_NOTIFICATION:
-               notif = (void *)pkt->data;
-               IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
-                              notif->status);
-               return true;
-
-       default:
-               WARN_ON(1);
-               return false;
-       };
-}
-
-static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
-{
-       struct iwl_notification_wait wait_scan_abort;
-       static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
-                                              SCAN_COMPLETE_NOTIFICATION };
-       int ret;
-
-       iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
-                                  scan_abort_notif,
-                                  ARRAY_SIZE(scan_abort_notif),
-                                  iwl_mvm_scan_abort_notif, NULL);
-
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
-       if (ret) {
-               IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
-               /* mac80211's state will be cleaned in the nic_restart flow */
-               goto out_remove_notif;
-       }
-
-       return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
-
-out_remove_notif:
-       iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
-       return ret;
-}
-
 int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                                           struct iwl_rx_cmd_buffer *rxb,
                                           struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u8 status, ebs_status;
+       struct iwl_periodic_scan_complete *scan_notif;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
-               struct iwl_periodic_scan_complete *scan_notif;
+       scan_notif = (void *)pkt->data;
 
-               scan_notif = (void *)pkt->data;
-               status = scan_notif->status;
-               ebs_status = scan_notif->ebs_status;
-       } else  {
-               struct iwl_scan_offload_complete *scan_notif;
-
-               scan_notif = (void *)pkt->data;
-               status = scan_notif->status;
-               ebs_status = scan_notif->ebs_status;
-       }
        /* scan status must be locked for proper checking */
        lockdep_assert_held(&mvm->mutex);
 
@@ -687,9 +353,9 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                       "%s completed, status %s, EBS status %s\n",
                       mvm->scan_status == IWL_MVM_SCAN_SCHED ?
                                "Scheduled scan" : "Scan",
-                      status == IWL_SCAN_OFFLOAD_COMPLETED ?
+                      scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
                                "completed" : "aborted",
-                      ebs_status == IWL_SCAN_EBS_SUCCESS ?
+                      scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
                                "success" : "failed");
 
 
@@ -700,64 +366,16 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
        } else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
                mvm->scan_status = IWL_MVM_SCAN_NONE;
                ieee80211_scan_completed(mvm->hw,
-                                        status == IWL_SCAN_OFFLOAD_ABORTED);
+                               scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
        }
 
-       if (ebs_status)
+       if (scan_notif->ebs_status)
                mvm->last_ebs_successful = false;
 
        return 0;
 }
 
-static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
-                                         struct ieee80211_vif *vif,
-                                         struct ieee80211_scan_ies *ies,
-                                         enum ieee80211_band band,
-                                         struct iwl_tx_cmd *cmd,
-                                         u8 *data)
-{
-       u16 cmd_len;
-
-       cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
-       cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
-       cmd->sta_id = mvm->aux_sta.sta_id;
-
-       cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
-
-       cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
-                                        vif->addr,
-                                        1, NULL, 0,
-                                        ies->ies[band], ies->len[band],
-                                        ies->common_ies, ies->common_ie_len,
-                                        SCAN_OFFLOAD_PROBE_REQ_SIZE);
-       cmd->len = cpu_to_le16(cmd_len);
-}
-
-static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
-                              struct ieee80211_vif *vif,
-                              struct cfg80211_sched_scan_request *req,
-                              struct iwl_scan_offload_cmd *scan,
-                              struct iwl_mvm_scan_params *params)
-{
-       scan->channel_count = req->n_channels;
-       scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
-       scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
-       scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
-       scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
-
-       scan->max_out_time = cpu_to_le32(params->max_out_time);
-       scan->suspend_time = cpu_to_le32(params->suspend_time);
-
-       scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
-                                         MAC_FILTER_IN_BEACON);
-       scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
-       scan->rep_count = cpu_to_le32(1);
-
-       if (params->passive_fragmented)
-               scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
-}
-
 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
 {
        int i;
@@ -815,127 +433,6 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
        }
 }
 
-static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
-                                 struct cfg80211_sched_scan_request *req,
-                                 u8 *channels_buffer,
-                                 enum ieee80211_band band,
-                                 int *head,
-                                 u32 ssid_bitmap,
-                                 struct iwl_mvm_scan_params *params)
-{
-       u32 n_channels = mvm->fw->ucode_capa.n_scan_channels;
-       __le32 *type = (__le32 *)channels_buffer;
-       __le16 *channel_number = (__le16 *)(type + n_channels);
-       __le16 *iter_count = channel_number + n_channels;
-       __le32 *iter_interval = (__le32 *)(iter_count + n_channels);
-       u8 *active_dwell = (u8 *)(iter_interval + n_channels);
-       u8 *passive_dwell = active_dwell + n_channels;
-       int i, index = 0;
-
-       for (i = 0; i < req->n_channels; i++) {
-               struct ieee80211_channel *chan = req->channels[i];
-
-               if (chan->band != band)
-                       continue;
-
-               index = *head;
-               (*head)++;
-
-               channel_number[index] = cpu_to_le16(chan->hw_value);
-               active_dwell[index] = params->dwell[band].active;
-               passive_dwell[index] = params->dwell[band].passive;
-
-               iter_count[index] = cpu_to_le16(1);
-               iter_interval[index] = 0;
-
-               if (!(chan->flags & IEEE80211_CHAN_NO_IR))
-                       type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
-
-               type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
-                                          IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
-
-               if (chan->flags & IEEE80211_CHAN_NO_HT40)
-                       type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
-
-               /* scan for all SSIDs from req->ssids */
-               type[index] |= cpu_to_le32(ssid_bitmap);
-       }
-}
-
-int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
-                             struct ieee80211_vif *vif,
-                             struct cfg80211_sched_scan_request *req,
-                             struct ieee80211_scan_ies *ies)
-{
-       int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
-       int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
-       int head = 0;
-       u32 ssid_bitmap;
-       int cmd_len;
-       int ret;
-       u8 *probes;
-       bool basic_ssid = !(mvm->fw->ucode_capa.flags &
-                           IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
-
-       struct iwl_scan_offload_cfg *scan_cfg;
-       struct iwl_host_cmd cmd = {
-               .id = SCAN_OFFLOAD_CONFIG_CMD,
-       };
-       struct iwl_mvm_scan_params params = {};
-
-       lockdep_assert_held(&mvm->mutex);
-
-       cmd_len = sizeof(struct iwl_scan_offload_cfg) +
-                 mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE +
-                 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
-
-       scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
-       if (!scan_cfg)
-               return -ENOMEM;
-
-       probes = scan_cfg->data +
-               mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE;
-
-       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
-       iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
-       scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
-
-       iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan,
-                                   &ssid_bitmap, basic_ssid);
-       /* build tx frames for supported bands */
-       if (band_2ghz) {
-               iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
-                                             IEEE80211_BAND_2GHZ,
-                                             &scan_cfg->scan_cmd.tx_cmd[0],
-                                             probes);
-               iwl_build_channel_cfg(mvm, req, scan_cfg->data,
-                                     IEEE80211_BAND_2GHZ, &head,
-                                     ssid_bitmap, &params);
-       }
-       if (band_5ghz) {
-               iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
-                                             IEEE80211_BAND_5GHZ,
-                                             &scan_cfg->scan_cmd.tx_cmd[1],
-                                             probes +
-                                               SCAN_OFFLOAD_PROBE_REQ_SIZE);
-               iwl_build_channel_cfg(mvm, req, scan_cfg->data,
-                                     IEEE80211_BAND_5GHZ, &head,
-                                     ssid_bitmap, &params);
-       }
-
-       cmd.data[0] = scan_cfg;
-       cmd.len[0] = cmd_len;
-       cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
-
-       IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-       kfree(scan_cfg);
-       return ret;
-}
-
 int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
                                       struct cfg80211_sched_scan_request *req)
 {
@@ -1018,33 +515,6 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
        return true;
 }
 
-int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
-                            struct cfg80211_sched_scan_request *req)
-{
-       struct iwl_scan_offload_req scan_req = {
-               .watchdog = IWL_SCHED_SCAN_WATCHDOG,
-
-               .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
-               .schedule_line[0].delay = cpu_to_le16(req->interval / 1000),
-               .schedule_line[0].full_scan_mul = 1,
-
-               .schedule_line[1].iterations = 0xff,
-               .schedule_line[1].delay = cpu_to_le16(req->interval / 1000),
-               .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
-       };
-
-       if (iwl_mvm_scan_pass_all(mvm, req))
-               scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
-
-       if (mvm->last_ebs_successful &&
-           mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
-               scan_req.flags |=
-                       cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
-
-       return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
-                                   sizeof(scan_req), &scan_req);
-}
-
 int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif,
                               struct cfg80211_sched_scan_request *req,
@@ -1057,21 +527,12 @@ int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
                if (ret)
                        return ret;
                ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
-       } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
-               mvm->scan_status = IWL_MVM_SCAN_SCHED;
-               ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
-               if (ret)
-                       return ret;
-               ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
        } else {
                mvm->scan_status = IWL_MVM_SCAN_SCHED;
-               ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
-               if (ret)
-                       return ret;
                ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
                if (ret)
                        return ret;
-               ret = iwl_mvm_sched_scan_start(mvm, req);
+               ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
        }
 
        return ret;
@@ -1088,9 +549,7 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
        /* Exit instantly with error when device is not ready
         * to receive scan abort command or it does not perform
         * scheduled scan currently */
-       if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
-           (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
-            mvm->scan_status != IWL_MVM_SCAN_OS))
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
                return -EIO;
 
        ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
@@ -1131,13 +590,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
        if (iwl_mvm_is_radio_killed(mvm))
                goto out;
 
-       if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
-           (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
-            mvm->scan_status != IWL_MVM_SCAN_OS)) {
-               IWL_DEBUG_SCAN(mvm, "No scan to stop\n");
-               return 0;
-       }
-
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
                                   scan_done_notif,
                                   ARRAY_SIZE(scan_done_notif),
@@ -1317,7 +769,7 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
        cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
        if (params->passive_fragmented)
                cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].passive;
+                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
        cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
@@ -1580,9 +1032,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
                return 0;
        }
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
-               return iwl_mvm_scan_offload_stop(mvm, true);
-       return iwl_mvm_cancel_regular_scan(mvm);
+       return iwl_mvm_scan_offload_stop(mvm, true);
 }
 
 /* UMAC scan API */
@@ -1765,7 +1215,7 @@ iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
        cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
        if (params->passive_fragmented)
                cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].passive;
+                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
        cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
@@ -2159,14 +1609,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
                                mvm->fw->ucode_capa.n_scan_channels +
                        sizeof(struct iwl_scan_req_umac_tail);
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
-               return sizeof(struct iwl_scan_req_unified_lmac) +
-                       sizeof(struct iwl_scan_channel_cfg_lmac) *
-                               mvm->fw->ucode_capa.n_scan_channels +
-                       sizeof(struct iwl_scan_probe_req);
-
-       return sizeof(struct iwl_scan_cmd) +
-               mvm->fw->ucode_capa.max_probe_length +
-                       mvm->fw->ucode_capa.n_scan_channels *
-               sizeof(struct iwl_scan_channel);
+       return sizeof(struct iwl_scan_req_unified_lmac) +
+               sizeof(struct iwl_scan_channel_cfg_lmac) *
+               mvm->fw->ucode_capa.n_scan_channels +
+               sizeof(struct iwl_scan_probe_req);
 }
index 07304e1fd64aa70b41d4c9a18762b48924a6f250..7906b97c81b96d357270d5502d92e3bcb17c6f73 100644 (file)
@@ -664,6 +664,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                info->status.rates[0].count = tx_resp->failure_frame + 1;
                iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
                                            info);
+               info->status.status_driver_data[1] =
+                       (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
 
                /* Single frame failure in an AMPDU queue => send BAR */
                if (txq_id >= mvm->first_agg_queue &&
@@ -909,6 +911,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
        info->status.tx_time = tid_data->tx_time;
        info->status.status_driver_data[0] =
                (void *)(uintptr_t)tid_data->reduced_tpc;
+       info->status.status_driver_data[1] =
+               (void *)(uintptr_t)tid_data->rate_n_flags;
 }
 
 int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
index 8decf99532298dcb7fa3135421c441e5d6e98368..2b9de63951e609463a5aa65b289e7908b1f163a2 100644 (file)
@@ -332,7 +332,7 @@ static const char *desc_lookup(u32 num)
  * read with u32-sized accesses, any members with a different size
  * need to be ordered correctly though!
  */
-struct iwl_error_event_table {
+struct iwl_error_event_table_v1 {
        u32 valid;              /* (nonzero) valid, (0) log is empty */
        u32 error_id;           /* type of error */
        u32 pc;                 /* program counter */
@@ -377,7 +377,55 @@ struct iwl_error_event_table {
        u32 u_timestamp;        /* indicate when the date and time of the
                                 * compilation */
        u32 flow_handler;       /* FH read/write pointers, RX credit */
-} __packed;
+} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
+
+struct iwl_error_event_table {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 pc;                 /* program counter */
+       u32 blink1;             /* branch link */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 bcon_time;          /* beacon timer */
+       u32 tsf_low;            /* network timestamp function timer */
+       u32 tsf_hi;             /* network timestamp function timer */
+       u32 gp1;                /* GP1 timer register */
+       u32 gp2;                /* GP2 timer register */
+       u32 gp3;                /* GP3 timer register */
+       u32 major;              /* uCode version major */
+       u32 minor;              /* uCode version minor */
+       u32 hw_ver;             /* HW Silicon version */
+       u32 brd_ver;            /* HW board version */
+       u32 log_pc;             /* log program counter */
+       u32 frame_ptr;          /* frame pointer */
+       u32 stack_ptr;          /* stack pointer */
+       u32 hcmd;               /* last host command header */
+       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
+                                * rxtx_flag */
+       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
+                                * host_flag */
+       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
+                                * enc_flag */
+       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
+                                * time_flag */
+       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
+                                * wico interrupt */
+       u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
+       u32 wait_event;         /* wait event() caller address */
+       u32 l2p_control;        /* L2pControlField */
+       u32 l2p_duration;       /* L2pDurationField */
+       u32 l2p_mhvalid;        /* L2pMhValidBits */
+       u32 l2p_addr_match;     /* L2pAddrMatchStat */
+       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
+                                * (LMPM_PMG_SEL) */
+       u32 u_timestamp;        /* indicate when the date and time of the
+                                * compilation */
+       u32 flow_handler;       /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;
 
 /*
  * UMAC error struct - relevant starting from family 8000 chip.
@@ -396,11 +444,11 @@ struct iwl_umac_error_event_table {
        u32 data1;              /* error-specific data */
        u32 data2;              /* error-specific data */
        u32 data3;              /* error-specific data */
-       u32 umac_fw_ver;        /* UMAC version */
-       u32 umac_fw_api_ver;    /* UMAC FW API ver */
+       u32 umac_major;
+       u32 umac_minor;
        u32 frame_pointer;      /* core register 27*/
        u32 stack_pointer;      /* core register 28 */
-       u32 cmd_header; /* latest host cmd sent to UMAC */
+       u32 cmd_header;         /* latest host cmd sent to UMAC */
        u32 nic_isr_pref;       /* ISR status register */
 } __packed;
 
@@ -441,18 +489,18 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
        IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
        IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
-       IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_fw_ver);
-       IWL_ERR(mvm, "0x%08X | umac api version\n", table.umac_fw_api_ver);
+       IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
+       IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
        IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
        IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
        IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
        IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
 }
 
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
 {
        struct iwl_trans *trans = mvm->trans;
-       struct iwl_error_event_table table;
+       struct iwl_error_event_table_v1 table;
        u32 base;
 
        base = mvm->error_event_table;
@@ -489,7 +537,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                                      table.data1, table.data2, table.data3,
                                      table.blink1, table.blink2, table.ilink1,
                                      table.ilink2, table.bcon_time, table.gp1,
-                                     table.gp2, table.gp3, table.ucode_ver,
+                                     table.gp2, table.gp3, table.ucode_ver, 0,
                                      table.hw_ver, table.brd_ver);
        IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
@@ -530,6 +578,92 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                iwl_mvm_dump_umac_error_log(mvm);
 }
 
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+       struct iwl_trans *trans = mvm->trans;
+       struct iwl_error_event_table table;
+       u32 base;
+
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+               iwl_mvm_dump_nic_error_log_old(mvm);
+               return;
+       }
+
+       base = mvm->error_event_table;
+       if (mvm->cur_ucode == IWL_UCODE_INIT) {
+               if (!base)
+                       base = mvm->fw->init_errlog_ptr;
+       } else {
+               if (!base)
+                       base = mvm->fw->inst_errlog_ptr;
+       }
+
+       if (base < 0x800000) {
+               IWL_ERR(mvm,
+                       "Not valid error log pointer 0x%08X for %s uCode\n",
+                       base,
+                       (mvm->cur_ucode == IWL_UCODE_INIT)
+                                       ? "Init" : "RT");
+               return;
+       }
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+                       mvm->status, table.valid);
+       }
+
+       /* Do not change this output - scripts rely on it */
+
+       IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
+       trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+                                     table.data1, table.data2, table.data3,
+                                     table.blink1, table.blink2, table.ilink1,
+                                     table.ilink2, table.bcon_time, table.gp1,
+                                     table.gp2, table.gp3, table.major,
+                                     table.minor, table.hw_ver, table.brd_ver);
+       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+               desc_lookup(table.error_id));
+       IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
+       IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+       IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+       IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+       IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+       IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+       IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+       IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+       IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+       IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+       IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+       IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+       IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+       IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+       IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
+       IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
+       IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+       IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+       IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+       IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+       IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+       IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+       IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+       IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+       IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+       IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+       IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+       IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+       IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+       IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+       IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+       IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+       IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+       if (mvm->support_umac_log)
+               iwl_mvm_dump_umac_error_log(mvm);
+}
 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
                        const struct iwl_trans_txq_scd_cfg *cfg,
                        unsigned int wdg_timeout)
@@ -643,6 +777,40 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        ieee80211_request_smps(vif, smps_mode);
 }
 
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
+{
+       struct iwl_statistics_cmd scmd = {
+               .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
+       };
+       struct iwl_host_cmd cmd = {
+               .id = STATISTICS_CMD,
+               .len[0] = sizeof(scmd),
+               .data[0] = &scmd,
+               .flags = CMD_WANT_SKB,
+       };
+       int ret;
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret)
+               return ret;
+
+       iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
+       iwl_free_resp(&cmd);
+
+       if (clear)
+               iwl_mvm_accu_radio_stats(mvm);
+
+       return 0;
+}
+
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
+{
+       mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
+       mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
+       mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
+       mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+}
+
 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
                                   struct ieee80211_vif *vif)
 {
@@ -717,25 +885,6 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
        return result;
 }
 
-static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
-{
-       bool *idle = _data;
-
-       if (!vif->bss_conf.idle)
-               *idle = false;
-}
-
-bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
-{
-       bool idle = true;
-
-       ieee80211_iterate_active_interfaces_atomic(
-                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                       iwl_mvm_idle_iter, &idle);
-
-       return idle;
-}
-
 struct iwl_bss_iter_data {
        struct ieee80211_vif *vif;
        bool error;
index 69935aa5a1b3c702ff147bc521b3621ae70a63b8..f31a941607719053e2e18bcacb24f3d5b6bd06ce 100644 (file)
@@ -898,6 +898,9 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");
 
+       if (trans->dbg_dest_tlv)
+               iwl_pcie_apply_destination(trans);
+
        /* configure the ucode to be ready to get the secured image */
        /* release CPU reset */
        iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
@@ -914,9 +917,6 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
        if (ret)
                return ret;
 
-       if (trans->dbg_dest_tlv)
-               iwl_pcie_apply_destination(trans);
-
        /* wait for image verification to complete  */
        ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
                                LMPM_SECURE_BOOT_STATUS_SUCCESS,
index cc6a0a586f0b748c054c4c0e8631ea0d706501cb..26cbf1dcc6620f0502314daf8cac579794c8f6e4 100644 (file)
@@ -742,8 +742,7 @@ void lbs_debugfs_init(void)
 
 void lbs_debugfs_remove(void)
 {
-       if (lbs_dir)
-                debugfs_remove(lbs_dir);
+       debugfs_remove(lbs_dir);
 }
 
 void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
index 569b64ecc6075f1fa028091b7b319c20fbda01c9..8079560f496581600cb658c4fa09d8e5d1faed81 100644 (file)
@@ -667,7 +667,7 @@ static int lbs_setup_firmware(struct lbs_private *priv)
        lbs_deb_enter(LBS_DEB_FW);
 
        /* Read MAC address from firmware */
-       memset(priv->current_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->current_addr);
        ret = lbs_update_hw_spec(priv);
        if (ret)
                goto done;
@@ -871,7 +871,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
 
        lbs_deb_enter(LBS_DEB_MAIN);
 
-       memset(priv->current_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->current_addr);
 
        priv->connect_status = LBS_DISCONNECTED;
        priv->channel = DEFAULT_AD_HOC_CHANNEL;
index 25c5acc78bd141c218499eae06d1ed1c33f70e83..ed02e4bf2c26f5cc333d88b89347b89e577779bb 100644 (file)
@@ -152,7 +152,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
        /*
         * Read priv address from HW
         */
-       memset(priv->current_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->current_addr);
        ret = lbtf_update_hw_spec(priv);
        if (ret) {
                ret = -1;
@@ -199,7 +199,7 @@ out:
 static int lbtf_init_adapter(struct lbtf_private *priv)
 {
        lbtf_deb_enter(LBTF_DEB_MAIN);
-       memset(priv->current_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->current_addr);
        mutex_init(&priv->lock);
 
        priv->vif = NULL;
index 4a4c6586a8d2dcda2b6f49a5b767bcc48304e138..d56b7859a43703a46f8bd76af0feb3bb8450ed12 100644 (file)
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
 
        genlmsg_end(skb, msg_head);
-       genlmsg_unicast(&init_net, skb, dst_portid);
+       if (genlmsg_unicast(&init_net, skb, dst_portid))
+               goto err_free_txskb;
 
        /* Enqueue the packet */
        skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
        return;
 
 nla_put_failure:
+       nlmsg_free(skb);
+err_free_txskb:
        printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
        ieee80211_free_txskb(hw, my_skb);
        data->tx_failed++;
@@ -1908,7 +1911,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw,
 
        printk(KERN_DEBUG "hwsim sw_scan_complete\n");
        hwsim->scanning = false;
-       memset(hwsim->scan_addr, 0, ETH_ALEN);
+       eth_zero_addr(hwsim->scan_addr);
 
        mutex_unlock(&hwsim->mutex);
 }
@@ -2264,7 +2267,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        skb_queue_head_init(&data->pending);
 
        SET_IEEE80211_DEV(hw, data->dev);
-       memset(addr, 0, ETH_ALEN);
+       eth_zero_addr(addr);
        addr[0] = 0x02;
        addr[3] = idx >> 8;
        addr[4] = idx;
@@ -2597,7 +2600,7 @@ static void hwsim_mon_setup(struct net_device *dev)
        ether_setup(dev);
        dev->tx_queue_len = 0;
        dev->type = ARPHRD_IEEE80211_RADIOTAP;
-       memset(dev->dev_addr, 0, ETH_ALEN);
+       eth_zero_addr(dev->dev_addr);
        dev->dev_addr[0] = 0x12;
 }
 
index 41c8e25df9544021278a0998c6b89fa3469d4c98..8e1f681f960b73e02385d74a48b9e0b4efde16b3 100644 (file)
@@ -1563,7 +1563,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
 
        wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
 
-       memset(deauth_mac, 0, ETH_ALEN);
+       eth_zero_addr(deauth_mac);
 
        spin_lock_irqsave(&priv->sta_list_spinlock, flags);
        sta_node = mwifiex_get_sta_entry(priv, params->mac);
@@ -1786,7 +1786,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
        wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
                " reason code %d\n", priv->cfg_bssid, reason_code);
 
-       memset(priv->cfg_bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->cfg_bssid);
        priv->hs2_enabled = false;
 
        return 0;
@@ -2046,7 +2046,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                dev_dbg(priv->adapter->dev,
                        "info: association to bssid %pM failed\n",
                        priv->cfg_bssid);
-               memset(priv->cfg_bssid, 0, ETH_ALEN);
+               eth_zero_addr(priv->cfg_bssid);
 
                if (ret > 0)
                        cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
@@ -2194,7 +2194,7 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
-       memset(priv->cfg_bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->cfg_bssid);
 
        return 0;
 }
@@ -2397,7 +2397,6 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
        ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 }
 
-#define MWIFIEX_MAX_WQ_LEN  30
 /*
  *  create a new virtual interface with the given name
  */
@@ -2411,7 +2410,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        struct mwifiex_private *priv;
        struct net_device *dev;
        void *mdev_priv;
-       char dfs_cac_str[MWIFIEX_MAX_WQ_LEN], dfs_chsw_str[MWIFIEX_MAX_WQ_LEN];
 
        if (!adapter)
                return ERR_PTR(-EFAULT);
@@ -2576,12 +2574,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                return ERR_PTR(-EFAULT);
        }
 
-       strcpy(dfs_cac_str, "MWIFIEX_DFS_CAC");
-       strcat(dfs_cac_str, name);
-       priv->dfs_cac_workqueue = alloc_workqueue(dfs_cac_str,
+       priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s",
                                                  WQ_HIGHPRI |
                                                  WQ_MEM_RECLAIM |
-                                                 WQ_UNBOUND, 1);
+                                                 WQ_UNBOUND, 1, name);
        if (!priv->dfs_cac_workqueue) {
                wiphy_err(wiphy, "cannot register virtual network device\n");
                free_netdev(dev);
@@ -2594,11 +2590,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
        INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue);
 
-       strcpy(dfs_chsw_str, "MWIFIEX_DFS_CHSW");
-       strcat(dfs_chsw_str, name);
-       priv->dfs_chan_sw_workqueue = alloc_workqueue(dfs_chsw_str,
+       priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s",
                                                      WQ_HIGHPRI | WQ_UNBOUND |
-                                                     WQ_MEM_RECLAIM, 1);
+                                                     WQ_MEM_RECLAIM, 1, name);
        if (!priv->dfs_chan_sw_workqueue) {
                wiphy_err(wiphy, "cannot register virtual network device\n");
                free_netdev(dev);
index 88d0eade6bb128565def33ab001cdb01c864b040..cf2fa110e2514f4e56798768463d57970c1481d6 100644 (file)
@@ -33,6 +33,7 @@
 #define MWIFIEX_MAX_BSS_NUM         (3)
 
 #define MWIFIEX_DMA_ALIGN_SZ       64
+#define MWIFIEX_RX_HEADROOM        64
 #define MAX_TXPD_SZ                32
 #define INTF_HDR_ALIGN              4
 
index b77ba743e1c498c1bae30b11ae0b571937b971c8..0153ce6d5879bacd48d6d42aaca3d0ddec5412cb 100644 (file)
@@ -76,7 +76,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
        u32 i;
 
        priv->media_connected = false;
-       memset(priv->curr_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->curr_addr);
 
        priv->pkt_tx_ctrl = 0;
        priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -296,10 +296,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
        adapter->arp_filter_size = 0;
        adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
-       adapter->ext_scan = false;
        adapter->key_api_major_ver = 0;
        adapter->key_api_minor_ver = 0;
-       memset(adapter->perm_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(adapter->perm_addr);
        adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
        adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
        adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
index 7e74b4fccddd56e67eb8b28453553e4083eec895..74488aba92bdec159df217c47ae08c990161e847 100644 (file)
@@ -190,14 +190,16 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
 
        /* Check if already processing */
        if (adapter->mwifiex_processing) {
+               adapter->more_task_flag = true;
                spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                goto exit_main_proc;
        } else {
                adapter->mwifiex_processing = true;
-               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
        }
 process_start:
        do {
+               adapter->more_task_flag = false;
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
                    (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
                        break;
@@ -238,6 +240,7 @@ process_start:
                        adapter->pm_wakeup_fw_try = true;
                        mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
                        adapter->if_ops.wakeup(adapter);
+                       spin_lock_irqsave(&adapter->main_proc_lock, flags);
                        continue;
                }
 
@@ -295,8 +298,10 @@ process_start:
                if ((adapter->ps_state == PS_STATE_SLEEP) ||
                    (adapter->ps_state == PS_STATE_PRE_SLEEP) ||
                    (adapter->ps_state == PS_STATE_SLEEP_CFM) ||
-                   adapter->tx_lock_flag)
+                   adapter->tx_lock_flag){
+                       spin_lock_irqsave(&adapter->main_proc_lock, flags);
                        continue;
+               }
 
                if (!adapter->cmd_sent && !adapter->curr_cmd) {
                        if (mwifiex_exec_next_cmd(adapter) == -1) {
@@ -330,15 +335,12 @@ process_start:
                        }
                        break;
                }
+               spin_lock_irqsave(&adapter->main_proc_lock, flags);
        } while (true);
 
        spin_lock_irqsave(&adapter->main_proc_lock, flags);
-       if (!adapter->delay_main_work &&
-           (adapter->int_status || IS_CARD_RX_RCVD(adapter))) {
-               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+       if (adapter->more_task_flag)
                goto process_start;
-       }
-
        adapter->mwifiex_processing = false;
        spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
index f0a6af179af03ba2ba2497c6a296f5f450b18463..16be45e9a66acb2d853b5d26c133411ddaf3c378 100644 (file)
@@ -140,6 +140,9 @@ enum {
 
 #define MWIFIEX_DRV_INFO_SIZE_MAX 0x40000
 
+/* Address alignment */
+#define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
+
 struct mwifiex_dbg {
        u32 num_cmd_host_to_card_failure;
        u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -774,6 +777,7 @@ struct mwifiex_adapter {
        /* spin lock for main process */
        spinlock_t main_proc_lock;
        u32 mwifiex_processing;
+       u8 more_task_flag;
        u16 tx_buf_size;
        u16 curr_tx_buf_size;
        u32 ioport;
@@ -1417,6 +1421,7 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
                            u8 rx_rate, u8 ht_info);
 
 void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
+void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags);
 
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
index a5828da5936534595825137fd733c7f66dcc0bd6..4b463c3b99064904ba15c8503cc6f922cfe345ef 100644 (file)
@@ -203,7 +203,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
                card->pcie.reg = data->reg;
                card->pcie.blksz_fw_dl = data->blksz_fw_dl;
                card->pcie.tx_buf_size = data->tx_buf_size;
-               card->pcie.supports_fw_dump = data->supports_fw_dump;
+               card->pcie.can_dump_fw = data->can_dump_fw;
                card->pcie.can_ext_scan = data->can_ext_scan;
        }
 
@@ -498,7 +498,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
 
        for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
                /* Allocate skb here so that firmware can DMA data from it */
-               skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+               skb = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+                                          GFP_KERNEL | GFP_DMA);
                if (!skb) {
                        dev_err(adapter->dev,
                                "Unable to allocate skb for RX ring.\n");
@@ -1297,7 +1298,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                        }
                }
 
-               skb_tmp = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+               skb_tmp = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+                                              GFP_KERNEL | GFP_DMA);
                if (!skb_tmp) {
                        dev_err(adapter->dev,
                                "Unable to allocate skb.\n");
@@ -2271,7 +2273,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
        int ret;
        static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };
 
-       if (!card->pcie.supports_fw_dump)
+       if (!card->pcie.can_dump_fw)
                return;
 
        for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
index 666d40e9dbc36495238537af92345d37e537c173..0e7ee8b72358f7feba632f43349113a6e662b210 100644 (file)
@@ -205,7 +205,7 @@ struct mwifiex_pcie_device {
        const struct mwifiex_pcie_card_reg *reg;
        u16 blksz_fw_dl;
        u16 tx_buf_size;
-       bool supports_fw_dump;
+       bool can_dump_fw;
        bool can_ext_scan;
 };
 
@@ -214,7 +214,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
        .reg            = &mwifiex_reg_8766,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
-       .supports_fw_dump = false,
+       .can_dump_fw = false,
        .can_ext_scan = true,
 };
 
@@ -223,7 +223,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
        .reg            = &mwifiex_reg_8897,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
-       .supports_fw_dump = true,
+       .can_dump_fw = true,
        .can_ext_scan = true,
 };
 
index 91e36cda9543e4cd20e1755572f8a31a24774b88..57d85ab442bf3f2569d7ffd434a2b7e45bb314d8 100644 (file)
@@ -105,8 +105,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->tx_buf_size = data->tx_buf_size;
                card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
                card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
-               card->supports_fw_dump = data->supports_fw_dump;
-               card->auto_tdls = data->auto_tdls;
+               card->can_dump_fw = data->can_dump_fw;
+               card->can_auto_tdls = data->can_auto_tdls;
                card->can_ext_scan = data->can_ext_scan;
        }
 
@@ -1357,7 +1357,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                        return -1;
                rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
 
-               skb = dev_alloc_skb(rx_len);
+               skb = mwifiex_alloc_rx_buf(rx_len, GFP_KERNEL | GFP_DMA);
                if (!skb)
                        return -1;
 
@@ -1454,7 +1454,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                        }
                        rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
 
-                       skb = dev_alloc_skb(rx_len);
+                       skb = mwifiex_alloc_rx_buf(rx_len,
+                                                  GFP_KERNEL | GFP_DMA);
 
                        if (!skb) {
                                dev_err(adapter->dev, "%s: failed to alloc skb",
@@ -1887,7 +1888,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
                return -1;
        }
 
-       adapter->auto_tdls = card->auto_tdls;
+       adapter->auto_tdls = card->can_auto_tdls;
        adapter->ext_scan = card->can_ext_scan;
        return ret;
 }
@@ -2032,7 +2033,7 @@ static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
 
        mwifiex_dump_drv_info(adapter);
 
-       if (!card->supports_fw_dump)
+       if (!card->can_dump_fw)
                return;
 
        for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
index 957cca24661828c4574f66bee9ac0ecd0126e5b8..c636944c77bcdc935bbbbf07e5f87e37e318bf43 100644 (file)
@@ -238,9 +238,6 @@ struct sdio_mmc_card {
        const struct mwifiex_sdio_card_reg *reg;
        u8 max_ports;
        u8 mp_agg_pkt_limit;
-       bool supports_sdio_new_mode;
-       bool has_control_mask;
-       bool supports_fw_dump;
        u16 tx_buf_size;
        u32 mp_tx_agg_buf_size;
        u32 mp_rx_agg_buf_size;
@@ -255,7 +252,10 @@ struct sdio_mmc_card {
        u8 curr_wr_port;
 
        u8 *mp_regs;
-       u8 auto_tdls;
+       bool supports_sdio_new_mode;
+       bool has_control_mask;
+       bool can_dump_fw;
+       bool can_auto_tdls;
        bool can_ext_scan;
 
        struct mwifiex_sdio_mpa_tx mpa_tx;
@@ -267,13 +267,13 @@ struct mwifiex_sdio_device {
        const struct mwifiex_sdio_card_reg *reg;
        u8 max_ports;
        u8 mp_agg_pkt_limit;
-       bool supports_sdio_new_mode;
-       bool has_control_mask;
-       bool supports_fw_dump;
        u16 tx_buf_size;
        u32 mp_tx_agg_buf_size;
        u32 mp_rx_agg_buf_size;
-       u8 auto_tdls;
+       bool supports_sdio_new_mode;
+       bool has_control_mask;
+       bool can_dump_fw;
+       bool can_auto_tdls;
        bool can_ext_scan;
 };
 
@@ -412,13 +412,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
        .reg = &mwifiex_reg_sd87xx,
        .max_ports = 16,
        .mp_agg_pkt_limit = 8,
-       .supports_sdio_new_mode = false,
-       .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-       .supports_fw_dump = false,
-       .auto_tdls = false,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+       .can_dump_fw = false,
+       .can_auto_tdls = false,
        .can_ext_scan = false,
 };
 
@@ -427,13 +427,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
        .reg = &mwifiex_reg_sd87xx,
        .max_ports = 16,
        .mp_agg_pkt_limit = 8,
-       .supports_sdio_new_mode = false,
-       .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-       .supports_fw_dump = false,
-       .auto_tdls = false,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+       .can_dump_fw = false,
+       .can_auto_tdls = false,
        .can_ext_scan = true,
 };
 
@@ -442,13 +442,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
        .reg = &mwifiex_reg_sd87xx,
        .max_ports = 16,
        .mp_agg_pkt_limit = 8,
-       .supports_sdio_new_mode = false,
-       .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-       .supports_fw_dump = false,
-       .auto_tdls = false,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+       .can_dump_fw = false,
+       .can_auto_tdls = false,
        .can_ext_scan = true,
 };
 
@@ -457,13 +457,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .reg = &mwifiex_reg_sd8897,
        .max_ports = 32,
        .mp_agg_pkt_limit = 16,
-       .supports_sdio_new_mode = true,
-       .has_control_mask = false,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
-       .supports_fw_dump = true,
-       .auto_tdls = false,
+       .supports_sdio_new_mode = true,
+       .has_control_mask = false,
+       .can_dump_fw = true,
+       .can_auto_tdls = false,
        .can_ext_scan = true,
 };
 
@@ -472,13 +472,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
        .reg = &mwifiex_reg_sd8887,
        .max_ports = 32,
        .mp_agg_pkt_limit = 16,
-       .supports_sdio_new_mode = true,
-       .has_control_mask = false,
-       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
-       .supports_fw_dump = false,
-       .auto_tdls = true,
+       .supports_sdio_new_mode = true,
+       .has_control_mask = false,
+       .can_dump_fw = false,
+       .can_auto_tdls = true,
        .can_ext_scan = true,
 };
 
@@ -492,8 +492,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-       .supports_fw_dump = false,
-       .auto_tdls = false,
+       .can_dump_fw = false,
+       .can_auto_tdls = false,
        .can_ext_scan = true,
 };
 
index 80ffe74124969a2410e2a1ce382d0f521f035238..64c4223a1e1ee919783493487f2c449d8c8693bd 100644 (file)
@@ -135,7 +135,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
                cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
                                      GFP_KERNEL);
        }
-       memset(priv->cfg_bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->cfg_bssid);
 
        mwifiex_stop_net_dev_queue(priv->netdev, adapter);
        if (netif_carrier_ok(priv->netdev))
index ac93557cbdc96ec951fd91f2f4d581b8cd9a7a5b..ea4549f0e0b931b449c4a13879a81d3a4025c77e 100644 (file)
@@ -80,11 +80,13 @@ EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
 int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
                       struct mwifiex_tx_param *tx_param)
 {
-       int ret = -1;
+       int hroom, ret = -1;
        struct mwifiex_adapter *adapter = priv->adapter;
        u8 *head_ptr;
        struct txpd *local_tx_pd = NULL;
 
+       hroom = (adapter->iface_type == MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
+
        if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
                head_ptr = mwifiex_process_uap_txpd(priv, skb);
        else
@@ -92,11 +94,9 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
 
        if (head_ptr) {
                if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
-                       local_tx_pd =
-                               (struct txpd *) (head_ptr + INTF_HEADER_LEN);
+                       local_tx_pd = (struct txpd *)(head_ptr + hroom);
                if (adapter->iface_type == MWIFIEX_USB) {
                        adapter->data_sent = true;
-                       skb_pull(skb, INTF_HEADER_LEN);
                        ret = adapter->if_ops.host_to_card(adapter,
                                                           MWIFIEX_USB_EP_DATA,
                                                           skb, NULL);
index 308550611f22fe5924c6d51f39e398b5e736ab89..2148a573396b8dfcbcf2dba17843cace3af250a6 100644 (file)
@@ -367,6 +367,13 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
        if (!skb)
                return -1;
 
+       if (!priv->mgmt_frame_mask ||
+           priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
+               dev_dbg(priv->adapter->dev,
+                       "do not receive mgmt frames on uninitialized intf");
+               return -1;
+       }
+
        rx_pd = (struct rxpd *)skb->data;
 
        skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
@@ -624,3 +631,26 @@ void mwifiex_hist_data_reset(struct mwifiex_private *priv)
        for (ix = 0; ix < MWIFIEX_MAX_SIG_STRENGTH; ix++)
                atomic_set(&phist_data->sig_str[ix], 0);
 }
+
+void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags)
+{
+       struct sk_buff *skb;
+       int buf_len, pad;
+
+       buf_len = rx_len + MWIFIEX_RX_HEADROOM + MWIFIEX_DMA_ALIGN_SZ;
+
+       skb = __dev_alloc_skb(buf_len, flags);
+
+       if (!skb)
+               return NULL;
+
+       skb_reserve(skb, MWIFIEX_RX_HEADROOM);
+
+       pad = MWIFIEX_ALIGN_ADDR(skb->data, MWIFIEX_DMA_ALIGN_SZ) -
+             (long)skb->data;
+
+       skb_reserve(skb, pad);
+
+       return skb;
+}
+EXPORT_SYMBOL_GPL(mwifiex_alloc_rx_buf);
index ef717acec8b76f3fc45cd1b210cccd5b3948f7d7..0cd4f6bed9fc4f6535bdb5e8e02578db13d5bb68 100644 (file)
@@ -730,7 +730,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        } else {
                memcpy(ra, skb->data, ETH_ALEN);
                if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
-                       memset(ra, 0xff, ETH_ALEN);
+                       eth_broadcast_addr(ra);
                ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
        }
 
index f9b1218c761a4b7a187c239239365fe077649624..95921167b53f74a8577710a44afcd1f71b06142a 100644 (file)
@@ -1277,7 +1277,7 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
        struct mwl8k_priv *priv = hw->priv;
 
        priv->capture_beacon = false;
-       memset(priv->capture_bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->capture_bssid);
 
        /*
         * Use GFP_ATOMIC as rxq_process is called from
index 6abdaf0aa052253800697eb1631d100683ba2ec7..1d4dae422106c673351439e3f680feeff2adb06b 100644 (file)
@@ -168,7 +168,7 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
        if (is_zero_ether_addr(ap_addr->sa_data) ||
            is_broadcast_ether_addr(ap_addr->sa_data)) {
                priv->bssid_fixed = 0;
-               memset(priv->desired_bssid, 0, ETH_ALEN);
+               eth_zero_addr(priv->desired_bssid);
 
                /* "off" means keep existing connection */
                if (ap_addr->sa_data[0] == 0) {
index 5367d510b22d7862360eaf04b7a994cca701e7b7..275408eaf95e6d54f006b853475229ea105ee9dd 100644 (file)
@@ -671,7 +671,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len,
        if (addr)
                memcpy(rxkey->mac, addr, ETH_ALEN);
        else
-               memset(rxkey->mac, ~0, ETH_ALEN);
+               eth_broadcast_addr(rxkey->mac);
 
        switch (algo) {
        case P54_CRYPTO_WEP:
index b9250d75d2539d827aec114a83e8056faec389aa..e79674f73dc5766cda5bfae2c1762a5aeb98f863 100644 (file)
@@ -182,7 +182,7 @@ static int p54_start(struct ieee80211_hw *dev)
        if (err)
                goto out;
 
-       memset(priv->bssid, ~0, ETH_ALEN);
+       eth_broadcast_addr(priv->bssid);
        priv->mode = NL80211_IFTYPE_MONITOR;
        err = p54_setup_mac(priv);
        if (err) {
@@ -274,8 +274,8 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
                wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ);
        }
        priv->mode = NL80211_IFTYPE_MONITOR;
-       memset(priv->mac_addr, 0, ETH_ALEN);
-       memset(priv->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->mac_addr);
+       eth_zero_addr(priv->bssid);
        p54_setup_mac(priv);
        mutex_unlock(&priv->conf_mutex);
 }
@@ -794,7 +794,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
        init_completion(&priv->beacon_comp);
        INIT_DELAYED_WORK(&priv->work, p54_work);
 
-       memset(&priv->mc_maclist[0], ~0, ETH_ALEN);
+       eth_broadcast_addr(priv->mc_maclist[0]);
        priv->curchan = NULL;
        p54_reset_stats(priv);
        return dev;
index 8330fa33e50b1e2f933f813ee187c407184780ae..477f86354dc5a7ff8a324717a16093cd9fc55f81 100644 (file)
@@ -808,7 +808,7 @@ static int ray_dev_init(struct net_device *dev)
 
        /* copy mac and broadcast addresses to linux device */
        memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
-       memset(dev->broadcast, 0xff, ETH_ALEN);
+       eth_broadcast_addr(dev->broadcast);
 
        dev_dbg(&link->dev, "ray_dev_init ending\n");
        return 0;
index 60d44ce9c0173b48af894b14264e2f761c34de44..d72ff8e7125d4525d1761c60d3828dba22b63296 100644 (file)
@@ -199,13 +199,13 @@ enum ndis_80211_pmkid_cand_list_flag_bits {
 
 struct ndis_80211_auth_request {
        __le32 length;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 padding[2];
        __le32 flags;
 } __packed;
 
 struct ndis_80211_pmkid_candidate {
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 padding[2];
        __le32 flags;
 } __packed;
@@ -248,7 +248,7 @@ struct ndis_80211_conf {
 
 struct ndis_80211_bssid_ex {
        __le32 length;
-       u8 mac[6];
+       u8 mac[ETH_ALEN];
        u8 padding[2];
        struct ndis_80211_ssid ssid;
        __le32 privacy;
@@ -283,7 +283,7 @@ struct ndis_80211_key {
        __le32 size;
        __le32 index;
        __le32 length;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 padding[6];
        u8 rsc[8];
        u8 material[32];
@@ -292,7 +292,7 @@ struct ndis_80211_key {
 struct ndis_80211_remove_key {
        __le32 size;
        __le32 index;
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 padding[2];
 } __packed;
 
@@ -310,7 +310,7 @@ struct ndis_80211_assoc_info {
        struct req_ie {
                __le16 capa;
                __le16 listen_interval;
-               u8 cur_ap_address[6];
+               u8 cur_ap_address[ETH_ALEN];
        } req_ie;
        __le32 req_ie_length;
        __le32 offset_req_ies;
@@ -338,7 +338,7 @@ struct ndis_80211_capability {
 } __packed;
 
 struct ndis_80211_bssid_info {
-       u8 bssid[6];
+       u8 bssid[ETH_ALEN];
        u8 pmkid[16];
 } __packed;
 
@@ -1037,7 +1037,7 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
                              bssid, &len);
 
        if (ret != 0)
-               memset(bssid, 0, ETH_ALEN);
+               eth_zero_addr(bssid);
 
        return ret;
 }
@@ -1391,7 +1391,7 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
        priv->encr_keys[index].len = key_len;
        priv->encr_keys[index].cipher = cipher;
        memcpy(&priv->encr_keys[index].material, key, key_len);
-       memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN);
+       eth_broadcast_addr(priv->encr_keys[index].bssid);
 
        return 0;
 }
@@ -1466,7 +1466,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
        } else {
                /* group key */
                if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
-                       memset(ndis_key.bssid, 0xff, ETH_ALEN);
+                       eth_broadcast_addr(ndis_key.bssid);
                else
                        get_bssid(usbdev, ndis_key.bssid);
        }
@@ -1486,7 +1486,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
        if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY)
                memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN);
        else
-               memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN);
+               eth_broadcast_addr(priv->encr_keys[index].bssid);
 
        if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY)
                priv->encr_tx_key_index = index;
@@ -2280,7 +2280,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
        netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code);
 
        priv->connected = false;
-       memset(priv->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->bssid);
 
        return deauthenticate(usbdev);
 }
@@ -2392,7 +2392,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
        netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n");
 
        priv->connected = false;
-       memset(priv->bssid, 0, ETH_ALEN);
+       eth_zero_addr(priv->bssid);
 
        return deauthenticate(usbdev);
 }
@@ -2857,7 +2857,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
 
        if (priv->connected) {
                priv->connected = false;
-               memset(priv->bssid, 0, ETH_ALEN);
+               eth_zero_addr(priv->bssid);
 
                deauthenticate(usbdev);
 
index 006b8bcb2e31dfc5c21d7ed602546003e4a9e54c..2b4ef256c6b9432675b2de9bae9b6cd95c6309d2 100644 (file)
@@ -243,14 +243,14 @@ config RT2X00_LIB
        select AVERAGE
 
 config RT2X00_LIB_FIRMWARE
-       boolean
+       bool
        select FW_LOADER
 
 config RT2X00_LIB_CRYPTO
-       boolean
+       bool
 
 config RT2X00_LIB_LEDS
-       boolean
+       bool
        default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
 
 config RT2X00_LIB_DEBUGFS
index c6cb49c3ee32cff431bc679cf3a09ab92d1de4e0..dee4ac2f27e2c372afc6b7111357e4cfbed207ae 100644 (file)
@@ -45,9 +45,6 @@ enum ap_peer {
 #define RTL_TX_DESC_SIZE       32
 #define RTL_TX_HEADER_SIZE     (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
 
-#define HT_AMSDU_SIZE_4K       3839
-#define HT_AMSDU_SIZE_8K       7935
-
 #define MAX_BIT_RATE_40MHZ_MCS15       300     /* Mbps */
 #define MAX_BIT_RATE_40MHZ_MCS7                150     /* Mbps */
 
@@ -61,9 +58,6 @@ enum ap_peer {
 #define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS9   390     /* Mbps */
 #define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS7   293     /* Mbps */
 
-#define RTL_RATE_COUNT_LEGACY          12
-#define RTL_CHANNEL_COUNT              14
-
 #define FRAME_OFFSET_FRAME_CONTROL     0
 #define FRAME_OFFSET_DURATION          2
 #define FRAME_OFFSET_ADDRESS1          4
index 35508087c0c5ed4f80a969dfffd989c5a6d9eeed..e2e647d511c17b56786e886c83f71de140078dbd 100644 (file)
 
 #define CAM_CONTENT_COUNT                              8
 
-#define CFG_DEFAULT_KEY                                        BIT(5)
 #define CFG_VALID                                      BIT(15)
 
 #define PAIRWISE_KEYIDX                                        0
 #define CAM_PAIRWISE_KEY_POSITION                      4
 
-#define        CAM_CONFIG_USEDK                                1
 #define        CAM_CONFIG_NO_USEDK                             0
 
 void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
index a31a12775f1a0ff114c63e9ee17dfddc31eee273..3b3a88b53b119909112a806ee71ab4d4bfa67a79 100644 (file)
@@ -195,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        if (!(support_remote_wakeup &&
              rtlhal->enter_pnp_sleep)) {
                mac->link_state = MAC80211_NOLINK;
-               memset(mac->bssid, 0, 6);
+               eth_zero_addr(mac->bssid);
                mac->vendor = PEER_UNKNOWN;
 
                /* reset sec info */
@@ -357,7 +357,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
        mac->p2p = 0;
        mac->vif = NULL;
        mac->link_state = MAC80211_NOLINK;
-       memset(mac->bssid, 0, ETH_ALEN);
+       eth_zero_addr(mac->bssid);
        mac->vendor = PEER_UNKNOWN;
        mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
        rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -1157,7 +1157,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
                                rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
                        mac->link_state = MAC80211_NOLINK;
-                       memset(mac->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(mac->bssid);
                        mac->vendor = PEER_UNKNOWN;
                        mac->mode = 0;
 
index 7b64e34f421e0d4ec41c072f07ed096c7d53ce83..82733c6b8c46e66d79a6d1a74cf1903214744ef7 100644 (file)
@@ -33,8 +33,6 @@
        FIF_FCSFAIL | \
        FIF_BCN_PRBRESP_PROMISC)
 
-#define RTL_SUPPORTED_CTRL_FILTER      0xFF
-
 #define DM_DIG_THRESH_HIGH             40
 #define DM_DIG_THRESH_LOW              35
 #define DM_FALSEALARM_THRESH_LOW       400
index fdab8240a5d79da027f641ad52c170a658febdb7..be02e7894c61d20bbb0eedd70d539d0da66120ff 100644 (file)
 #define PG_STATE_WORD_3                        0x10
 #define PG_STATE_DATA                  0x20
 
-#define PG_SWBYTE_H                    0x01
-#define PG_SWBYTE_L                    0x02
-
-#define _POWERON_DELAY_
-#define _PRE_EXECUTE_READ_CMD_
-
 #define EFUSE_REPEAT_THRESHOLD_                3
 #define EFUSE_ERROE_HANDLE             1
 
index d9ea9d0c79a526694210f7f9f46c7e9747a31285..0532b985244445d81f925dfd960950688a95de44 100644 (file)
 #ifndef __RTL92C_DEF_H__
 #define __RTL92C_DEF_H__
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
-#define RESET_DELAY_8185                               20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                             0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA                   0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                        0x00
-
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                  0
-#define HW_THREE_WIRE                                  2
-
-#define BT_DEMO_BOARD                                  0
-#define BT_QA_BOARD                                    1
-#define BT_FPGA                                                2
-
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                        0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                              10
-
 #define RX_MPDU_QUEUE                                  0
 #define RX_CMD_QUEUE                                   1
-#define RX_MAX_QUEUE                                   2
-#define AC2QUEUEID(_AC)                                        (_AC)
 
 #define        C2H_RX_CMD_HDR_LEN                              8
 #define        GET_C2H_CMD_CMD_LEN(__prxhdr)                   \
index f2b9713c456e14e8a89726ec989113a4635a1767..edc2cbb6253c9263bed3fd294301cd79ea69f32d 100644 (file)
@@ -566,7 +566,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VIQEN);
                                break;
                        case AC3_VO:
-                               acm_ctrl &= (~ACMHW_BEQEN);
+                               acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
                                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 3f6c59cdeababd42c1ab5692ffe15c9d2b897956..a2bb02c7b837679455daf529fc84b5214068a33b 100644 (file)
@@ -452,9 +452,10 @@ static void handle_branch1(struct ieee80211_hw *hw, u16 arraylen,
                                READ_NEXT_PAIR(v1, v2, i);
                                while (v2 != 0xDEAD &&
                                       v2 != 0xCDEF &&
-                                      v2 != 0xCDCD && i < arraylen - 2)
+                                      v2 != 0xCDCD && i < arraylen - 2) {
                                        _rtl8188e_config_bb_reg(hw, v1, v2);
                                        READ_NEXT_PAIR(v1, v2, i);
+                               }
 
                                while (v2 != 0xDEAD && i < arraylen - 2)
                                        READ_NEXT_PAIR(v1, v2, i);
index 5c1472d88fd4496f123d43675f506c137a2dd92b..0eca030e32383d3d00342fe2ff18cab5d219675e 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL92C_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                     u8 bandwidth);
index 9b660df6fd712fd517638723bb0616b17b9df257..690a7a1675e2019c16932e08b4cfa86731e03566 100644 (file)
 #ifndef __RTL92C_DEF_H__
 #define __RTL92C_DEF_H__
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
 #define        PHY_RSSI_SLID_WIN_MAX                           100
 #define        PHY_LINKQUALITY_SLID_WIN_MAX                    20
 #define        PHY_BEACON_RSSI_SLID_WIN_MAX                    10
 
-#define RESET_DELAY_8185                               20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                             0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA                   0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                        0x00
-
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                  0
-#define HW_THREE_WIRE                                  2
-
-#define BT_DEMO_BOARD                                  0
-#define BT_QA_BOARD                                    1
-#define BT_FPGA                                                2
-
 #define RX_SMOOTH_FACTOR                               20
 
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                        0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                              10
-
 #define RX_MPDU_QUEUE                                  0
 #define RX_CMD_QUEUE                                   1
-#define RX_MAX_QUEUE                                   2
-#define AC2QUEUEID(_AC)                                        (_AC)
 
 #define        C2H_RX_CMD_HDR_LEN                              8
 #define        GET_C2H_CMD_CMD_LEN(__prxhdr)           \
index 303b299376c95b46da33dbec2d1159cec746a93c..04eb5c3f84640d4702402fc7d1dfeeb79f790c32 100644 (file)
@@ -363,7 +363,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~AcmHw_ViqEn);
                                        break;
                                case AC3_VO:
-                                       acm_ctrl &= (~AcmHw_BeqEn);
+                                       acm_ctrl &= (~AcmHw_VoqEn);
                                        break;
                                default:
                                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index d8fe68b389d213e17b133b7044fec647b315f2b8..ebd72cae10b6ecf680deb765a9ce26e779d7c6fc 100644 (file)
@@ -31,7 +31,6 @@
 #define __RTL92C_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
 void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
index fe4b699a12f5b4d6bdc9d883584d356e9670e24e..0c20dd74d6ecd862043321f4e5e1b9d0e7ff3693 100644 (file)
@@ -1589,6 +1589,8 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HW_VAR_DATA_FILTER:
                *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
                break;
+       case HAL_DEF_WOWLAN:
+               break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "switch case not processed\n");
@@ -1871,7 +1873,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~AcmHw_ViqEn);
                                        break;
                                case AC3_VO:
-                                       acm_ctrl &= (~AcmHw_BeqEn);
+                                       acm_ctrl &= (~AcmHw_VoqEn);
                                        break;
                                default:
                                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index c1e33b0228c0176e1ae31f5a8f99085a4f840397..67588083e6cc7ae9009c57a1fd8b07bd178c6e4d 100644 (file)
@@ -32,8 +32,6 @@
 
 #define H2C_RA_MASK    6
 
-#define LLT_POLLING_LLT_THRESHOLD              20
-#define LLT_POLLING_READY_TIMEOUT_COUNT                100
 #define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER                255
 
 #define RX_PAGE_SIZE_REG_VALUE                 PBP_128
index 11b439d6b67167c11e1529919bc5f07dfe96e060..6f987de5b4413ee74b71fbb7ddef9cb39929954a 100644 (file)
@@ -31,7 +31,6 @@
 #define __RTL92CU_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
 void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
index 939c905f547fd179c8e1af49b266893bdbc14694..0a443ed17cf4760dcc38475741450e6328196cce 100644 (file)
 #define        MAX_MSS_DENSITY_1T                              0x0A
 
 #define RF6052_MAX_TX_PWR                              0x3F
-#define RF6052_MAX_REG                                 0x3F
 #define RF6052_MAX_PATH                                        2
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
 #define        PHY_RSSI_SLID_WIN_MAX                           100
 #define        PHY_LINKQUALITY_SLID_WIN_MAX                    20
 #define        PHY_BEACON_RSSI_SLID_WIN_MAX                    10
 
-#define RESET_DELAY_8185                               20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
 #define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
 
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                             0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA                   0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH                   0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM                 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM                 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                        0x00
-
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                  0
-#define HW_THREE_WIRE                                  2
-
-#define BT_DEMO_BOARD                                  0
-#define BT_QA_BOARD                                    1
-#define BT_FPGA                                                2
-
 #define RX_SMOOTH_FACTOR                               20
 
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                        0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                              10
-
 #define RX_MPDU_QUEUE                                  0
 #define RX_CMD_QUEUE                                   1
-#define RX_MAX_QUEUE                                   2
 
 #define        C2H_RX_CMD_HDR_LEN                              8
 #define        GET_C2H_CMD_CMD_LEN(__prxhdr)                   \
index b461b3128da581bdb17ee53b1e5e9238472ca95b..db230a3f0137e0650f3e64d3ae87dfd20d6c5e87 100644 (file)
@@ -562,7 +562,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VIQEN);
                                break;
                        case AC3_VO:
-                               acm_ctrl &= (~ACMHW_BEQEN);
+                               acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
                                RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
index 8bdeed3c064ebb43e3141bb4305d4832c7f4e3c3..039c0133ad6b5a1b8d6869ded381454a886eb43d 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL92E_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                      u8 bandwidth);
index ef87c09b77d0c877ad10d3beddbc8045f0d9de43..41466f957cdc055b4f00588ab95f1281c8ea2d77 100644 (file)
@@ -31,7 +31,6 @@
 
 #define RX_MPDU_QUEUE                          0
 #define RX_CMD_QUEUE                           1
-#define RX_MAX_QUEUE                           2
 
 #define SHORT_SLOT_TIME                                9
 #define NON_SHORT_SLOT_TIME                    20
index 5761d5b49e39e4e9cb2ce703f39578ad1cf99e74..dee88a80bee136946523a37076b4bdbaee14cf44 100644 (file)
@@ -293,7 +293,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~AcmHw_ViqEn);
                                        break;
                                case AC3_VO:
-                                       acm_ctrl &= (~AcmHw_BeqEn);
+                                       acm_ctrl &= (~AcmHw_VoqEn);
                                        break;
                                default:
                                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 94bdd4bbca5dfcfb6aa537fd70365180df2dd8bf..bcdf2273688ebad1224e99166036c75c859adfbe 100644 (file)
 #ifndef __RTL8723E_DEF_H__
 #define __RTL8723E_DEF_H__
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
-#define RESET_DELAY_8185                                       20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                                     0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA           0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT           0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH           0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM         0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM         0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM         0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM         0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                0x00
-
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                          0
-#define HW_THREE_WIRE                                          2
-
-#define BT_DEMO_BOARD                                          0
-#define BT_QA_BOARD                                                    1
-#define BT_FPGA                                                                2
-
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                                      10
-
 #define RX_MPDU_QUEUE                                          0
 #define RX_CMD_QUEUE                                           1
-#define RX_MAX_QUEUE                                           2
-#define AC2QUEUEID(_AC)                                                (_AC)
 
 #define        C2H_RX_CMD_HDR_LEN                                      8
 #define        GET_C2H_CMD_CMD_LEN(__prxhdr)           \
index aa085462d0e9592d26d7d57418d451bbbd9ae8ae..b3b094759f6dc1d76ea3db83392d64599262666e 100644 (file)
@@ -362,7 +362,7 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~ACMHW_VIQEN);
                                        break;
                                case AC3_VO:
-                                       acm_ctrl &= (~ACMHW_BEQEN);
+                                       acm_ctrl &= (~ACMHW_VOQEN);
                                        break;
                                default:
                                        RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
index f3f45b16361f3b98f1a374356290ee7283952903..7b44ebc0fac978d4b746e052583aa5cc96d38426 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL8723E_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                       u8 bandwidth);
index 6dad28e77bbb6c6f07f53ff2ea38e39996e9cd6e..b46998341c409ea00cdb635f5132954c305b2a49 100644 (file)
@@ -603,7 +603,7 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VIQEN);
                                break;
                        case AC3_VO:
-                               acm_ctrl &= (~ACMHW_BEQEN);
+                               acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
                                RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
index a6fea106ced4a92f3d08045f63a6ce1876ac883d..f423e157020ffb631f31806ac6cc25347afd2359 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL8723BE_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                        u8 bandwidth);
index ee7c208bd070944850c5f560fcaff58d432e1a18..dfbdf539de1a1fc0b6163543c462082c6301a9c4 100644 (file)
 #define        WIFI_NAV_UPPER_US                               30000
 #define HAL_92C_NAV_UPPER_UNIT                 128
 
-#define HAL_RETRY_LIMIT_INFRA                          48
-#define HAL_RETRY_LIMIT_AP_ADHOC                       7
-
-#define RESET_DELAY_8185                                       20
-
-#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE                          10
-#define NUM_OF_PAGES_IN_FW                                     0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO                     0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA           0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD                    0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT           0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH           0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN                    0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB                    0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM         0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM         0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM         0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM         0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                0x00
-
 #define MAX_RX_DMA_BUFFER_SIZE                         0x3E80
 
-#define MAX_LINES_HWCONFIG_TXT                         1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT                    256
-
-#define SW_THREE_WIRE                                          0
-#define HW_THREE_WIRE                                          2
-
-#define BT_DEMO_BOARD                                          0
-#define BT_QA_BOARD                                                    1
-#define BT_FPGA                                                                2
-
 #define HAL_PRIME_CHNL_OFFSET_DONT_CARE                0
 #define HAL_PRIME_CHNL_OFFSET_LOWER                    1
 #define HAL_PRIME_CHNL_OFFSET_UPPER                    2
 
-#define MAX_H2C_QUEUE_NUM                                      10
-
 #define RX_MPDU_QUEUE                                          0
 #define RX_CMD_QUEUE                                           1
-#define RX_MAX_QUEUE                                           2
-#define AC2QUEUEID(_AC)                                                (_AC)
 
 #define MAX_RX_DMA_BUFFER_SIZE_8812    0x3E80
 
index 8ec8200002c7311025b3ae645c9956bf08c44a17..2a0a71bac00c84d331972fd27e234fafe70ec5f3 100644 (file)
@@ -667,7 +667,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VIQEN);
                                break;
                        case AC3_VO:
-                               acm_ctrl &= (~ACMHW_BEQEN);
+                               acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
                                RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
@@ -1515,7 +1515,7 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
                                      (u8 *)(&support_remote_wakeup));
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-                "boundary=0x%#X, NPQ_RQPNValue=0x%#X, RQPNValue=0x%#X\n",
+                "boundary=%#X, NPQ_RQPNValue=%#X, RQPNValue=%#X\n",
                  boundary, npq_rqpn_value, rqpn_val);
 
        /* stop PCIe DMA
index d9582ee1c335462de324f9d347cdb96be73a971c..efd22bd0b139f2880aa5fd547f81c5976846cb4c 100644 (file)
@@ -27,7 +27,6 @@
 #define __RTL8821AE_RF_H__
 
 #define RF6052_MAX_TX_PWR              0x3F
-#define RF6052_MAX_REG                 0x3F
 
 void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
                                        u8 bandwidth);
index 46ee956d0235dc79dbb8130242b5b2586fd4db5e..f0188c83c79f7d6027bdee6372768d3657347a2a 100644 (file)
@@ -701,12 +701,18 @@ free:
 
 static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
 {
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
        struct urb *urb;
 
        usb_kill_anchored_urbs(&rtlusb->rx_submitted);
 
        tasklet_kill(&rtlusb->rx_work_tasklet);
+       cancel_work_sync(&rtlpriv->works.lps_change_work);
+
+       flush_workqueue(rtlpriv->works.rtl_wq);
+       destroy_workqueue(rtlpriv->works.rtl_wq);
+
        skb_queue_purge(&rtlusb->rx_queue);
 
        while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
@@ -794,8 +800,6 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw)
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
        struct ieee80211_tx_info *txinfo;
 
-       SET_USB_STOP(rtlusb);
-
        /* clean up rx stuff. */
        _rtl_usb_cleanup_rx(hw);
 
@@ -834,7 +838,6 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
        cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
        /* Enable software */
        SET_USB_STOP(rtlusb);
-       rtl_usb_deinit(hw);
        rtlpriv->cfg->ops->hw_disable(hw);
 }
 
@@ -1147,9 +1150,9 @@ void rtl_usb_disconnect(struct usb_interface *intf)
 
        if (unlikely(!rtlpriv))
                return;
-
        /* just in case driver is removed before firmware callback */
        wait_for_completion(&rtlpriv->firmware_loading_complete);
+       clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
        /*ieee80211_unregister_hw will call ops_stop */
        if (rtlmac->mac80211_registered == 1) {
                ieee80211_unregister_hw(hw);
index d4ba009ac9aa62b7d9c24404dda707a132030291..d1e9a13be910b584d5e17394822ba3c1ff2a9ba4 100644 (file)
@@ -468,7 +468,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
        wl1251_tx_flush(wl);
        wl1251_power_off(wl);
 
-       memset(wl->bssid, 0, ETH_ALEN);
+       eth_zero_addr(wl->bssid);
        wl->listen_int = 1;
        wl->bss_type = MAX_BSS_TYPE;
 
@@ -547,7 +547,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
        mutex_lock(&wl->mutex);
        wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
        wl->vif = NULL;
-       memset(wl->bssid, 0, ETH_ALEN);
+       eth_zero_addr(wl->bssid);
        mutex_unlock(&wl->mutex);
 }
 
index c26fc2106e5bfc3f44f8c7d2cda483824b0ea284..68919f8d4310455fad623381ee16e2f1c96dac69 100644 (file)
@@ -367,7 +367,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
        wl->links[*hlid].allocated_pkts = 0;
        wl->links[*hlid].prev_freed_pkts = 0;
        wl->links[*hlid].ba_bitmap = 0;
-       memset(wl->links[*hlid].addr, 0, ETH_ALEN);
+       eth_zero_addr(wl->links[*hlid].addr);
 
        /*
         * At this point op_tx() will not add more packets to the queues. We
@@ -1293,7 +1293,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        hdr->frame_control = cpu_to_le16(fc);
        memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
        memcpy(hdr->addr2, vif->addr, ETH_ALEN);
-       memset(hdr->addr3, 0xff, ETH_ALEN);
+       eth_broadcast_addr(hdr->addr3);
 
        ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
                                      skb->data, skb->len, 0,
index f38227afe0998a668c40e2194def6128895bb077..4ae98e2ad719ff4533bb8430cdec633663745bfb 100644 (file)
@@ -438,7 +438,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
-       memset(dev->dev_addr, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(dev->dev_addr);
        dev->dev_addr[0] &= ~0x01;
 
        netif_carrier_off(dev);
index f7a31d2cb3f1819fdf7ebdeb40e0f6bf44aabe0a..c4d68d7684087f6ec66400047be4991c5e51cfe6 100644 (file)
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
        unsigned long flags;
 
        do {
+               int notify;
+
                spin_lock_irqsave(&queue->response_lock, flags);
                make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+               RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
                spin_unlock_irqrestore(&queue->response_lock, flags);
+               if (notify)
+                       notify_remote_via_irq(queue->tx_irq);
+
                if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 {
        struct pending_tx_info *pending_tx_info;
        pending_ring_idx_t index;
+       int notify;
        unsigned long flags;
 
        pending_tx_info = &queue->pending_tx_info[pending_idx];
+
        spin_lock_irqsave(&queue->response_lock, flags);
+
        make_tx_response(queue, &pending_tx_info->req, status);
-       index = pending_index(queue->pending_prod);
+
+       /* Release the pending index before pushing the Tx response so
+        * it's available before a new Tx request is pushed by the
+        * frontend.
+        */
+       index = pending_index(queue->pending_prod++);
        queue->pending_ring[index] = pending_idx;
-       /* TX shouldn't use the index before we give it back here */
-       mb();
-       queue->pending_prod++;
+
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+
        spin_unlock_irqrestore(&queue->response_lock, flags);
+
+       if (notify)
+               notify_remote_via_irq(queue->tx_irq);
 }
 
 
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 {
        RING_IDX i = queue->tx.rsp_prod_pvt;
        struct xen_netif_tx_response *resp;
-       int notify;
 
        resp = RING_GET_RESPONSE(&queue->tx, i);
        resp->id     = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
                RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
        queue->tx.rsp_prod_pvt = ++i;
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
-       if (notify)
-               notify_remote_via_irq(queue->tx_irq);
 }
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
index 110fece2ff537238f999cee084451964293bf429..62426d81a4d656c7dfd57c32a6a5bfd15398c6ca 100644 (file)
@@ -229,7 +229,6 @@ parse_failed:
        resource_list_for_each_entry(window, resources)
                kfree(window->res);
        pci_free_resource_list(resources);
-       kfree(bus_range);
        return err;
 }
 EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
index 389440228c1de104cc1dde0eafbc94697f6373d5..7d1437b01fdd8f586e705c3842ab4e798de8750f 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config PCIEAER
-       boolean "Root Port Advanced Error Reporting support"
+       bool "Root Port Advanced Error Reporting support"
        depends on PCIEPORTBUS
        select RAS
        default y
index 638e797037da973f7756b416a47f37c3ae314025..97527614141bf4a406528503d404ad8efd64695f 100644 (file)
@@ -735,6 +735,31 @@ config INTEL_IPS
          functionality.  If in doubt, say Y here; it will only load on
          supported platforms.
 
+config INTEL_IMR
+       bool "Intel Isolated Memory Region support"
+       default n
+       depends on X86_INTEL_QUARK && IOSF_MBI
+       ---help---
+         This option provides a means to manipulate Isolated Memory Regions.
+         IMRs are a set of registers that define read and write access masks
+         to prohibit certain system agents from accessing memory with 1 KiB
+         granularity.
+
+         IMRs make it possible to control read/write access to an address
+         by hardware agents inside the SoC. Read and write masks can be
+         defined for:
+               - eSRAM flush
+               - Dirty CPU snoop (write only)
+               - RMU access
+               - PCI Virtual Channel 0/Virtual Channel 1
+               - SMM mode
+               - Non SMM mode
+
+         Quark contains a set of eight IMR registers and makes use of those
+         registers during its bootup process.
+
+         If you are running on a Galileo/Quark say Y here.
+
 config IBM_RTL
        tristate "Device driver to enable PRTL support"
        depends on X86 && PCI
index f71700e0d13212cb3a93701d939c9fced2b0f6ca..46b27469387283eed295e664a6c728a639860fa5 100644 (file)
@@ -856,8 +856,8 @@ static void asus_backlight_exit(struct asus_laptop *asus)
  * than count bytes. We set eof to 1 if we handle those 2 values. We return the
  * number of bytes written in page
  */
-static ssize_t show_infos(struct device *dev,
-                         struct device_attribute *attr, char *page)
+static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
+                         char *page)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int len = 0;
@@ -926,6 +926,7 @@ static ssize_t show_infos(struct device *dev,
 
        return len;
 }
+static DEVICE_ATTR_RO(infos);
 
 static int parse_arg(const char *buf, unsigned long count, int *val)
 {
@@ -957,15 +958,15 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
 /*
  * LEDD display
  */
-static ssize_t show_ledd(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t ledd_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "0x%08x\n", asus->ledd_status);
 }
 
-static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
+static ssize_t ledd_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
@@ -981,6 +982,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
        }
        return rv;
 }
+static DEVICE_ATTR_RW(ledd);
 
 /*
  * Wireless
@@ -1014,21 +1016,22 @@ static int asus_wlan_set(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_wlan(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t wlan_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
 }
 
-static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
+static ssize_t wlan_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
 }
+static DEVICE_ATTR_RW(wlan);
 
 /*e
  * Bluetooth
@@ -1042,15 +1045,15 @@ static int asus_bluetooth_set(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_bluetooth(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t bluetooth_show(struct device *dev, struct device_attribute *attr,
+                             char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
 }
 
-static ssize_t store_bluetooth(struct device *dev,
+static ssize_t bluetooth_store(struct device *dev,
                               struct device_attribute *attr, const char *buf,
                               size_t count)
 {
@@ -1058,6 +1061,7 @@ static ssize_t store_bluetooth(struct device *dev,
 
        return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
 }
+static DEVICE_ATTR_RW(bluetooth);
 
 /*
  * Wimax
@@ -1071,22 +1075,22 @@ static int asus_wimax_set(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_wimax(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t wimax_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
 }
 
-static ssize_t store_wimax(struct device *dev,
-                              struct device_attribute *attr, const char *buf,
-                              size_t count)
+static ssize_t wimax_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX);
 }
+static DEVICE_ATTR_RW(wimax);
 
 /*
  * Wwan
@@ -1100,22 +1104,22 @@ static int asus_wwan_set(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_wwan(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t wwan_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
 }
 
-static ssize_t store_wwan(struct device *dev,
-                              struct device_attribute *attr, const char *buf,
-                              size_t count)
+static ssize_t wwan_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sysfs_acpi_set(asus, buf, count, METHOD_WWAN);
 }
+static DEVICE_ATTR_RW(wwan);
 
 /*
  * Display
@@ -1135,8 +1139,8 @@ static void asus_set_display(struct asus_laptop *asus, int value)
  * displays hooked up simultaneously, so be warned. See the acpi4asus README
  * for more info.
  */
-static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
-                         const char *buf, size_t count)
+static ssize_t display_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int rv, value;
@@ -1146,6 +1150,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
                asus_set_display(asus, value);
        return rv;
 }
+static DEVICE_ATTR_WO(display);
 
 /*
  * Light Sens
@@ -1167,16 +1172,17 @@ static void asus_als_switch(struct asus_laptop *asus, int value)
        asus->light_switch = value;
 }
 
-static ssize_t show_lssw(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t ls_switch_show(struct device *dev, struct device_attribute *attr,
+                             char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus->light_switch);
 }
 
-static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
-                         const char *buf, size_t count)
+static ssize_t ls_switch_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int rv, value;
@@ -1187,6 +1193,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
 
        return rv;
 }
+static DEVICE_ATTR_RW(ls_switch);
 
 static void asus_als_level(struct asus_laptop *asus, int value)
 {
@@ -1195,16 +1202,16 @@ static void asus_als_level(struct asus_laptop *asus, int value)
        asus->light_level = value;
 }
 
-static ssize_t show_lslvl(struct device *dev,
-                         struct device_attribute *attr, char *buf)
+static ssize_t ls_level_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus->light_level);
 }
 
-static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
+static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr,
+                             const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int rv, value;
@@ -1218,6 +1225,7 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
 
        return rv;
 }
+static DEVICE_ATTR_RW(ls_level);
 
 static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
 {
@@ -1234,8 +1242,8 @@ static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
        return err;
 }
 
-static ssize_t show_lsvalue(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+static ssize_t ls_value_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int err, hi, lo;
@@ -1247,6 +1255,7 @@ static ssize_t show_lsvalue(struct device *dev,
                return sprintf(buf, "%d\n", 10 * hi + lo);
        return err;
 }
+static DEVICE_ATTR_RO(ls_value);
 
 /*
  * GPS
@@ -1274,15 +1283,15 @@ static int asus_gps_switch(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_gps(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+static ssize_t gps_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_gps_status(asus));
 }
 
-static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
+static ssize_t gps_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
@@ -1298,6 +1307,7 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
        rfkill_set_sw_state(asus->gps.rfkill, !value);
        return rv;
 }
+static DEVICE_ATTR_RW(gps);
 
 /*
  * rfkill
@@ -1569,19 +1579,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
        asus_input_notify(asus, event);
 }
 
-static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
-static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
-static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR,
-                  show_bluetooth, store_bluetooth);
-static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax);
-static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan);
-static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp);
-static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
-static DEVICE_ATTR(ls_value, S_IRUGO, show_lsvalue, NULL);
-static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
-static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
-static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
-
 static struct attribute *asus_attributes[] = {
        &dev_attr_infos.attr,
        &dev_attr_wlan.attr,
@@ -1616,7 +1613,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
                else
                        goto normal;
 
-               return supported;
+               return supported ? attr->mode : 0;
        }
 
 normal:
index 70d355a9ae2cc2f7cdc2efb04941a2c76688139f..55cf10bc78174b2b50094ec3aebf59ded10847b1 100644 (file)
@@ -520,7 +520,7 @@ static acpi_status cmpc_get_accel(acpi_handle handle,
 {
        union acpi_object param[2];
        struct acpi_object_list input;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, 0 };
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        unsigned char *locs;
        acpi_status status;
 
index 7c21c1c44dfa9acc05bcdf235f12fff4fd8969b0..2a9afa261c615bffb1f1586f2fcb3a287fd33d4b 100644 (file)
@@ -64,6 +64,7 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/backlight.h>
+#include <linux/fb.h>
 #include <linux/input.h>
 #include <linux/kfifo.h>
 #include <linux/platform_device.h>
@@ -398,7 +399,7 @@ static int bl_get_brightness(struct backlight_device *b)
 static int bl_update_status(struct backlight_device *b)
 {
        int ret;
-       if (b->props.power == 4)
+       if (b->props.power == FB_BLANK_POWERDOWN)
                ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
        else
                ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
@@ -1139,9 +1140,9 @@ static int __init fujitsu_init(void)
 
        if (!acpi_video_backlight_support()) {
                if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
-                       fujitsu->bl_device->props.power = 4;
+                       fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;
                else
-                       fujitsu->bl_device->props.power = 0;
+                       fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;
        }
 
        pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
index 66a4d3284aab7cca2cef0ecb85b792d39013bfbb..001b199a8c33d3a90e6693edd6475a63bf6dc6a5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
  *
- * (C) Copyright 2008-2010 Intel Corporation
+ * (C) Copyright 2008-2010,2015 Intel Corporation
  * Author: Sreedhara DS (sreedhara.ds@intel.com)
  *
  * This program is free software; you can redistribute it and/or
 /*
  * IPC register summary
  *
- * IPC register blocks are memory mapped at fixed address of 0xFF11C000
+ * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
  * To read or write information to the SCU, driver writes to IPC-1 memory
- * mapped registers (base address 0xFF11C000). The following is the IPC
- * mechanism
+ * mapped registers. The following is the IPC mechanism
  *
  * 1. IA core cDMI interface claims this transaction and converts it to a
  *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 #define PCI_DEVICE_ID_CLOVERVIEW       0x08ea
 #define PCI_DEVICE_ID_TANGIER          0x11a0
 
-/* intel scu ipc driver data*/
+/* intel scu ipc driver data */
 struct intel_scu_ipc_pdata_t {
-       u32 ipc_base;
        u32 i2c_base;
-       u32 ipc_len;
        u32 i2c_len;
        u8 irq_mode;
 };
 
 static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
-       .ipc_base = 0xff11c000,
        .i2c_base = 0xff12b000,
-       .ipc_len = 0x100,
        .i2c_len = 0x10,
        .irq_mode = 0,
 };
 
 /* Penwell and Cloverview */
 static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
-       .ipc_base = 0xff11c000,
        .i2c_base = 0xff12b000,
-       .ipc_len = 0x100,
        .i2c_len = 0x10,
        .irq_mode = 1,
 };
 
 static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
-       .ipc_base = 0xff009000,
        .i2c_base  = 0xff00d000,
-       .ipc_len  = 0x100,
        .i2c_len = 0x10,
        .irq_mode = 0,
 };
@@ -114,8 +105,6 @@ struct intel_scu_ipc_dev {
 
 static struct intel_scu_ipc_dev  ipcdev; /* Only one for now */
 
-static int platform;           /* Platform type */
-
 /*
  * IPC Read Buffer (Read Only):
  * 16 byte buffer for receiving data from SCU, if IPC command
@@ -160,7 +149,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
  * Format:
  * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
  */
-
 static inline u8 ipc_read_status(void)
 {
        return __raw_readl(ipcdev.ipc_base + 0x04);
@@ -176,23 +164,24 @@ static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */
        return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
 }
 
-static inline int busy_loop(void) /* Wait till scu status is busy */
+/* Wait till scu status is busy */
+static inline int busy_loop(void)
 {
-       u32 status = 0;
-       u32 loop_count = 0;
+       u32 status = ipc_read_status();
+       u32 loop_count = 100000;
 
-       status = ipc_read_status();
-       while (status & 1) {
+       /* break if scu doesn't reset busy bit after huge retry */
+       while ((status & BIT(0)) && --loop_count) {
                udelay(1); /* scu processing time is in few u secods */
                status = ipc_read_status();
-               loop_count++;
-               /* break if scu doesn't reset busy bit after huge retry */
-               if (loop_count > 100000) {
-                       dev_err(&ipcdev.pdev->dev, "IPC timed out");
-                       return -ETIMEDOUT;
-               }
        }
-       if ((status >> 1) & 1)
+
+       if (status & BIT(0)) {
+               dev_err(&ipcdev.pdev->dev, "IPC timed out");
+               return -ETIMEDOUT;
+       }
+
+       if (status & BIT(1))
                return -EIO;
 
        return 0;
@@ -210,14 +199,13 @@ static inline int ipc_wait_for_interrupt(void)
        }
 
        status = ipc_read_status();
-
-       if ((status >> 1) & 1)
+       if (status & BIT(1))
                return -EIO;
 
        return 0;
 }
 
-int intel_scu_ipc_check_status(void)
+static int intel_scu_ipc_check_status(void)
 {
        return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop();
 }
@@ -248,18 +236,18 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
        if (id == IPC_CMD_PCNTRL_R) {
                for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
                        ipc_data_writel(wbuf[nc], offset);
-               ipc_command((count*2) << 16 |  id << 12 | 0 << 8 | op);
+               ipc_command((count * 2) << 16 | id << 12 | 0 << 8 | op);
        } else if (id == IPC_CMD_PCNTRL_W) {
                for (nc = 0; nc < count; nc++, offset += 1)
                        cbuf[offset] = data[nc];
                for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
                        ipc_data_writel(wbuf[nc], offset);
-               ipc_command((count*3) << 16 |  id << 12 | 0 << 8 | op);
+               ipc_command((count * 3) << 16 | id << 12 | 0 << 8 | op);
        } else if (id == IPC_CMD_PCNTRL_M) {
                cbuf[offset] = data[0];
                cbuf[offset + 1] = data[1];
                ipc_data_writel(wbuf[0], 0); /* Write wbuff */
-               ipc_command(4 << 16 |  id << 12 | 0 << 8 | op);
+               ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
        }
 
        err = intel_scu_ipc_check_status();
@@ -301,7 +289,7 @@ EXPORT_SYMBOL(intel_scu_ipc_ioread8);
  */
 int intel_scu_ipc_ioread16(u16 addr, u16 *data)
 {
-       u16 x[2] = {addr, addr + 1 };
+       u16 x[2] = {addr, addr + 1};
        return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
 }
 EXPORT_SYMBOL(intel_scu_ipc_ioread16);
@@ -351,7 +339,7 @@ EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
  */
 int intel_scu_ipc_iowrite16(u16 addr, u16 data)
 {
-       u16 x[2] = {addr, addr + 1 };
+       u16 x[2] = {addr, addr + 1};
        return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
 }
 EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
@@ -412,7 +400,6 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
 }
 EXPORT_SYMBOL(intel_scu_ipc_writev);
 
-
 /**
  *     intel_scu_ipc_update_register   -       r/m/w a register
  *     @addr: register address
@@ -475,9 +462,8 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
  *     Issue a command to the SCU which involves data transfers. Do the
  *     data copies under the lock but leave it for the caller to interpret
  */
-
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
-                                                       u32 *out, int outlen)
+                         u32 *out, int outlen)
 {
        int i, err;
 
@@ -503,7 +489,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
 }
 EXPORT_SYMBOL(intel_scu_ipc_command);
 
-/*I2C commands */
+/* I2C commands */
 #define IPC_I2C_WRITE 1 /* I2C Write command */
 #define IPC_I2C_READ  2 /* I2C Read command */
 
@@ -577,7 +563,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        int err;
        struct intel_scu_ipc_pdata_t *pdata;
-       resource_size_t pci_resource;
+       resource_size_t base;
 
        if (ipcdev.pdev)                /* We support only one SCU */
                return -EBUSY;
@@ -595,8 +581,8 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (err)
                return err;
 
-       pci_resource = pci_resource_start(dev, 0);
-       if (!pci_resource)
+       base = pci_resource_start(dev, 0);
+       if (!base)
                return -ENOMEM;
 
        init_completion(&ipcdev.cmd_complete);
@@ -604,7 +590,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
                return -EBUSY;
 
-       ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
+       ipcdev.ipc_base = ioremap_nocache(base, pci_resource_len(dev, 0));
        if (!ipcdev.ipc_base)
                return -ENOMEM;
 
@@ -666,9 +652,10 @@ static struct pci_driver ipc_driver = {
        .remove = ipc_remove,
 };
 
-
 static int __init intel_scu_ipc_init(void)
 {
+       int platform;           /* Platform type */
+
        platform = intel_mid_identify_cpu();
        if (platform == 0)
                return -ENODEV;
index ff765d8e1a09f648ad40770cdd59cf6aadaaf7e8..9e701b2256f9571afca37e01a7185fe8e63e8399 100644 (file)
@@ -124,6 +124,10 @@ struct sabi_commands {
        u16 get_wireless_status;
        u16 set_wireless_status;
 
+       /* 0x80 is off, 0x81 is on */
+       u16 get_lid_handling;
+       u16 set_lid_handling;
+
        /* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */
        u16 kbd_backlight;
 
@@ -194,6 +198,9 @@ static const struct sabi_config sabi_configs[] = {
                        .get_wireless_status = 0xFFFF,
                        .set_wireless_status = 0xFFFF,
 
+                       .get_lid_handling = 0xFFFF,
+                       .set_lid_handling = 0xFFFF,
+
                        .kbd_backlight = 0xFFFF,
 
                        .set_linux = 0x0a,
@@ -254,6 +261,9 @@ static const struct sabi_config sabi_configs[] = {
                        .get_wireless_status = 0x69,
                        .set_wireless_status = 0x6a,
 
+                       .get_lid_handling = 0x6d,
+                       .set_lid_handling = 0x6e,
+
                        .kbd_backlight = 0x78,
 
                        .set_linux = 0xff,
@@ -353,6 +363,8 @@ struct samsung_quirks {
        bool broken_acpi_video;
        bool four_kbd_backlight_levels;
        bool enable_kbd_backlight;
+       bool use_native_backlight;
+       bool lid_handling;
 };
 
 static struct samsung_quirks samsung_unknown = {};
@@ -361,11 +373,19 @@ static struct samsung_quirks samsung_broken_acpi_video = {
        .broken_acpi_video = true,
 };
 
+static struct samsung_quirks samsung_use_native_backlight = {
+       .use_native_backlight = true,
+};
+
 static struct samsung_quirks samsung_np740u3e = {
        .four_kbd_backlight_levels = true,
        .enable_kbd_backlight = true,
 };
 
+static struct samsung_quirks samsung_lid_handling = {
+       .lid_handling = true,
+};
+
 static bool force;
 module_param(force, bool, 0);
 MODULE_PARM_DESC(force,
@@ -748,7 +768,7 @@ static ssize_t set_battery_life_extender(struct device *dev,
        struct samsung_laptop *samsung = dev_get_drvdata(dev);
        int ret, value;
 
-       if (!count || sscanf(buf, "%i", &value) != 1)
+       if (!count || kstrtoint(buf, 0, &value) != 0)
                return -EINVAL;
 
        ret = write_battery_life_extender(samsung, !!value);
@@ -817,7 +837,7 @@ static ssize_t set_usb_charge(struct device *dev,
        struct samsung_laptop *samsung = dev_get_drvdata(dev);
        int ret, value;
 
-       if (!count || sscanf(buf, "%i", &value) != 1)
+       if (!count || kstrtoint(buf, 0, &value) != 0)
                return -EINVAL;
 
        ret = write_usb_charge(samsung, !!value);
@@ -830,10 +850,76 @@ static ssize_t set_usb_charge(struct device *dev,
 static DEVICE_ATTR(usb_charge, S_IWUSR | S_IRUGO,
                   get_usb_charge, set_usb_charge);
 
+static int read_lid_handling(struct samsung_laptop *samsung)
+{
+       const struct sabi_commands *commands = &samsung->config->commands;
+       struct sabi_data data;
+       int retval;
+
+       if (commands->get_lid_handling == 0xFFFF)
+               return -ENODEV;
+
+       memset(&data, 0, sizeof(data));
+       retval = sabi_command(samsung, commands->get_lid_handling,
+                             &data, &data);
+
+       if (retval)
+               return retval;
+
+       return data.data[0] & 0x1;
+}
+
+static int write_lid_handling(struct samsung_laptop *samsung,
+                             int enabled)
+{
+       const struct sabi_commands *commands = &samsung->config->commands;
+       struct sabi_data data;
+
+       memset(&data, 0, sizeof(data));
+       data.data[0] = 0x80 | enabled;
+       return sabi_command(samsung, commands->set_lid_handling,
+                           &data, NULL);
+}
+
+static ssize_t get_lid_handling(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct samsung_laptop *samsung = dev_get_drvdata(dev);
+       int ret;
+
+       ret = read_lid_handling(samsung);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t set_lid_handling(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       struct samsung_laptop *samsung = dev_get_drvdata(dev);
+       int ret, value;
+
+       if (!count || kstrtoint(buf, 0, &value) != 0)
+               return -EINVAL;
+
+       ret = write_lid_handling(samsung, !!value);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static DEVICE_ATTR(lid_handling, S_IWUSR | S_IRUGO,
+                  get_lid_handling, set_lid_handling);
+
 static struct attribute *platform_attributes[] = {
        &dev_attr_performance_level.attr,
        &dev_attr_battery_life_extender.attr,
        &dev_attr_usb_charge.attr,
+       &dev_attr_lid_handling.attr,
        NULL
 };
 
@@ -956,6 +1042,22 @@ static int __init samsung_rfkill_init(struct samsung_laptop *samsung)
        return 0;
 }
 
+static void samsung_lid_handling_exit(struct samsung_laptop *samsung)
+{
+       if (samsung->quirks->lid_handling)
+               write_lid_handling(samsung, 0);
+}
+
+static int __init samsung_lid_handling_init(struct samsung_laptop *samsung)
+{
+       int retval = 0;
+
+       if (samsung->quirks->lid_handling)
+               retval = write_lid_handling(samsung, 1);
+
+       return retval;
+}
+
 static int kbd_backlight_enable(struct samsung_laptop *samsung)
 {
        const struct sabi_commands *commands = &samsung->config->commands;
@@ -1111,7 +1213,7 @@ static int __init samsung_backlight_init(struct samsung_laptop *samsung)
 }
 
 static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
-                                      struct attribute *attr, int idx)
+                                       struct attribute *attr, int idx)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct platform_device *pdev = to_platform_device(dev);
@@ -1124,6 +1226,8 @@ static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
                ok = !!(read_battery_life_extender(samsung) >= 0);
        if (attr == &dev_attr_usb_charge.attr)
                ok = !!(read_usb_charge(samsung) >= 0);
+       if (attr == &dev_attr_lid_handling.attr)
+               ok = !!(read_lid_handling(samsung) >= 0);
 
        return ok ? attr->mode : 0;
 }
@@ -1357,7 +1461,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
        samsung_sabi_diag(samsung);
 
        /* Try to find one of the signatures in memory to find the header */
-       for (i = 0; sabi_configs[i].test_string != 0; ++i) {
+       for (i = 0; sabi_configs[i].test_string != NULL; ++i) {
                samsung->config = &sabi_configs[i];
                loca = find_signature(samsung->f0000_segment,
                                      samsung->config->test_string);
@@ -1436,6 +1540,9 @@ static int samsung_pm_notification(struct notifier_block *nb,
            samsung->quirks->enable_kbd_backlight)
                kbd_backlight_enable(samsung);
 
+       if (val == PM_POST_HIBERNATION && samsung->quirks->lid_handling)
+               write_lid_handling(samsung, 1);
+
        return 0;
 }
 
@@ -1507,7 +1614,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
                DMI_MATCH(DMI_BOARD_NAME, "N150P"),
                },
-        .driver_data = &samsung_broken_acpi_video,
+        .driver_data = &samsung_use_native_backlight,
        },
        {
         .callback = samsung_dmi_matched,
@@ -1517,7 +1624,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
                DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
                },
-        .driver_data = &samsung_broken_acpi_video,
+        .driver_data = &samsung_use_native_backlight,
        },
        {
         .callback = samsung_dmi_matched,
@@ -1557,7 +1664,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
                DMI_MATCH(DMI_BOARD_NAME, "N250P"),
                },
-        .driver_data = &samsung_broken_acpi_video,
+        .driver_data = &samsung_use_native_backlight,
        },
        {
         .callback = samsung_dmi_matched,
@@ -1578,6 +1685,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                },
         .driver_data = &samsung_np740u3e,
        },
+       {
+        .callback = samsung_dmi_matched,
+        .ident = "300V3Z/300V4Z/300V5Z",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "300V3Z/300V4Z/300V5Z"),
+               },
+        .driver_data = &samsung_lid_handling,
+       },
        { },
 };
 MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -1616,6 +1732,15 @@ static int __init samsung_init(void)
                pr_info("Disabling ACPI video driver\n");
                acpi_video_unregister();
        }
+
+       if (samsung->quirks->use_native_backlight) {
+               pr_info("Using native backlight driver\n");
+               /* Tell acpi-video to not handle the backlight */
+               acpi_video_dmi_promote_vendor();
+               acpi_video_unregister();
+               /* And also do not handle it ourselves */
+               samsung->handle_backlight = false;
+       }
 #endif
 
        ret = samsung_platform_init(samsung);
@@ -1648,6 +1773,10 @@ static int __init samsung_init(void)
        if (ret)
                goto error_leds;
 
+       ret = samsung_lid_handling_init(samsung);
+       if (ret)
+               goto error_lid_handling;
+
        ret = samsung_debugfs_init(samsung);
        if (ret)
                goto error_debugfs;
@@ -1659,6 +1788,8 @@ static int __init samsung_init(void)
        return ret;
 
 error_debugfs:
+       samsung_lid_handling_exit(samsung);
+error_lid_handling:
        samsung_leds_exit(samsung);
 error_leds:
        samsung_rfkill_exit(samsung);
@@ -1683,6 +1814,7 @@ static void __exit samsung_exit(void)
        unregister_pm_notifier(&samsung->pm_nb);
 
        samsung_debugfs_exit(samsung);
+       samsung_lid_handling_exit(samsung);
        samsung_leds_exit(samsung);
        samsung_rfkill_exit(samsung);
        samsung_backlight_exit(samsung);
index 6dd1c0e7dcd9af81a43db55f0c4ba87956e37dde..e51c1e7536077306234e579121cf18badcb45a01 100644 (file)
@@ -1032,7 +1032,7 @@ struct sony_backlight_props {
        u8                      offset;
        u8                      maxlvl;
 };
-struct sony_backlight_props sony_bl_props;
+static struct sony_backlight_props sony_bl_props;
 
 static int sony_backlight_update_status(struct backlight_device *bd)
 {
index c3d11fabc46f21c98c6122497958837ae291b171..3b8ceee7c5cbfd891c87a5fd58c59017db5f34e8 100644 (file)
@@ -196,6 +196,7 @@ enum tpacpi_hkey_event_t {
        /* Key-related user-interface events */
        TP_HKEY_EV_KEY_NUMLOCK          = 0x6000, /* NumLock key pressed */
        TP_HKEY_EV_KEY_FN               = 0x6005, /* Fn key pressed? E420 */
+       TP_HKEY_EV_KEY_FN_ESC           = 0x6060, /* Fn+Esc key pressed X240 */
 
        /* Thermal events */
        TP_HKEY_EV_ALARM_BAT_HOT        = 0x6011, /* battery too hot */
@@ -3456,7 +3457,7 @@ enum ADAPTIVE_KEY_MODE {
        LAYFLAT_MODE
 };
 
-const int adaptive_keyboard_modes[] = {
+static const int adaptive_keyboard_modes[] = {
        HOME_MODE,
 /*     WEB_BROWSER_MODE = 2,
        WEB_CONFERENCE_MODE = 3, */
@@ -3712,6 +3713,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
 
        case TP_HKEY_EV_KEY_NUMLOCK:
        case TP_HKEY_EV_KEY_FN:
+       case TP_HKEY_EV_KEY_FN_ESC:
                /* key press events, we just ignore them as long as the EC
                 * is still reporting them in the normal keyboard stream */
                *send_acpi_ev = false;
@@ -8883,17 +8885,31 @@ static bool __pure __init tpacpi_is_fw_digit(const char c)
        return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
 }
 
-/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lacks (#.##c) */
 static bool __pure __init tpacpi_is_valid_fw_id(const char * const s,
                                                const char t)
 {
-       return s && strlen(s) >= 8 &&
+       /*
+        * Most models: xxyTkkWW (#.##c)
+        * Ancient 570/600 and -SL lacks (#.##c)
+        */
+       if (s && strlen(s) >= 8 &&
                tpacpi_is_fw_digit(s[0]) &&
                tpacpi_is_fw_digit(s[1]) &&
                s[2] == t &&
                (s[3] == 'T' || s[3] == 'N') &&
                tpacpi_is_fw_digit(s[4]) &&
-               tpacpi_is_fw_digit(s[5]);
+               tpacpi_is_fw_digit(s[5]))
+               return true;
+
+       /* New models: xxxyTkkW (#.##c); T550 and some others */
+       return s && strlen(s) >= 8 &&
+               tpacpi_is_fw_digit(s[0]) &&
+               tpacpi_is_fw_digit(s[1]) &&
+               tpacpi_is_fw_digit(s[2]) &&
+               s[3] == t &&
+               (s[4] == 'T' || s[4] == 'N') &&
+               tpacpi_is_fw_digit(s[5]) &&
+               tpacpi_is_fw_digit(s[6]);
 }
 
 /* returns 0 - probe ok, or < 0 - probe error.
index fc34a71866ed067624c2a851c29a182e9be48f08..dbcb7a8915b84fb8794fd4daea15825e55a49cdd 100644 (file)
@@ -1,11 +1,10 @@
 /*
  *  toshiba_acpi.c - Toshiba Laptop ACPI Extras
  *
- *
  *  Copyright (C) 2002-2004 John Belmonte
  *  Copyright (C) 2008 Philip Langdale
  *  Copyright (C) 2010 Pierre Ducroquet
- *  Copyright (C) 2014 Azael Avalos
+ *  Copyright (C) 2014-2015 Azael Avalos
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
  *
  *  The devolpment page for this driver is located at
  *  http://memebeam.org/toys/ToshibaAcpiDriver.
  *             engineering the Windows drivers
  *     Yasushi Nagato - changes for linux kernel 2.4 -> 2.5
  *     Rob Miller - TV out and hotkeys help
- *
- *
- *  TODO
- *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define TOSHIBA_ACPI_VERSION   "0.20"
+#define TOSHIBA_ACPI_VERSION   "0.21"
 #define PROC_INTERFACE_VERSION 1
 
 #include <linux/kernel.h>
@@ -57,7 +50,7 @@
 #include <linux/i8042.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 MODULE_AUTHOR("John Belmonte");
 MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
@@ -71,7 +64,8 @@ MODULE_LICENSE("GPL");
 /* Toshiba ACPI method paths */
 #define METHOD_VIDEO_OUT       "\\_SB_.VALX.DSSX"
 
-/* The Toshiba configuration interface is composed of the HCI and the SCI,
+/*
+ * The Toshiba configuration interface is composed of the HCI and the SCI,
  * which are defined as follows:
  *
  * HCI is Toshiba's "Hardware Control Interface" which is supposed to
@@ -108,6 +102,7 @@ MODULE_LICENSE("GPL");
 #define TOS_FIFO_EMPTY                 0x8c00
 #define TOS_DATA_NOT_AVAILABLE         0x8d20
 #define TOS_NOT_INITIALIZED            0x8d50
+#define TOS_NOT_INSTALLED              0x8e00
 
 /* registers */
 #define HCI_FAN                                0x0004
@@ -121,9 +116,14 @@ MODULE_LICENSE("GPL");
 #define HCI_KBD_ILLUMINATION           0x0095
 #define HCI_ECO_MODE                   0x0097
 #define HCI_ACCELEROMETER2             0x00a6
+#define SCI_PANEL_POWER_ON             0x010d
 #define SCI_ILLUMINATION               0x014e
+#define SCI_USB_SLEEP_CHARGE           0x0150
 #define SCI_KBD_ILLUM_STATUS           0x015c
+#define SCI_USB_SLEEP_MUSIC            0x015e
+#define SCI_USB_THREE                  0x0169
 #define SCI_TOUCHPAD                   0x050e
+#define SCI_KBD_FUNCTION_KEYS          0x0522
 
 /* field definitions */
 #define HCI_ACCEL_MASK                 0x7fff
@@ -146,6 +146,15 @@ MODULE_LICENSE("GPL");
 #define SCI_KBD_MODE_ON                        0x8
 #define SCI_KBD_MODE_OFF               0x10
 #define SCI_KBD_TIME_MAX               0x3c001a
+#define SCI_USB_CHARGE_MODE_MASK       0xff
+#define SCI_USB_CHARGE_DISABLED                0x30000
+#define SCI_USB_CHARGE_ALTERNATE       0x30009
+#define SCI_USB_CHARGE_AUTO            0x30021
+#define SCI_USB_CHARGE_BAT_MASK                0x7
+#define SCI_USB_CHARGE_BAT_LVL_OFF     0x1
+#define SCI_USB_CHARGE_BAT_LVL_ON      0x4
+#define SCI_USB_CHARGE_BAT_LVL         0x0200
+#define SCI_USB_CHARGE_RAPID_DSP       0x0300
 
 struct toshiba_acpi_dev {
        struct acpi_device *acpi_dev;
@@ -164,6 +173,7 @@ struct toshiba_acpi_dev {
        int kbd_type;
        int kbd_mode;
        int kbd_time;
+       int usbsc_bat_level;
 
        unsigned int illumination_supported:1;
        unsigned int video_supported:1;
@@ -177,6 +187,12 @@ struct toshiba_acpi_dev {
        unsigned int touchpad_supported:1;
        unsigned int eco_supported:1;
        unsigned int accelerometer_supported:1;
+       unsigned int usb_sleep_charge_supported:1;
+       unsigned int usb_rapid_charge_supported:1;
+       unsigned int usb_sleep_music_supported:1;
+       unsigned int kbd_function_keys_supported:1;
+       unsigned int panel_power_on_supported:1;
+       unsigned int usb_three_supported:1;
        unsigned int sysfs_created:1;
 
        struct mutex mutex;
@@ -264,15 +280,17 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = {
        { KE_END, 0 },
 };
 
-/* utility
+/*
+ * Utility
  */
 
-static __inline__ void _set_bit(u32 * word, u32 mask, int value)
+static inline void _set_bit(u32 *word, u32 mask, int value)
 {
        *word = (*word & ~mask) | (mask * value);
 }
 
-/* acpi interface wrappers
+/*
+ * ACPI interface wrappers
  */
 
 static int write_acpi_int(const char *methodName, int val)
@@ -283,7 +301,8 @@ static int write_acpi_int(const char *methodName, int val)
        return (status == AE_OK) ? 0 : -EIO;
 }
 
-/* Perform a raw configuration call.  Here we don't care about input or output
+/*
+ * Perform a raw configuration call.  Here we don't care about input or output
  * buffer format.
  */
 static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
@@ -310,15 +329,15 @@ static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
                                      (char *)dev->method_hci, &params,
                                      &results);
        if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) {
-               for (i = 0; i < out_objs->package.count; ++i) {
+               for (i = 0; i < out_objs->package.count; ++i)
                        out[i] = out_objs->package.elements[i].integer.value;
-               }
        }
 
        return status;
 }
 
-/* common hci tasks (get or set one or two value)
+/*
+ * Common hci tasks (get or set one or two value)
  *
  * In addition to the ACPI status, the HCI system returns a result which
  * may be useful (such as "not supported").
@@ -338,6 +357,7 @@ static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
        u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
        u32 out[TCI_WORDS];
        acpi_status status = tci_raw(dev, in, out);
+
        if (ACPI_FAILURE(status))
                return TOS_FAILURE;
 
@@ -355,11 +375,13 @@ static u32 hci_write2(struct toshiba_acpi_dev *dev, u32 reg, u32 in1, u32 in2)
        return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
 }
 
-static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2)
+static u32 hci_read2(struct toshiba_acpi_dev *dev,
+                    u32 reg, u32 *out1, u32 *out2)
 {
        u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
        u32 out[TCI_WORDS];
        acpi_status status = tci_raw(dev, in, out);
+
        if (ACPI_FAILURE(status))
                return TOS_FAILURE;
 
@@ -369,7 +391,8 @@ static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2
        return out[0];
 }
 
-/* common sci tasks
+/*
+ * Common sci tasks
  */
 
 static int sci_open(struct toshiba_acpi_dev *dev)
@@ -389,6 +412,20 @@ static int sci_open(struct toshiba_acpi_dev *dev)
        } else if (out[0] == TOS_ALREADY_OPEN) {
                pr_info("Toshiba SCI already opened\n");
                return 1;
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
+               /*
+                * Some BIOSes do not have the SCI open/close functions
+                * implemented and return 0x8000 (Not Supported), failing to
+                * register some supported features.
+                *
+                * Simply return 1 if we hit those affected laptops to make the
+                * supported features work.
+                *
+                * In the case that some laptops really do not support the SCI,
+                * all the SCI dependent functions check for TOS_NOT_SUPPORTED,
+                * and thus, not registering support for the queried feature.
+                */
+               return 1;
        } else if (out[0] == TOS_NOT_PRESENT) {
                pr_info("Toshiba SCI is not present\n");
        }
@@ -421,6 +458,7 @@ static u32 sci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
        u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
        u32 out[TCI_WORDS];
        acpi_status status = tci_raw(dev, in, out);
+
        if (ACPI_FAILURE(status))
                return TOS_FAILURE;
 
@@ -529,10 +567,11 @@ static int toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
                return 0;
        }
 
-       /* Check for keyboard backlight timeout max value,
+       /*
+        * Check for keyboard backlight timeout max value,
         * previous kbd backlight implementation set this to
         * 0x3c0003, and now the new implementation set this
-        * to 0x3c001a, use this to distinguish between them
+        * to 0x3c001a, use this to distinguish between them.
         */
        if (out[3] == SCI_KBD_TIME_MAX)
                dev->kbd_type = 2;
@@ -667,19 +706,37 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
 static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
 {
        acpi_status status;
-       u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+       u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
        u32 out[TCI_WORDS];
 
        status = tci_raw(dev, in, out);
-       if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
-               pr_info("ACPI call to get ECO led failed\n");
-               return 0;
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to get ECO led failed\n");
+       } else if (out[0] == TOS_NOT_INSTALLED) {
+               pr_info("ECO led not installed");
+       } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+               /*
+                * If we receive 0x8300 (Input Data Error), it means that the
+                * LED device is present, but that we just screwed the input
+                * parameters.
+                *
+                * Let's query the status of the LED to see if we really have a
+                * success response, indicating the actual presence of the LED,
+                * bail out otherwise.
+                */
+               in[3] = 1;
+               status = tci_raw(dev, in, out);
+               if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE)
+                       pr_err("ACPI call to get ECO led failed\n");
+               else if (out[0] == TOS_SUCCESS)
+                       return 1;
        }
 
-       return 1;
+       return 0;
 }
 
-static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev)
+static enum led_brightness
+toshiba_eco_mode_get_status(struct led_classdev *cdev)
 {
        struct toshiba_acpi_dev *dev = container_of(cdev,
                        struct toshiba_acpi_dev, eco_led);
@@ -721,7 +778,8 @@ static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
        u32 out[TCI_WORDS];
        acpi_status status;
 
-       /* Check if the accelerometer call exists,
+       /*
+        * Check if the accelerometer call exists,
         * this call also serves as initialization
         */
        status = tci_raw(dev, in, out);
@@ -760,198 +818,533 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
        return 0;
 }
 
-/* Bluetooth rfkill handlers */
-
-static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
+/* Sleep (Charge and Music) utilities support */
+static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
+                                       u32 *mode)
 {
-       u32 hci_result;
-       u32 value, value2;
+       u32 result;
 
-       value = 0;
-       value2 = 0;
-       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
-       if (hci_result == TOS_SUCCESS)
-               *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
+       if (!sci_open(dev))
+               return -EIO;
 
-       return hci_result;
+       result = sci_read(dev, SCI_USB_SLEEP_CHARGE, mode);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to get USB S&C mode failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
 }
 
-static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state)
+static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev,
+                                       u32 mode)
 {
-       u32 hci_result;
-       u32 value, value2;
+       u32 result;
 
-       value = 0;
-       value2 = 0x0001;
-       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
+       if (!sci_open(dev))
+               return -EIO;
 
-       *radio_state = value & HCI_WIRELESS_KILL_SWITCH;
-       return hci_result;
+       result = sci_write(dev, SCI_USB_SLEEP_CHARGE, mode);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to set USB S&C mode failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
 }
 
-static int bt_rfkill_set_block(void *data, bool blocked)
+static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
+                                             u32 *mode)
 {
-       struct toshiba_acpi_dev *dev = data;
-       u32 result1, result2;
-       u32 value;
-       int err;
-       bool radio_state;
-
-       value = (blocked == false);
+       u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       mutex_lock(&dev->mutex);
-       if (hci_get_radio_state(dev, &radio_state) != TOS_SUCCESS) {
-               err = -EIO;
-               goto out;
-       }
+       if (!sci_open(dev))
+               return -EIO;
 
-       if (!radio_state) {
-               err = 0;
-               goto out;
+       in[5] = SCI_USB_CHARGE_BAT_LVL;
+       status = tci_raw(dev, in, out);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to get USB S&C battery level failed\n");
+               return -EIO;
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
        }
 
-       result1 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER);
-       result2 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH);
+       *mode = out[2];
 
-       if (result1 != TOS_SUCCESS || result2 != TOS_SUCCESS)
-               err = -EIO;
-       else
-               err = 0;
- out:
-       mutex_unlock(&dev->mutex);
-       return err;
+       return 0;
 }
 
-static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
+static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
+                                             u32 mode)
 {
-       bool new_rfk_state;
-       bool value;
-       u32 hci_result;
-       struct toshiba_acpi_dev *dev = data;
+       u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       mutex_lock(&dev->mutex);
+       if (!sci_open(dev))
+               return -EIO;
 
-       hci_result = hci_get_radio_state(dev, &value);
-       if (hci_result != TOS_SUCCESS) {
-               /* Can't do anything useful */
-               mutex_unlock(&dev->mutex);
-               return;
+       in[2] = mode;
+       in[5] = SCI_USB_CHARGE_BAT_LVL;
+       status = tci_raw(dev, in, out);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to set USB S&C battery level failed\n");
+               return -EIO;
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
        }
 
-       new_rfk_state = value;
-
-       mutex_unlock(&dev->mutex);
-
-       if (rfkill_set_hw_state(rfkill, !new_rfk_state))
-               bt_rfkill_set_block(data, true);
+       return 0;
 }
 
-static const struct rfkill_ops toshiba_rfk_ops = {
-       .set_block = bt_rfkill_set_block,
-       .poll = bt_rfkill_poll,
-};
-
-static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
+                                       u32 *state)
 {
-       u32 hci_result;
-       u32 status;
+       u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       hci_result = hci_read1(dev, HCI_TR_BACKLIGHT, &status);
-       *enabled = !status;
-       return hci_result == TOS_SUCCESS ? 0 : -EIO;
-}
+       if (!sci_open(dev))
+               return -EIO;
 
-static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
-{
-       u32 hci_result;
-       u32 value = !enable;
+       in[5] = SCI_USB_CHARGE_RAPID_DSP;
+       status = tci_raw(dev, in, out);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to get USB Rapid Charge failed\n");
+               return -EIO;
+       } else if (out[0] == TOS_NOT_SUPPORTED ||
+                  out[0] == TOS_INPUT_DATA_ERROR) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       }
 
-       hci_result = hci_write1(dev, HCI_TR_BACKLIGHT, value);
-       return hci_result == TOS_SUCCESS ? 0 : -EIO;
-}
+       *state = out[2];
 
-static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
+       return 0;
+}
 
-static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
+static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
+                                       u32 state)
 {
-       u32 hci_result;
-       u32 value;
-       int brightness = 0;
+       u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       if (dev->tr_backlight_supported) {
-               bool enabled;
-               int ret = get_tr_backlight_status(dev, &enabled);
-               if (ret)
-                       return ret;
-               if (enabled)
-                       return 0;
-               brightness++;
-       }
+       if (!sci_open(dev))
+               return -EIO;
 
-       hci_result = hci_read1(dev, HCI_LCD_BRIGHTNESS, &value);
-       if (hci_result == TOS_SUCCESS)
-               return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+       in[2] = state;
+       in[5] = SCI_USB_CHARGE_RAPID_DSP;
+       status = tci_raw(dev, in, out);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to set USB Rapid Charge failed\n");
+               return -EIO;
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
 
-       return -EIO;
+       return 0;
 }
 
-static int get_lcd_brightness(struct backlight_device *bd)
+static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
 {
-       struct toshiba_acpi_dev *dev = bl_get_data(bd);
-       return __get_lcd_brightness(dev);
-}
+       u32 result;
 
-static int lcd_proc_show(struct seq_file *m, void *v)
-{
-       struct toshiba_acpi_dev *dev = m->private;
-       int value;
-       int levels;
+       if (!sci_open(dev))
+               return -EIO;
 
-       if (!dev->backlight_dev)
+       result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to get USB S&C mode failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
                return -ENODEV;
-
-       levels = dev->backlight_dev->props.max_brightness + 1;
-       value = get_lcd_brightness(dev->backlight_dev);
-       if (value >= 0) {
-               seq_printf(m, "brightness:              %d\n", value);
-               seq_printf(m, "brightness_levels:       %d\n", levels);
-               return 0;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
        }
 
-       pr_err("Error reading LCD brightness\n");
-       return -EIO;
+       return 0;
 }
 
-static int lcd_proc_open(struct inode *inode, struct file *file)
+static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
 {
-       return single_open(file, lcd_proc_show, PDE_DATA(inode));
-}
+       u32 result;
 
-static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
-{
-       u32 hci_result;
+       if (!sci_open(dev))
+               return -EIO;
 
-       if (dev->tr_backlight_supported) {
-               bool enable = !value;
-               int ret = set_tr_backlight_status(dev, enable);
-               if (ret)
-                       return ret;
-               if (value)
-                       value--;
+       result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to set USB S&C mode failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
        }
 
-       value = value << HCI_LCD_BRIGHTNESS_SHIFT;
-       hci_result = hci_write1(dev, HCI_LCD_BRIGHTNESS, value);
-       return hci_result == TOS_SUCCESS ? 0 : -EIO;
+       return 0;
 }
 
-static int set_lcd_status(struct backlight_device *bd)
+/* Keyboard function keys */
+static int toshiba_function_keys_get(struct toshiba_acpi_dev *dev, u32 *mode)
 {
-       struct toshiba_acpi_dev *dev = bl_get_data(bd);
-       return set_lcd_brightness(dev, bd->props.brightness);
-}
+       u32 result;
 
-static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_read(dev, SCI_KBD_FUNCTION_KEYS, mode);
+       sci_close(dev);
+       if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to get KBD function keys failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("KBD function keys not supported\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_write(dev, SCI_KBD_FUNCTION_KEYS, mode);
+       sci_close(dev);
+       if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to set KBD function keys failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("KBD function keys not supported\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Panel Power ON */
+static int toshiba_panel_power_on_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_read(dev, SCI_PANEL_POWER_ON, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to get Panel Power ON failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("Panel Power ON not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_write(dev, SCI_PANEL_POWER_ON, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to set Panel Power ON failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("Panel Power ON not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/* USB Three */
+static int toshiba_usb_three_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_read(dev, SCI_USB_THREE, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to get USB 3 failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB 3 not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_write(dev, SCI_USB_THREE, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to set USB 3 failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB 3 not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/* Bluetooth rfkill handlers */
+
+static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
+{
+       u32 hci_result;
+       u32 value, value2;
+
+       value = 0;
+       value2 = 0;
+       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
+       if (hci_result == TOS_SUCCESS)
+               *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
+
+       return hci_result;
+}
+
+static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state)
+{
+       u32 hci_result;
+       u32 value, value2;
+
+       value = 0;
+       value2 = 0x0001;
+       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
+
+       *radio_state = value & HCI_WIRELESS_KILL_SWITCH;
+       return hci_result;
+}
+
+static int bt_rfkill_set_block(void *data, bool blocked)
+{
+       struct toshiba_acpi_dev *dev = data;
+       u32 result1, result2;
+       u32 value;
+       int err;
+       bool radio_state;
+
+       value = (blocked == false);
+
+       mutex_lock(&dev->mutex);
+       if (hci_get_radio_state(dev, &radio_state) != TOS_SUCCESS) {
+               err = -EIO;
+               goto out;
+       }
+
+       if (!radio_state) {
+               err = 0;
+               goto out;
+       }
+
+       result1 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER);
+       result2 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH);
+
+       if (result1 != TOS_SUCCESS || result2 != TOS_SUCCESS)
+               err = -EIO;
+       else
+               err = 0;
+ out:
+       mutex_unlock(&dev->mutex);
+       return err;
+}
+
+static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
+{
+       bool new_rfk_state;
+       bool value;
+       u32 hci_result;
+       struct toshiba_acpi_dev *dev = data;
+
+       mutex_lock(&dev->mutex);
+
+       hci_result = hci_get_radio_state(dev, &value);
+       if (hci_result != TOS_SUCCESS) {
+               /* Can't do anything useful */
+               mutex_unlock(&dev->mutex);
+               return;
+       }
+
+       new_rfk_state = value;
+
+       mutex_unlock(&dev->mutex);
+
+       if (rfkill_set_hw_state(rfkill, !new_rfk_state))
+               bt_rfkill_set_block(data, true);
+}
+
+static const struct rfkill_ops toshiba_rfk_ops = {
+       .set_block = bt_rfkill_set_block,
+       .poll = bt_rfkill_poll,
+};
+
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+{
+       u32 hci_result;
+       u32 status;
+
+       hci_result = hci_read1(dev, HCI_TR_BACKLIGHT, &status);
+       *enabled = !status;
+       return hci_result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+{
+       u32 hci_result;
+       u32 value = !enable;
+
+       hci_result = hci_write1(dev, HCI_TR_BACKLIGHT, value);
+       return hci_result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static struct proc_dir_entry *toshiba_proc_dir /*= 0*/;
+
+static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
+{
+       u32 hci_result;
+       u32 value;
+       int brightness = 0;
+
+       if (dev->tr_backlight_supported) {
+               bool enabled;
+               int ret = get_tr_backlight_status(dev, &enabled);
+
+               if (ret)
+                       return ret;
+               if (enabled)
+                       return 0;
+               brightness++;
+       }
+
+       hci_result = hci_read1(dev, HCI_LCD_BRIGHTNESS, &value);
+       if (hci_result == TOS_SUCCESS)
+               return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+
+       return -EIO;
+}
+
+static int get_lcd_brightness(struct backlight_device *bd)
+{
+       struct toshiba_acpi_dev *dev = bl_get_data(bd);
+
+       return __get_lcd_brightness(dev);
+}
+
+static int lcd_proc_show(struct seq_file *m, void *v)
+{
+       struct toshiba_acpi_dev *dev = m->private;
+       int value;
+       int levels;
+
+       if (!dev->backlight_dev)
+               return -ENODEV;
+
+       levels = dev->backlight_dev->props.max_brightness + 1;
+       value = get_lcd_brightness(dev->backlight_dev);
+       if (value >= 0) {
+               seq_printf(m, "brightness:              %d\n", value);
+               seq_printf(m, "brightness_levels:       %d\n", levels);
+               return 0;
+       }
+
+       pr_err("Error reading LCD brightness\n");
+       return -EIO;
+}
+
+static int lcd_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, lcd_proc_show, PDE_DATA(inode));
+}
+
+static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
+{
+       u32 hci_result;
+
+       if (dev->tr_backlight_supported) {
+               bool enable = !value;
+               int ret = set_tr_backlight_status(dev, enable);
+
+               if (ret)
+                       return ret;
+               if (value)
+                       value--;
+       }
+
+       value = value << HCI_LCD_BRIGHTNESS_SHIFT;
+       hci_result = hci_write1(dev, HCI_LCD_BRIGHTNESS, value);
+       return hci_result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_lcd_status(struct backlight_device *bd)
+{
+       struct toshiba_acpi_dev *dev = bl_get_data(bd);
+
+       return set_lcd_brightness(dev, bd->props.brightness);
+}
+
+static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *pos)
 {
        struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
@@ -1005,6 +1398,7 @@ static int video_proc_show(struct seq_file *m, void *v)
                int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
                int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
                int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
+
                seq_printf(m, "lcd_out:                 %d\n", is_lcd);
                seq_printf(m, "crt_out:                 %d\n", is_crt);
                seq_printf(m, "tv_out:                  %d\n", is_tv);
@@ -1042,9 +1436,9 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
 
        buffer = cmd;
 
-       /* scan expression.  Multiple expressions may be delimited with ;
-        *
-        *  NOTE: to keep scanning simple, invalid fields are ignored
+       /*
+        * Scan expression.  Multiple expressions may be delimited with ;
+        * NOTE: To keep scanning simple, invalid fields are ignored.
         */
        while (remain) {
                if (sscanf(buffer, " lcd_out : %i", &value) == 1)
@@ -1053,12 +1447,11 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
                        crt_out = value & 1;
                else if (sscanf(buffer, " tv_out : %i", &value) == 1)
                        tv_out = value & 1;
-               /* advance to one character past the next ; */
+               /* Advance to one character past the next ; */
                do {
                        ++buffer;
                        --remain;
-               }
-               while (remain && *(buffer - 1) != ';');
+               } while (remain && *(buffer - 1) != ';');
        }
 
        kfree(cmd);
@@ -1066,13 +1459,15 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
        ret = get_video_status(dev, &video_out);
        if (!ret) {
                unsigned int new_video_out = video_out;
+
                if (lcd_out != -1)
                        _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
                if (crt_out != -1)
                        _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out);
                if (tv_out != -1)
                        _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out);
-               /* To avoid unnecessary video disruption, only write the new
+               /*
+                * To avoid unnecessary video disruption, only write the new
                 * video setting if something changed. */
                if (new_video_out != video_out)
                        ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
@@ -1135,10 +1530,10 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
        if (sscanf(cmd, " force_on : %i", &value) == 1 &&
            value >= 0 && value <= 1) {
                hci_result = hci_write1(dev, HCI_FAN, value);
-               if (hci_result != TOS_SUCCESS)
-                       return -EIO;
-               else
+               if (hci_result == TOS_SUCCESS)
                        dev->force_fan = value;
+               else
+                       return -EIO;
        } else {
                return -EINVAL;
        }
@@ -1167,11 +1562,13 @@ static int keys_proc_show(struct seq_file *m, void *v)
                        dev->key_event_valid = 1;
                        dev->last_key_event = value;
                } else if (hci_result == TOS_FIFO_EMPTY) {
-                       /* better luck next time */
+                       /* Better luck next time */
                } else if (hci_result == TOS_NOT_SUPPORTED) {
-                       /* This is a workaround for an unresolved issue on
+                       /*
+                        * This is a workaround for an unresolved issue on
                         * some machines where system events sporadically
-                        * become disabled. */
+                        * become disabled.
+                        */
                        hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1);
                        pr_notice("Re-enabled hotkeys\n");
                } else {
@@ -1203,11 +1600,10 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf,
                return -EFAULT;
        cmd[len] = '\0';
 
-       if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) {
+       if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0)
                dev->key_event_valid = 0;
-       } else {
+       else
                return -EINVAL;
-       }
 
        return count;
 }
@@ -1241,7 +1637,8 @@ static const struct file_operations version_proc_fops = {
        .release        = single_release,
 };
 
-/* proc and module init
+/*
+ * Proc and module init
  */
 
 #define PROC_TOSHIBA           "toshiba"
@@ -1286,66 +1683,56 @@ static const struct backlight_ops toshiba_backlight_data = {
 /*
  * Sysfs files
  */
-static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count);
-static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf);
-static ssize_t toshiba_kbd_type_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf);
-static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf);
-static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
-                                           struct device_attribute *attr,
-                                           const char *buf, size_t count);
-static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
-                                          struct device_attribute *attr,
-                                          char *buf);
-static ssize_t toshiba_touchpad_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t count);
-static ssize_t toshiba_touchpad_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf);
-static ssize_t toshiba_position_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf);
-
-static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
-                  toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
-static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL);
-static DEVICE_ATTR(available_kbd_modes, S_IRUGO,
-                  toshiba_available_kbd_modes_show, NULL);
-static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
-                  toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
-static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
-                  toshiba_touchpad_show, toshiba_touchpad_store);
-static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
+static ssize_t version_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION);
+}
+static DEVICE_ATTR_RO(version);
 
-static struct attribute *toshiba_attributes[] = {
-       &dev_attr_kbd_backlight_mode.attr,
-       &dev_attr_kbd_type.attr,
-       &dev_attr_available_kbd_modes.attr,
-       &dev_attr_kbd_backlight_timeout.attr,
-       &dev_attr_touchpad.attr,
-       &dev_attr_position.attr,
-       NULL,
-};
+static ssize_t fan_store(struct device *dev,
+                        struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 result;
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       result = hci_write1(toshiba, HCI_FAN, state);
+       if (result == TOS_FAILURE)
+               return -EIO;
+       else if (result == TOS_NOT_SUPPORTED)
+               return -ENODEV;
+
+       return count;
+}
+
+static ssize_t fan_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 value;
+       int ret;
 
-static umode_t toshiba_sysfs_is_visible(struct kobject *,
-                                       struct attribute *, int);
+       ret = get_fan_status(toshiba, &value);
+       if (ret)
+               return ret;
 
-static struct attribute_group toshiba_attr_group = {
-       .is_visible = toshiba_sysfs_is_visible,
-       .attrs = toshiba_attributes,
-};
+       return sprintf(buf, "%d\n", value);
+}
+static DEVICE_ATTR_RW(fan);
 
-static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count)
+static ssize_t kbd_backlight_mode_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        int mode;
@@ -1369,7 +1756,8 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
                        return -EINVAL;
        }
 
-       /* Set the Keyboard Backlight Mode where:
+       /*
+        * Set the Keyboard Backlight Mode where:
         *      Auto - KBD backlight turns off automatically in given time
         *      FN-Z - KBD backlight "toggles" when hotkey pressed
         *      ON   - KBD backlight is always on
@@ -1400,9 +1788,9 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
        return count;
 }
 
-static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
+static ssize_t kbd_backlight_mode_show(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        u32 time;
@@ -1412,19 +1800,20 @@ static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
 
        return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK);
 }
+static DEVICE_ATTR_RW(kbd_backlight_mode);
 
-static ssize_t toshiba_kbd_type_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf)
+static ssize_t kbd_type_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", toshiba->kbd_type);
 }
+static DEVICE_ATTR_RO(kbd_type);
 
-static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
-                                               struct device_attribute *attr,
-                                               char *buf)
+static ssize_t available_kbd_modes_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
 
@@ -1435,10 +1824,11 @@ static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
        return sprintf(buf, "%x %x %x\n",
                       SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF);
 }
+static DEVICE_ATTR_RO(available_kbd_modes);
 
-static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
-                                           struct device_attribute *attr,
-                                           const char *buf, size_t count)
+static ssize_t kbd_backlight_timeout_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        int time;
@@ -1479,9 +1869,9 @@ static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
        return count;
 }
 
-static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
-                                          struct device_attribute *attr,
-                                          char *buf)
+static ssize_t kbd_backlight_timeout_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        u32 time;
@@ -1491,10 +1881,11 @@ static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
 
        return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT);
 }
+static DEVICE_ATTR_RW(kbd_backlight_timeout);
 
-static ssize_t toshiba_touchpad_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t count)
+static ssize_t touchpad_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        int state;
@@ -1514,8 +1905,8 @@ static ssize_t toshiba_touchpad_store(struct device *dev,
        return count;
 }
 
-static ssize_t toshiba_touchpad_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
+static ssize_t touchpad_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        u32 state;
@@ -1527,9 +1918,10 @@ static ssize_t toshiba_touchpad_show(struct device *dev,
 
        return sprintf(buf, "%i\n", state);
 }
+static DEVICE_ATTR_RW(touchpad);
 
-static ssize_t toshiba_position_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
+static ssize_t position_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        u32 xyval, zval, tmp;
@@ -1548,6 +1940,336 @@ static ssize_t toshiba_position_show(struct device *dev,
 
        return sprintf(buf, "%d %d %d\n", x, y, z);
 }
+static DEVICE_ATTR_RO(position);
+
+static ssize_t usb_sleep_charge_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 mode;
+       int ret;
+
+       ret = toshiba_usb_sleep_charge_get(toshiba, &mode);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%x\n", mode & SCI_USB_CHARGE_MODE_MASK);
+}
+
+static ssize_t usb_sleep_charge_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 mode;
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       /*
+        * Check for supported values, where:
+        * 0 - Disabled
+        * 1 - Alternate (Non USB conformant devices that require more power)
+        * 2 - Auto (USB conformant devices)
+        */
+       if (state != 0 && state != 1 && state != 2)
+               return -EINVAL;
+
+       /* Set the USB charging mode to internal value */
+       if (state == 0)
+               mode = SCI_USB_CHARGE_DISABLED;
+       else if (state == 1)
+               mode = SCI_USB_CHARGE_ALTERNATE;
+       else if (state == 2)
+               mode = SCI_USB_CHARGE_AUTO;
+
+       ret = toshiba_usb_sleep_charge_set(toshiba, mode);
+       if (ret)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(usb_sleep_charge);
+
+static ssize_t sleep_functions_on_battery_show(struct device *dev,
+                                              struct device_attribute *attr,
+                                              char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int bat_lvl;
+       int status;
+       int ret;
+       int tmp;
+
+       ret = toshiba_sleep_functions_status_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       /* Determine the status: 0x4 - Enabled | 0x1 - Disabled */
+       tmp = state & SCI_USB_CHARGE_BAT_MASK;
+       status = (tmp == 0x4) ? 1 : 0;
+       /* Determine the battery level set */
+       bat_lvl = state >> HCI_MISC_SHIFT;
+
+       return sprintf(buf, "%d %d\n", status, bat_lvl);
+}
+
+static ssize_t sleep_functions_on_battery_store(struct device *dev,
+                                               struct device_attribute *attr,
+                                               const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 status;
+       int value;
+       int ret;
+       int tmp;
+
+       ret = kstrtoint(buf, 0, &value);
+       if (ret)
+               return ret;
+
+       /*
+        * Set the status of the function:
+        * 0 - Disabled
+        * 1-100 - Enabled
+        */
+       if (value < 0 || value > 100)
+               return -EINVAL;
+
+       if (value == 0) {
+               tmp = toshiba->usbsc_bat_level << HCI_MISC_SHIFT;
+               status = tmp | SCI_USB_CHARGE_BAT_LVL_OFF;
+       } else {
+               tmp = value << HCI_MISC_SHIFT;
+               status = tmp | SCI_USB_CHARGE_BAT_LVL_ON;
+       }
+       ret = toshiba_sleep_functions_status_set(toshiba, status);
+       if (ret < 0)
+               return ret;
+
+       toshiba->usbsc_bat_level = status >> HCI_MISC_SHIFT;
+
+       return count;
+}
+static DEVICE_ATTR_RW(sleep_functions_on_battery);
+
+static ssize_t usb_rapid_charge_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_usb_rapid_charge_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_rapid_charge_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       ret = toshiba_usb_rapid_charge_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(usb_rapid_charge);
+
+static ssize_t usb_sleep_music_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_usb_sleep_music_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_sleep_music_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       ret = toshiba_usb_sleep_music_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(usb_sleep_music);
+
+static ssize_t kbd_function_keys_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int mode;
+       int ret;
+
+       ret = toshiba_function_keys_get(toshiba, &mode);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t kbd_function_keys_store(struct device *dev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int mode;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &mode);
+       if (ret)
+               return ret;
+       /*
+        * Check for the function keys mode where:
+        * 0 - Normal operation (F{1-12} as usual and hotkeys via FN-F{1-12})
+        * 1 - Special functions (Opposite of the above setting)
+        */
+       if (mode != 0 && mode != 1)
+               return -EINVAL;
+
+       ret = toshiba_function_keys_set(toshiba, mode);
+       if (ret)
+               return ret;
+
+       pr_info("Reboot for changes to KBD Function Keys to take effect");
+
+       return count;
+}
+static DEVICE_ATTR_RW(kbd_function_keys);
+
+static ssize_t panel_power_on_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_panel_power_on_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t panel_power_on_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       ret = toshiba_panel_power_on_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       pr_info("Reboot for changes to Panel Power ON to take effect");
+
+       return count;
+}
+static DEVICE_ATTR_RW(panel_power_on);
+
+static ssize_t usb_three_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_usb_three_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_three_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       /*
+        * Check for USB 3 mode where:
+        * 0 - Disabled (Acts like a USB 2 port, saving power)
+        * 1 - Enabled
+        */
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       ret = toshiba_usb_three_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       pr_info("Reboot for changes to USB 3 to take effect");
+
+       return count;
+}
+static DEVICE_ATTR_RW(usb_three);
+
+static struct attribute *toshiba_attributes[] = {
+       &dev_attr_version.attr,
+       &dev_attr_fan.attr,
+       &dev_attr_kbd_backlight_mode.attr,
+       &dev_attr_kbd_type.attr,
+       &dev_attr_available_kbd_modes.attr,
+       &dev_attr_kbd_backlight_timeout.attr,
+       &dev_attr_touchpad.attr,
+       &dev_attr_position.attr,
+       &dev_attr_usb_sleep_charge.attr,
+       &dev_attr_sleep_functions_on_battery.attr,
+       &dev_attr_usb_rapid_charge.attr,
+       &dev_attr_usb_sleep_music.attr,
+       &dev_attr_kbd_function_keys.attr,
+       &dev_attr_panel_power_on.attr,
+       &dev_attr_usb_three.attr,
+       NULL,
+};
 
 static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
                                        struct attribute *attr, int idx)
@@ -1556,7 +2278,9 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
        struct toshiba_acpi_dev *drv = dev_get_drvdata(dev);
        bool exists = true;
 
-       if (attr == &dev_attr_kbd_backlight_mode.attr)
+       if (attr == &dev_attr_fan.attr)
+               exists = (drv->fan_supported) ? true : false;
+       else if (attr == &dev_attr_kbd_backlight_mode.attr)
                exists = (drv->kbd_illum_supported) ? true : false;
        else if (attr == &dev_attr_kbd_backlight_timeout.attr)
                exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
@@ -1564,10 +2288,29 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
                exists = (drv->touchpad_supported) ? true : false;
        else if (attr == &dev_attr_position.attr)
                exists = (drv->accelerometer_supported) ? true : false;
+       else if (attr == &dev_attr_usb_sleep_charge.attr)
+               exists = (drv->usb_sleep_charge_supported) ? true : false;
+       else if (attr == &dev_attr_sleep_functions_on_battery.attr)
+               exists = (drv->usb_sleep_charge_supported) ? true : false;
+       else if (attr == &dev_attr_usb_rapid_charge.attr)
+               exists = (drv->usb_rapid_charge_supported) ? true : false;
+       else if (attr == &dev_attr_usb_sleep_music.attr)
+               exists = (drv->usb_sleep_music_supported) ? true : false;
+       else if (attr == &dev_attr_kbd_function_keys.attr)
+               exists = (drv->kbd_function_keys_supported) ? true : false;
+       else if (attr == &dev_attr_panel_power_on.attr)
+               exists = (drv->panel_power_on_supported) ? true : false;
+       else if (attr == &dev_attr_usb_three.attr)
+               exists = (drv->usb_three_supported) ? true : false;
 
        return exists ? attr->mode : 0;
 }
 
+static struct attribute_group toshiba_attr_group = {
+       .is_visible = toshiba_sysfs_is_visible,
+       .attrs = toshiba_attributes,
+};
+
 /*
  * Hotkeys
  */
@@ -1644,7 +2387,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
        if (scancode == 0x100)
                return;
 
-       /* act on key press; ignore key release */
+       /* Act on key press; ignore key release */
        if (scancode & 0x80)
                return;
 
@@ -1680,7 +2423,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
                                hci_result =
                                        hci_write1(dev, HCI_SYSTEM_EVENT, 1);
                                pr_notice("Re-enabled hotkeys\n");
-                               /* fall through */
+                               /* Fall through */
                        default:
                                retries--;
                                break;
@@ -1802,7 +2545,7 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
        props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
 
-       /* adding an extra level and having 0 change to transflective mode */
+       /* Adding an extra level and having 0 change to transflective mode */
        if (dev->tr_backlight_supported)
                props.max_brightness++;
 
@@ -1973,6 +2716,24 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
        ret = toshiba_accelerometer_supported(dev);
        dev->accelerometer_supported = !ret;
 
+       ret = toshiba_usb_sleep_charge_get(dev, &dummy);
+       dev->usb_sleep_charge_supported = !ret;
+
+       ret = toshiba_usb_rapid_charge_get(dev, &dummy);
+       dev->usb_rapid_charge_supported = !ret;
+
+       ret = toshiba_usb_sleep_music_get(dev, &dummy);
+       dev->usb_sleep_music_supported = !ret;
+
+       ret = toshiba_function_keys_get(dev, &dummy);
+       dev->kbd_function_keys_supported = !ret;
+
+       ret = toshiba_panel_power_on_get(dev, &dummy);
+       dev->panel_power_on_supported = !ret;
+
+       ret = toshiba_usb_three_get(dev, &dummy);
+       dev->usb_three_supported = !ret;
+
        /* Determine whether or not BIOS supports fan and video interfaces */
 
        ret = get_video_status(dev, &dummy);
index 782e82289571b219ee0faa695c9d17cefb832c69..f980ff7166e98e93546f7f79657087983a5388ce 100644 (file)
@@ -179,8 +179,9 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res)
        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
-               if (__check_region(&ioport_resource, *port, length(port, end)))
+               if (!request_region(*port, length(port, end), "pnp"))
                        return 0;
+               release_region(*port, length(port, end));
        }
 
        /* check if the resource is reserved */
@@ -241,8 +242,9 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
-               if (check_mem_region(*addr, length(addr, end)))
+               if (!request_mem_region(*addr, length(addr, end), "pnp"))
                        return 0;
+               release_mem_region(*addr, length(addr, end));
        }
 
        /* check if the resource is reserved */
index a3ecf58096348cbe391522ae83cd85bb0195ea02..b1541f40fd8d19cc0d1ef6a39e86b1dbe8d573fa 100644 (file)
@@ -53,6 +53,7 @@ config PWM_ATMEL
 config PWM_ATMEL_HLCDC_PWM
        tristate "Atmel HLCDC PWM support"
        depends on MFD_ATMEL_HLCDC
+       depends on HAVE_CLK
        help
          Generic PWM framework driver for the PWM output of the HLCDC
          (Atmel High-end LCD Controller). This PWM output is mainly used
@@ -130,6 +131,19 @@ config PWM_FSL_FTM
          To compile this driver as a module, choose M here: the module
          will be called pwm-fsl-ftm.
 
+config PWM_IMG
+       tristate "Imagination Technologies PWM driver"
+       depends on HAS_IOMEM
+       depends on MFD_SYSCON
+       depends on COMMON_CLK
+       depends on MIPS || COMPILE_TEST
+       help
+         Generic PWM framework driver for Imagination Technologies
+         PWM block which supports 4 channels.
+
+         To compile this driver as a module, choose M here: the module
+         will be called pwm-img
+
 config PWM_IMX
        tristate "i.MX PWM support"
        depends on ARCH_MXC
@@ -283,6 +297,16 @@ config PWM_STI
          To compile this driver as a module, choose M here: the module
          will be called pwm-sti.
 
+config PWM_SUN4I
+       tristate "Allwinner PWM support"
+       depends on ARCH_SUNXI || COMPILE_TEST
+       depends on HAS_IOMEM && COMMON_CLK
+       help
+         Generic PWM framework driver for Allwinner SoCs.
+
+         To compile this driver as a module, choose M here: the module
+         will be called pwm-sun4i.
+
 config PWM_TEGRA
        tristate "NVIDIA Tegra PWM support"
        depends on ARCH_TEGRA
index 65259ac1e8de8ce4e5897845ceef79ad3005080b..ec50eb5b5a8fd72e1745f4d43801652d73afb65a 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_PWM_BFIN)                += pwm-bfin.o
 obj-$(CONFIG_PWM_CLPS711X)     += pwm-clps711x.o
 obj-$(CONFIG_PWM_EP93XX)       += pwm-ep93xx.o
 obj-$(CONFIG_PWM_FSL_FTM)      += pwm-fsl-ftm.o
+obj-$(CONFIG_PWM_IMG)          += pwm-img.o
 obj-$(CONFIG_PWM_IMX)          += pwm-imx.o
 obj-$(CONFIG_PWM_JZ4740)       += pwm-jz4740.o
 obj-$(CONFIG_PWM_LP3943)       += pwm-lp3943.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_PWM_ROCKCHIP)    += pwm-rockchip.o
 obj-$(CONFIG_PWM_SAMSUNG)      += pwm-samsung.o
 obj-$(CONFIG_PWM_SPEAR)                += pwm-spear.o
 obj-$(CONFIG_PWM_STI)          += pwm-sti.o
+obj-$(CONFIG_PWM_SUN4I)                += pwm-sun4i.o
 obj-$(CONFIG_PWM_TEGRA)                += pwm-tegra.o
 obj-$(CONFIG_PWM_TIECAP)       += pwm-tiecap.o
 obj-$(CONFIG_PWM_TIEHRPWM)     += pwm-tiehrpwm.o
index 966497d10c6ef201144fd6cd66edef7298b18c99..810aef3f4c3e84586d5dd5719ea5d9c74dcfdcfe 100644 (file)
@@ -192,7 +192,7 @@ static void of_pwmchip_add(struct pwm_chip *chip)
 
 static void of_pwmchip_remove(struct pwm_chip *chip)
 {
-       if (chip->dev && chip->dev->of_node)
+       if (chip->dev)
                of_node_put(chip->dev->of_node);
 }
 
index e7a785fadcdf973390dcda8cfdf6f4b872acb2c3..522f7075bb1a42b14d699d5ee6548fe375fcdd53 100644 (file)
@@ -64,6 +64,9 @@ static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
 
        if (!chip->errata || !chip->errata->slow_clk_erratum) {
                clk_freq = clk_get_rate(new_clk);
+               if (!clk_freq)
+                       return -EINVAL;
+
                clk_period_ns = (u64)NSEC_PER_SEC * 256;
                do_div(clk_period_ns, clk_freq);
        }
@@ -73,6 +76,9 @@ static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
            clk_period_ns > period_ns) {
                new_clk = hlcdc->sys_clk;
                clk_freq = clk_get_rate(new_clk);
+               if (!clk_freq)
+                       return -EINVAL;
+
                clk_period_ns = (u64)NSEC_PER_SEC * 256;
                do_div(clk_period_ns, clk_freq);
        }
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
new file mode 100644 (file)
index 0000000..476171a
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Imagination Technologies Pulse Width Modulator driver
+ *
+ * Copyright (c) 2014-2015, Imagination Technologies
+ *
+ * Based on drivers/pwm/pwm-tegra.c, Copyright (c) 2010, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* PWM registers */
+#define PWM_CTRL_CFG                           0x0000
+#define PWM_CTRL_CFG_NO_SUB_DIV                        0
+#define PWM_CTRL_CFG_SUB_DIV0                  1
+#define PWM_CTRL_CFG_SUB_DIV1                  2
+#define PWM_CTRL_CFG_SUB_DIV0_DIV1             3
+#define PWM_CTRL_CFG_DIV_SHIFT(ch)             ((ch) * 2 + 4)
+#define PWM_CTRL_CFG_DIV_MASK                  0x3
+
+#define PWM_CH_CFG(ch)                         (0x4 + (ch) * 4)
+#define PWM_CH_CFG_TMBASE_SHIFT                        0
+#define PWM_CH_CFG_DUTY_SHIFT                  16
+
+#define PERIP_PWM_PDM_CONTROL                  0x0140
+#define PERIP_PWM_PDM_CONTROL_CH_MASK          0x1
+#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch)     ((ch) * 4)
+
+#define MAX_TMBASE_STEPS                       65536
+
+struct img_pwm_chip {
+       struct device   *dev;
+       struct pwm_chip chip;
+       struct clk      *pwm_clk;
+       struct clk      *sys_clk;
+       void __iomem    *base;
+       struct regmap   *periph_regs;
+};
+
+static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
+{
+       return container_of(chip, struct img_pwm_chip, chip);
+}
+
+static inline void img_pwm_writel(struct img_pwm_chip *chip,
+                                 u32 reg, u32 val)
+{
+       writel(val, chip->base + reg);
+}
+
+static inline u32 img_pwm_readl(struct img_pwm_chip *chip,
+                                        u32 reg)
+{
+       return readl(chip->base + reg);
+}
+
+static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+                         int duty_ns, int period_ns)
+{
+       u32 val, div, duty, timebase;
+       unsigned long mul, output_clk_hz, input_clk_hz;
+       struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+
+       input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
+       output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
+
+       mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
+       if (mul <= MAX_TMBASE_STEPS) {
+               div = PWM_CTRL_CFG_NO_SUB_DIV;
+               timebase = DIV_ROUND_UP(mul, 1);
+       } else if (mul <= MAX_TMBASE_STEPS * 8) {
+               div = PWM_CTRL_CFG_SUB_DIV0;
+               timebase = DIV_ROUND_UP(mul, 8);
+       } else if (mul <= MAX_TMBASE_STEPS * 64) {
+               div = PWM_CTRL_CFG_SUB_DIV1;
+               timebase = DIV_ROUND_UP(mul, 64);
+       } else if (mul <= MAX_TMBASE_STEPS * 512) {
+               div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
+               timebase = DIV_ROUND_UP(mul, 512);
+       } else if (mul > MAX_TMBASE_STEPS * 512) {
+               dev_err(chip->dev,
+                       "failed to configure timebase steps/divider value\n");
+               return -EINVAL;
+       }
+
+       duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
+
+       val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+       val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
+       val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
+               PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm);
+       img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+       val = (duty << PWM_CH_CFG_DUTY_SHIFT) |
+             (timebase << PWM_CH_CFG_TMBASE_SHIFT);
+       img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
+
+       return 0;
+}
+
+static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       u32 val;
+       struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+
+       val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+       val |= BIT(pwm->hwpwm);
+       img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+       regmap_update_bits(pwm_chip->periph_regs, PERIP_PWM_PDM_CONTROL,
+                          PERIP_PWM_PDM_CONTROL_CH_MASK <<
+                          PERIP_PWM_PDM_CONTROL_CH_SHIFT(pwm->hwpwm), 0);
+
+       return 0;
+}
+
+static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       u32 val;
+       struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+
+       val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+       val &= ~BIT(pwm->hwpwm);
+       img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+}
+
+static const struct pwm_ops img_pwm_ops = {
+       .config = img_pwm_config,
+       .enable = img_pwm_enable,
+       .disable = img_pwm_disable,
+       .owner = THIS_MODULE,
+};
+
+static int img_pwm_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct resource *res;
+       struct img_pwm_chip *pwm;
+
+       pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+       if (!pwm)
+               return -ENOMEM;
+
+       pwm->dev = &pdev->dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pwm->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(pwm->base))
+               return PTR_ERR(pwm->base);
+
+       pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                          "img,cr-periph");
+       if (IS_ERR(pwm->periph_regs))
+               return PTR_ERR(pwm->periph_regs);
+
+       pwm->sys_clk = devm_clk_get(&pdev->dev, "sys");
+       if (IS_ERR(pwm->sys_clk)) {
+               dev_err(&pdev->dev, "failed to get system clock\n");
+               return PTR_ERR(pwm->sys_clk);
+       }
+
+       pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+       if (IS_ERR(pwm->pwm_clk)) {
+               dev_err(&pdev->dev, "failed to get pwm clock\n");
+               return PTR_ERR(pwm->pwm_clk);
+       }
+
+       ret = clk_prepare_enable(pwm->sys_clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
+               return ret;
+       }
+
+       ret = clk_prepare_enable(pwm->pwm_clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
+               goto disable_sysclk;
+       }
+
+       pwm->chip.dev = &pdev->dev;
+       pwm->chip.ops = &img_pwm_ops;
+       pwm->chip.base = -1;
+       pwm->chip.npwm = 4;
+
+       ret = pwmchip_add(&pwm->chip);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
+               goto disable_pwmclk;
+       }
+
+       platform_set_drvdata(pdev, pwm);
+       return 0;
+
+disable_pwmclk:
+       clk_disable_unprepare(pwm->pwm_clk);
+disable_sysclk:
+       clk_disable_unprepare(pwm->sys_clk);
+       return ret;
+}
+
+static int img_pwm_remove(struct platform_device *pdev)
+{
+       struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
+       u32 val;
+       unsigned int i;
+
+       for (i = 0; i < pwm_chip->chip.npwm; i++) {
+               val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+               val &= ~BIT(i);
+               img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+       }
+
+       clk_disable_unprepare(pwm_chip->pwm_clk);
+       clk_disable_unprepare(pwm_chip->sys_clk);
+
+       return pwmchip_remove(&pwm_chip->chip);
+}
+
+static const struct of_device_id img_pwm_of_match[] = {
+       { .compatible = "img,pistachio-pwm", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+
+static struct platform_driver img_pwm_driver = {
+       .driver = {
+               .name = "img-pwm",
+               .of_match_table = img_pwm_of_match,
+       },
+       .probe = img_pwm_probe,
+       .remove = img_pwm_remove,
+};
+module_platform_driver(img_pwm_driver);
+
+MODULE_AUTHOR("Sai Masarapu <Sai.Masarapu@imgtec.com>");
+MODULE_DESCRIPTION("Imagination Technologies PWM DAC driver");
+MODULE_LICENSE("GPL v2");
index b95115cdaea7ba813a9c42a8da10c8e8db51279e..92abbd56b9f7183810eb222b8b194f3817838367 100644 (file)
@@ -57,6 +57,7 @@ struct sti_pwm_chip {
        struct regmap_field *pwm_int_en;
        struct pwm_chip chip;
        struct pwm_device *cur;
+       unsigned long configured;
        unsigned int en_count;
        struct mutex sti_pwm_lock; /* To sync between enable/disable calls */
        void __iomem *mmio;
@@ -102,24 +103,6 @@ static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period,
        return 0;
 }
 
-/* Calculate the number of PWM devices configured with a period. */
-static unsigned int sti_pwm_count_configured(struct pwm_chip *chip)
-{
-       struct pwm_device *pwm;
-       unsigned int ncfg = 0;
-       unsigned int i;
-
-       for (i = 0; i < chip->npwm; i++) {
-               pwm = &chip->pwms[i];
-               if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
-                       if (pwm_get_period(pwm))
-                               ncfg++;
-               }
-       }
-
-       return ncfg;
-}
-
 /*
  * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles.
  * The only way to change the period (apart from changing the PWM input clock)
@@ -141,7 +124,7 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        unsigned int ncfg;
        bool period_same = false;
 
-       ncfg = sti_pwm_count_configured(chip);
+       ncfg = hweight_long(pc->configured);
        if (ncfg)
                period_same = (period_ns == pwm_get_period(cur));
 
@@ -197,6 +180,7 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 
                ret = regmap_field_write(pc->pwm_int_en, 0);
 
+               set_bit(pwm->hwpwm, &pc->configured);
                pc->cur = pwm;
 
                dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n",
@@ -254,10 +238,18 @@ static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        mutex_unlock(&pc->sti_pwm_lock);
 }
 
+static void sti_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+
+       clear_bit(pwm->hwpwm, &pc->configured);
+}
+
 static const struct pwm_ops sti_pwm_ops = {
        .config = sti_pwm_config,
        .enable = sti_pwm_enable,
        .disable = sti_pwm_disable,
+       .free = sti_pwm_free,
        .owner = THIS_MODULE,
 };
 
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
new file mode 100644 (file)
index 0000000..cd9dde5
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * Driver for Allwinner sun4i Pulse Width Modulation Controller
+ *
+ * Copyright (C) 2014 Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+
+#define PWM_CTRL_REG           0x0
+
+#define PWM_CH_PRD_BASE                0x4
+#define PWM_CH_PRD_OFFSET      0x4
+#define PWM_CH_PRD(ch)         (PWM_CH_PRD_BASE + PWM_CH_PRD_OFFSET * (ch))
+
+#define PWMCH_OFFSET           15
+#define PWM_PRESCAL_MASK       GENMASK(3, 0)
+#define PWM_PRESCAL_OFF                0
+#define PWM_EN                 BIT(4)
+#define PWM_ACT_STATE          BIT(5)
+#define PWM_CLK_GATING         BIT(6)
+#define PWM_MODE               BIT(7)
+#define PWM_PULSE              BIT(8)
+#define PWM_BYPASS             BIT(9)
+
+#define PWM_RDY_BASE           28
+#define PWM_RDY_OFFSET         1
+#define PWM_RDY(ch)            BIT(PWM_RDY_BASE + PWM_RDY_OFFSET * (ch))
+
+#define PWM_PRD(prd)           (((prd) - 1) << 16)
+#define PWM_PRD_MASK           GENMASK(15, 0)
+
+#define PWM_DTY_MASK           GENMASK(15, 0)
+
+#define BIT_CH(bit, chan)      ((bit) << ((chan) * PWMCH_OFFSET))
+
+static const u32 prescaler_table[] = {
+       120,
+       180,
+       240,
+       360,
+       480,
+       0,
+       0,
+       0,
+       12000,
+       24000,
+       36000,
+       48000,
+       72000,
+       0,
+       0,
+       0, /* Actually 1 but tested separately */
+};
+
+struct sun4i_pwm_data {
+       bool has_prescaler_bypass;
+       bool has_rdy;
+};
+
+struct sun4i_pwm_chip {
+       struct pwm_chip chip;
+       struct clk *clk;
+       void __iomem *base;
+       spinlock_t ctrl_lock;
+       const struct sun4i_pwm_data *data;
+};
+
+static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
+{
+       return container_of(chip, struct sun4i_pwm_chip, chip);
+}
+
+static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *chip,
+                                 unsigned long offset)
+{
+       return readl(chip->base + offset);
+}
+
+static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip,
+                                   u32 val, unsigned long offset)
+{
+       writel(val, chip->base + offset);
+}
+
+static int sun4i_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+                           int duty_ns, int period_ns)
+{
+       struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+       u32 prd, dty, val, clk_gate;
+       u64 clk_rate, div = 0;
+       unsigned int prescaler = 0;
+       int err;
+
+       clk_rate = clk_get_rate(sun4i_pwm->clk);
+
+       if (sun4i_pwm->data->has_prescaler_bypass) {
+               /* First, test without any prescaler when available */
+               prescaler = PWM_PRESCAL_MASK;
+               /*
+                * When not using any prescaler, the clock period in nanoseconds
+                * is not an integer so round it half up instead of
+                * truncating to get less surprising values.
+                */
+               div = clk_rate * period_ns + NSEC_PER_SEC/2;
+               do_div(div, NSEC_PER_SEC);
+               if (div - 1 > PWM_PRD_MASK)
+                       prescaler = 0;
+       }
+
+       if (prescaler == 0) {
+               /* Go up from the first divider */
+               for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) {
+                       if (!prescaler_table[prescaler])
+                               continue;
+                       div = clk_rate;
+                       do_div(div, prescaler_table[prescaler]);
+                       div = div * period_ns;
+                       do_div(div, NSEC_PER_SEC);
+                       if (div - 1 <= PWM_PRD_MASK)
+                               break;
+               }
+
+               if (div - 1 > PWM_PRD_MASK) {
+                       dev_err(chip->dev, "period exceeds the maximum value\n");
+                       return -EINVAL;
+               }
+       }
+
+       prd = div;
+       div *= duty_ns;
+       do_div(div, period_ns);
+       dty = div;
+
+       err = clk_prepare_enable(sun4i_pwm->clk);
+       if (err) {
+               dev_err(chip->dev, "failed to enable PWM clock\n");
+               return err;
+       }
+
+       spin_lock(&sun4i_pwm->ctrl_lock);
+       val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+
+       if (sun4i_pwm->data->has_rdy && (val & PWM_RDY(pwm->hwpwm))) {
+               spin_unlock(&sun4i_pwm->ctrl_lock);
+               clk_disable_unprepare(sun4i_pwm->clk);
+               return -EBUSY;
+       }
+
+       clk_gate = val & BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+       if (clk_gate) {
+               val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+               sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+       }
+
+       val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+       val &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
+       val |= BIT_CH(prescaler, pwm->hwpwm);
+       sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+
+       val = (dty & PWM_DTY_MASK) | PWM_PRD(prd);
+       sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+
+       if (clk_gate) {
+               val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+               val |= clk_gate;
+               sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+       }
+
+       spin_unlock(&sun4i_pwm->ctrl_lock);
+       clk_disable_unprepare(sun4i_pwm->clk);
+
+       return 0;
+}
+
+static int sun4i_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+                                 enum pwm_polarity polarity)
+{
+       struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+       u32 val;
+       int ret;
+
+       ret = clk_prepare_enable(sun4i_pwm->clk);
+       if (ret) {
+               dev_err(chip->dev, "failed to enable PWM clock\n");
+               return ret;
+       }
+
+       spin_lock(&sun4i_pwm->ctrl_lock);
+       val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+
+       if (polarity != PWM_POLARITY_NORMAL)
+               val &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
+       else
+               val |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
+
+       sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+
+       spin_unlock(&sun4i_pwm->ctrl_lock);
+       clk_disable_unprepare(sun4i_pwm->clk);
+
+       return 0;
+}
+
+static int sun4i_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+       u32 val;
+       int ret;
+
+       ret = clk_prepare_enable(sun4i_pwm->clk);
+       if (ret) {
+               dev_err(chip->dev, "failed to enable PWM clock\n");
+               return ret;
+       }
+
+       spin_lock(&sun4i_pwm->ctrl_lock);
+       val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+       val |= BIT_CH(PWM_EN, pwm->hwpwm);
+       val |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+       sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+       spin_unlock(&sun4i_pwm->ctrl_lock);
+
+       return 0;
+}
+
+static void sun4i_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+       u32 val;
+
+       spin_lock(&sun4i_pwm->ctrl_lock);
+       val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+       val &= ~BIT_CH(PWM_EN, pwm->hwpwm);
+       val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+       sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+       spin_unlock(&sun4i_pwm->ctrl_lock);
+
+       clk_disable_unprepare(sun4i_pwm->clk);
+}
+
+static const struct pwm_ops sun4i_pwm_ops = {
+       .config = sun4i_pwm_config,
+       .set_polarity = sun4i_pwm_set_polarity,
+       .enable = sun4i_pwm_enable,
+       .disable = sun4i_pwm_disable,
+       .owner = THIS_MODULE,
+};
+
+static const struct sun4i_pwm_data sun4i_pwm_data_a10 = {
+       .has_prescaler_bypass = false,
+       .has_rdy = false,
+};
+
+static const struct sun4i_pwm_data sun4i_pwm_data_a20 = {
+       .has_prescaler_bypass = true,
+       .has_rdy = true,
+};
+
+static const struct of_device_id sun4i_pwm_dt_ids[] = {
+       {
+               .compatible = "allwinner,sun4i-a10-pwm",
+               .data = &sun4i_pwm_data_a10,
+       }, {
+               .compatible = "allwinner,sun7i-a20-pwm",
+               .data = &sun4i_pwm_data_a20,
+       }, {
+               /* sentinel */
+       },
+};
+MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);
+
+static int sun4i_pwm_probe(struct platform_device *pdev)
+{
+       struct sun4i_pwm_chip *pwm;
+       struct resource *res;
+       u32 val;
+       int i, ret;
+       const struct of_device_id *match;
+
+       match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);
+
+       pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+       if (!pwm)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pwm->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(pwm->base))
+               return PTR_ERR(pwm->base);
+
+       pwm->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(pwm->clk))
+               return PTR_ERR(pwm->clk);
+
+       pwm->chip.dev = &pdev->dev;
+       pwm->chip.ops = &sun4i_pwm_ops;
+       pwm->chip.base = -1;
+       pwm->chip.npwm = 2;
+       pwm->chip.can_sleep = true;
+       pwm->chip.of_xlate = of_pwm_xlate_with_flags;
+       pwm->chip.of_pwm_n_cells = 3;
+       pwm->data = match->data;
+
+       spin_lock_init(&pwm->ctrl_lock);
+
+       ret = pwmchip_add(&pwm->chip);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, pwm);
+
+       ret = clk_prepare_enable(pwm->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to enable PWM clock\n");
+               goto clk_error;
+       }
+
+       val = sun4i_pwm_readl(pwm, PWM_CTRL_REG);
+       for (i = 0; i < pwm->chip.npwm; i++)
+               if (!(val & BIT_CH(PWM_ACT_STATE, i)))
+                       pwm->chip.pwms[i].polarity = PWM_POLARITY_INVERSED;
+       clk_disable_unprepare(pwm->clk);
+
+       return 0;
+
+clk_error:
+       pwmchip_remove(&pwm->chip);
+       return ret;
+}
+
+static int sun4i_pwm_remove(struct platform_device *pdev)
+{
+       struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
+
+       return pwmchip_remove(&pwm->chip);
+}
+
+static struct platform_driver sun4i_pwm_driver = {
+       .driver = {
+               .name = "sun4i-pwm",
+               .of_match_table = sun4i_pwm_dt_ids,
+       },
+       .probe = sun4i_pwm_probe,
+       .remove = sun4i_pwm_remove,
+};
+module_platform_driver(sun4i_pwm_driver);
+
+MODULE_ALIAS("platform:sun4i-pwm");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner sun4i PWM driver");
+MODULE_LICENSE("GPL v2");
index 5b97cae5423a0fead0b3f944c6d7ee933f58459e..cabd7d8e05cc0fdd79e42da12046d43d5e7032af 100644 (file)
@@ -87,7 +87,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
         * cycles at the PWM clock rate will take period_ns nanoseconds.
         */
        rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
-       hz = 1000000000ul / period_ns;
+       hz = NSEC_PER_SEC / period_ns;
 
        rate = (rate + (hz / 2)) / hz;
 
index f64c5decb747a8be4425cca3e81da96200cf6812..47295940a868786e91712847a9386ea65a74835d 100644 (file)
@@ -815,8 +815,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
        return txd;
 }
 
-static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-                            unsigned long arg)
+static int tsi721_terminate_all(struct dma_chan *dchan)
 {
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc, *_d;
@@ -825,9 +824,6 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 
        dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
 
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENOSYS;
-
        spin_lock_bh(&bdma_chan->lock);
 
        bdma_chan->active = false;
@@ -901,7 +897,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
        mport->dma.device_tx_status = tsi721_tx_status;
        mport->dma.device_issue_pending = tsi721_issue_pending;
        mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
-       mport->dma.device_control = tsi721_device_control;
+       mport->dma.device_terminate_all = tsi721_terminate_all;
 
        err = dma_async_device_register(&mport->dma);
        if (err)
index e8647f7cf25e27378af00ec7799195c2a29e995a..00c5cc3d954636784a14f5d3c03c7ffcff7896b5 100644 (file)
@@ -205,6 +205,7 @@ static int rpm_reg_write(struct qcom_rpm_reg *vreg,
        vreg->val[req->word] |= value << req->shift;
 
        return qcom_rpm_write(vreg->rpm,
+                             QCOM_RPM_ACTIVE_STATE,
                              vreg->resource,
                              vreg->val,
                              vreg->parts->request_len);
index cedb41c95daed56ebc622e6350eeb60d5ac3bda0..b5b5c3d485d637122ad21605ff56948d98f7af6a 100644 (file)
@@ -65,7 +65,7 @@ config RTC_DEBUG
 comment "RTC interfaces"
 
 config RTC_INTF_SYSFS
-       boolean "/sys/class/rtc/rtcN (sysfs)"
+       bool "/sys/class/rtc/rtcN (sysfs)"
        depends on SYSFS
        default RTC_CLASS
        help
@@ -75,7 +75,7 @@ config RTC_INTF_SYSFS
          If unsure, say Y.
 
 config RTC_INTF_PROC
-       boolean "/proc/driver/rtc (procfs for rtcN)"
+       bool "/proc/driver/rtc (procfs for rtcN)"
        depends on PROC_FS
        default RTC_CLASS
        help
@@ -88,7 +88,7 @@ config RTC_INTF_PROC
          If unsure, say Y.
 
 config RTC_INTF_DEV
-       boolean "/dev/rtcN (character devices)"
+       bool "/dev/rtcN (character devices)"
        default RTC_CLASS
        help
          Say yes here if you want to use your RTCs using the /dev
@@ -466,7 +466,7 @@ config RTC_DRV_DM355EVM
          Supports the RTC firmware in the MSP430 on the DM355 EVM.
 
 config RTC_DRV_TWL92330
-       boolean "TI TWL92330/Menelaus"
+       bool "TI TWL92330/Menelaus"
        depends on MENELAUS
        help
          If you say yes here you get support for the RTC on the
index 8c3bfcb115b787318d0ce942c07c655f79fc1cb7..803869c7d7c206f6dbff5cc7427ea3faf8e1bf2b 100644 (file)
@@ -399,21 +399,21 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
         * of this RTC chip.  We check for it anyways in case support is
         * added in the future.
         */
-       if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+       if (unlikely(seconds >= 0xc0))
                alrm->time.tm_sec = -1;
        else
                alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
                                                       RTC_SECS_BCD_MASK,
                                                       RTC_SECS_BIN_MASK);
 
-       if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+       if (unlikely(minutes >= 0xc0))
                alrm->time.tm_min = -1;
        else
                alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
                                                       RTC_MINS_BCD_MASK,
                                                       RTC_MINS_BIN_MASK);
 
-       if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+       if (unlikely(hours >= 0xc0))
                alrm->time.tm_hour = -1;
        else
                alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
@@ -472,13 +472,13 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
         * field, and we only support four fields.  We put the support
         * here anyways for the future.
         */
-       if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+       if (unlikely(seconds >= 0xc0))
                seconds = 0xff;
 
-       if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+       if (unlikely(minutes >= 0xc0))
                minutes = 0xff;
 
-       if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+       if (unlikely(hours >= 0xc0))
                hours = 0xff;
 
        alrm->time.tm_mon       = -1;
@@ -528,7 +528,6 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 /* ----------------------------------------------------------------------- */
 /* /dev/rtcX Interface functions */
 
-#ifdef CONFIG_RTC_INTF_DEV
 /**
  * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off.
  * @dev: pointer to device structure.
@@ -557,7 +556,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
        return 0;
 }
-#endif
 /* ----------------------------------------------------------------------- */
 
 
@@ -1612,7 +1610,7 @@ ds1685_rtc_sysfs_time_regs_show(struct device *dev,
                ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
 
        /* Make sure we actually matched something. */
-       if (!bcd_reg_info && !bin_reg_info)
+       if (!bcd_reg_info || !bin_reg_info)
                return -EINVAL;
 
        /* bcd_reg_info->reg == bin_reg_info->reg. */
@@ -1650,7 +1648,7 @@ ds1685_rtc_sysfs_time_regs_store(struct device *dev,
                return -EINVAL;
 
        /* Make sure we actually matched something. */
-       if (!bcd_reg_info && !bin_reg_info)
+       if (!bcd_reg_info || !bin_reg_info)
                return -EINVAL;
 
        /* Check for a valid range. */
index aa3e2c7cd83c8a5275b900f37b23912884a789c7..a6f5ee80fadc5f3e8d462fc43b615a1134d509f6 100644 (file)
@@ -178,12 +178,6 @@ static void pci_esp_dma_drain(struct esp *esp)
                        break;
                cpu_relax();
        }
-       if (resid > 1) {
-               /* FIFO not cleared */
-               shost_printk(KERN_INFO, esp->host,
-                            "FIFO not cleared, %d bytes left\n",
-                            resid);
-       }
 
        /*
         * When there is a residual BCMPLT will never be set
index 96241b20fd2c8b690e57139e8e404a522dcb2392..a7cc618378187fb7d38e504d51befb93e78b223c 100644 (file)
@@ -585,7 +585,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
                        "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
                return NULL;
        }
-       shost->dma_boundary = pcidev->dma_mask;
        shost->max_id = BE2_MAX_SESSIONS;
        shost->max_channel = 0;
        shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
index 95d581c45413fb38dd778e2b4695e7f7159c9c0b..a1cfbd3dda4713d05f254b0a9ef33131f6c12c0b 100644 (file)
@@ -6831,10 +6831,8 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
                                                char *name)
 {
        struct workqueue_struct *wq = NULL;
-       char wq_name[20];
 
-       snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
-       wq = alloc_ordered_workqueue(wq_name, 0);
+       wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
        if (!wq)
                dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
 
index 73f9feecda72b71552b63eb28b7bec9106f6fd4e..99f43b7fc9ab74256d6f22f17e3c6d75c6e5fcdb 100644 (file)
@@ -1570,9 +1570,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
         * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
         */
        memset(&port_name, 0, 36);
-       snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-               fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
-               fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+       snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
        /*
         * Locate our struct se_node_acl either from an explict NodeACL created
         * via ConfigFS, or via running in TPG demo mode.
index 0cbc1fb45f10eb90ac74b1a20a2c755c4dc6a3cb..2270bd51f9c2c240c669e562eb77052f89425a83 100644 (file)
@@ -546,7 +546,7 @@ static ssize_t
 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 {
        sg_io_hdr_t *hp = &srp->header;
-       int err = 0;
+       int err = 0, err2;
        int len;
 
        if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
                goto err_out;
        }
 err_out:
-       err = sg_finish_rem_req(srp);
-       return (0 == err) ? count : err;
+       err2 = sg_finish_rem_req(srp);
+       return err ? : err2 ? : count;
 }
 
 static ssize_t
@@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
        }
        /* Rely on write phase to clean out srp status values, so no "else" */
 
+       /*
+        * Free the request as soon as it is complete so that its resources
+        * can be reused without waiting for userspace to read() the
+        * result.  But keep the associated bio (if any) around until
+        * blk_rq_unmap_user() can be called from user context.
+        */
+       srp->rq = NULL;
+       if (rq->cmd != rq->__cmd)
+               kfree(rq->cmd);
+       __blk_put_request(rq->q, rq);
+
        write_lock_irqsave(&sfp->rq_list_lock, iflags);
        if (unlikely(srp->orphan)) {
                if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
                        return -ENOMEM;
        }
 
-       rq = blk_get_request(q, rw, GFP_ATOMIC);
+       /*
+        * NOTE
+        *
+        * With scsi-mq enabled, there are a fixed number of preallocated
+        * requests equal in number to shost->can_queue.  If all of the
+        * preallocated requests are already in use, then using GFP_ATOMIC with
+        * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
+        * will cause blk_get_request() to sleep until an active command
+        * completes, freeing up a request.  Neither option is ideal, but
+        * GFP_KERNEL is the better choice to prevent userspace from getting an
+        * unexpected EWOULDBLOCK.
+        *
+        * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+        * does not sleep except under memory pressure.
+        */
+       rq = blk_get_request(q, rw, GFP_KERNEL);
        if (IS_ERR(rq)) {
                kfree(long_cmdp);
                return PTR_ERR(rq);
@@ -1759,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp)
        SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
                                      "sg_finish_rem_req: res_used=%d\n",
                                      (int) srp->res_used));
-       if (srp->rq) {
-               if (srp->bio)
-                       ret = blk_rq_unmap_user(srp->bio);
+       if (srp->bio)
+               ret = blk_rq_unmap_user(srp->bio);
 
+       if (srp->rq) {
                if (srp->rq->cmd != srp->rq->__cmd)
                        kfree(srp->rq->cmd);
                blk_put_request(srp->rq);
index c52bb5dfaedb1daa52e37076e2012442c6625e90..f164f24a4a556cfe19c91f9031682a8ff6d9f0c5 100644 (file)
@@ -950,6 +950,12 @@ static int virtscsi_probe(struct virtio_device *vdev)
        u32 num_queues;
        struct scsi_host_template *hostt;
 
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
        /* We need to know how many queues before we allocate. */
        num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
 
index 7702664d7ed3257e1670ee7d97ee19cadb1dc7cc..289ad016d92504e9d4ebbc47cf8d244ab6cb68b5 100644 (file)
@@ -870,6 +870,7 @@ fail_free_params:
 }
 
 static struct scsi_host_template wd719x_template = {
+       .module                         = THIS_MODULE,
        .name                           = "Western Digital 719x",
        .queuecommand                   = wd719x_queuecommand,
        .eh_abort_handler               = wd719x_abort,
index f3ee439d6f0e23b41bd54b0ad1bbb7941ec4e505..cd4c293f0dd0d375be6d6d815108aeb588304fec 100644 (file)
@@ -81,7 +81,9 @@ static int __init sh_pm_runtime_init(void)
                if (!of_machine_is_compatible("renesas,emev2") &&
                    !of_machine_is_compatible("renesas,r7s72100") &&
                    !of_machine_is_compatible("renesas,r8a73a4") &&
+#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
                    !of_machine_is_compatible("renesas,r8a7740") &&
+#endif
                    !of_machine_is_compatible("renesas,r8a7778") &&
                    !of_machine_is_compatible("renesas,r8a7779") &&
                    !of_machine_is_compatible("renesas,r8a7790") &&
index 95ccedabba4f9dca37dbd4909e3bc578ec619cd3..ab8dfbef6f1bb681a9ff14ca1e7ca09749b6a199 100644 (file)
@@ -29,7 +29,7 @@ menuconfig SPI
 if SPI
 
 config SPI_DEBUG
-       boolean "Debug support for SPI drivers"
+       bool "Debug support for SPI drivers"
        depends on DEBUG_KERNEL
        help
          Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
@@ -40,8 +40,8 @@ config SPI_DEBUG
 #
 
 config SPI_MASTER
-#      boolean "SPI Master Support"
-       boolean
+#      bool "SPI Master Support"
+       bool
        default SPI
        help
          If your system has an master-capable SPI controller (which
index 1e180c400f1721a45c34295d5b7b72f64e722107..a48a7439a2067d4311f81ce625f89ebcea6d1a24 100644 (file)
@@ -1135,6 +1135,8 @@ static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev)
        case SSB_IDLOW_SSBREV_25:     /* TODO - find the proper REJECT bit */
        case SSB_IDLOW_SSBREV_27:     /* same here */
                return SSB_TMSLOW_REJECT;       /* this is a guess */
+       case SSB_IDLOW_SSBREV:
+               break;
        default:
                WARN(1, KERN_INFO "ssb: Backplane Revision 0x%.8X\n", rev);
        }
index 7eda0b8b7aab36e187fb03b539cb597276d3da43..0a89ad16371f7ded5824b2da96aa971260f27081 100644 (file)
@@ -1,5 +1,5 @@
 config STAGING_BOARD
-       boolean "Staging Board Support"
+       bool "Staging Board Support"
        depends on OF_ADDRESS
        depends on BROKEN
        help
index 9bc6d3db86d9d353fa0b05dcb528df2a869da218..cc34020204874acb0156a7c7c4f82fe55e4c13f8 100644 (file)
@@ -1,5 +1,5 @@
 config USB_EMXX
-       boolean "EMXX USB Function Device Controller"
+       bool "EMXX USB Function Device Controller"
        depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST))
        help
           The Emma Mobile series of SoCs from Renesas Electronics and
index fa38be0982f99e55f74532d73a0171e5d0005b03..24183028bd712b11af46cd4531f60b33b4e57338 100644 (file)
@@ -30,13 +30,13 @@ config IIO_SIMPLE_DUMMY
 if IIO_SIMPLE_DUMMY
 
 config IIO_SIMPLE_DUMMY_EVENTS
-       boolean "Event generation support"
+       bool "Event generation support"
        select IIO_DUMMY_EVGEN
        help
          Add some dummy events to the simple dummy driver.
 
 config IIO_SIMPLE_DUMMY_BUFFER
-       boolean "Buffered capture support"
+       bool "Buffered capture support"
        select IIO_BUFFER
        select IIO_KFIFO_BUF
        help
index 88614b71cf6d2886950a890ca64e25778ecae602..ddf1fa9f67f8f80fbb8237260a0b4bef3a74b0d4 100644 (file)
@@ -270,7 +270,7 @@ void ll_invalidate_aliases(struct inode *inode)
 
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
                            struct lookup_intent *it,
-                           struct dentry *de)
+                           struct inode *inode)
 {
        int rc = 0;
 
@@ -280,19 +280,17 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
        if (it_disposition(it, DISP_LOOKUP_NEG))
                return -ENOENT;
 
-       rc = ll_prep_inode(&de->d_inode, request, NULL, it);
+       rc = ll_prep_inode(&inode, request, NULL, it);
 
        return rc;
 }
 
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 {
        LASSERT(it != NULL);
-       LASSERT(dentry != NULL);
 
-       if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
-               struct inode *inode = dentry->d_inode;
-               struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+       if (it->d.lustre.it_lock_mode && inode != NULL) {
+               struct ll_sb_info *sbi = ll_i2sbi(inode);
 
                CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
                       inode, inode->i_ino, inode->i_generation);
index 7c7ef7ec908e48467207144b682291a607d89514..5ebee6ca0a108330711cd02aefea5f87eaccd771 100644 (file)
@@ -2912,8 +2912,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                        oit.it_op = IT_LOOKUP;
 
                /* Call getattr by fid, so do not provide name at all. */
-               op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
-                                            dentry->d_inode, NULL, 0, 0,
+               op_data = ll_prep_md_op_data(NULL, inode,
+                                            inode, NULL, 0, 0,
                                             LUSTRE_OPC_ANY, NULL);
                if (IS_ERR(op_data))
                        return PTR_ERR(op_data);
@@ -2931,7 +2931,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                        goto out;
                }
 
-               rc = ll_revalidate_it_finish(req, &oit, dentry);
+               rc = ll_revalidate_it_finish(req, &oit, inode);
                if (rc != 0) {
                        ll_intent_release(&oit);
                        goto out;
@@ -2944,7 +2944,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                if (!dentry->d_inode->i_nlink)
                        d_lustre_invalidate(dentry, 0);
 
-               ll_lookup_finish_locks(&oit, dentry);
+               ll_lookup_finish_locks(&oit, inode);
        } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
                struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
                u64 valid = OBD_MD_FLGETATTR;
index d032c2b086ccc535ccd488f1410a251b8811601d..2af1d7286250a32097ac89d1735e08ae3c97ebad 100644 (file)
@@ -786,9 +786,9 @@ extern const struct dentry_operations ll_d_ops;
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
 void ll_invalidate_aliases(struct inode *);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
-                           struct lookup_intent *it, struct dentry *de);
+                           struct lookup_intent *it, struct inode *inode);
 
 /* llite/llite_lib.c */
 extern struct super_operations lustre_super_operations;
index 4f361b77c749a718621c8767ec97663e43a7c87f..890ac190f5faf3300ab8461f08cfe63182078c27 100644 (file)
@@ -481,6 +481,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
        struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
        struct dentry *save = dentry, *retval;
        struct ptlrpc_request *req = NULL;
+       struct inode *inode;
        struct md_op_data *op_data;
        __u32 opc;
        int rc;
@@ -539,12 +540,13 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
                goto out;
        }
 
-       if ((it->it_op & IT_OPEN) && dentry->d_inode &&
-           !S_ISREG(dentry->d_inode->i_mode) &&
-           !S_ISDIR(dentry->d_inode->i_mode)) {
-               ll_release_openhandle(dentry->d_inode, it);
+       inode = dentry->d_inode;
+       if ((it->it_op & IT_OPEN) && inode &&
+           !S_ISREG(inode->i_mode) &&
+           !S_ISDIR(inode->i_mode)) {
+               ll_release_openhandle(inode, it);
        }
-       ll_lookup_finish_locks(it, dentry);
+       ll_lookup_finish_locks(it, inode);
 
        if (dentry == save)
                retval = NULL;
index aebde3289c50de6722062dfdea21fa1c549090cd..50bad55a0c42e3bd9eef6925520cb1e9bddf2209 100644 (file)
@@ -30,7 +30,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
@@ -45,7 +45,7 @@
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
 #include "iscsi_target_device.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #include <target/iscsi/iscsi_transport.h>
 
@@ -968,11 +968,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-               spin_lock_bh(&conn->sess->ttt_lock);
-               cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-               if (cmd->targ_xfer_tag == 0xFFFFFFFF)
-                       cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-               spin_unlock_bh(&conn->sess->ttt_lock);
+               cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
        } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
                cmd->targ_xfer_tag = 0xFFFFFFFF;
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
@@ -1998,6 +1994,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
        cmd->data_direction     = DMA_NONE;
+       cmd->text_in_ptr        = NULL;
 
        return 0;
 }
@@ -2011,9 +2008,13 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        int cmdsn_ret;
 
        if (!text_in) {
-               pr_err("Unable to locate text_in buffer for sendtargets"
-                      " discovery\n");
-               goto reject;
+               cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+               if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
+                       pr_err("Unable to locate text_in buffer for sendtargets"
+                              " discovery\n");
+                       goto reject;
+               }
+               goto empty_sendtargets;
        }
        if (strncmp("SendTargets", text_in, 11) != 0) {
                pr_err("Received Text Data that is not"
@@ -2040,6 +2041,7 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);
 
+empty_sendtargets:
        iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
        if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -3047,11 +3049,7 @@ static int iscsit_send_r2t(
        int_to_scsilun(cmd->se_cmd.orig_fe_lun,
                        (struct scsi_lun *)&hdr->lun);
        hdr->itt                = cmd->init_task_tag;
-       spin_lock_bh(&conn->sess->ttt_lock);
-       r2t->targ_xfer_tag      = conn->sess->targ_xfer_tag++;
-       if (r2t->targ_xfer_tag == 0xFFFFFFFF)
-               r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-       spin_unlock_bh(&conn->sess->ttt_lock);
+       r2t->targ_xfer_tag      = session_get_next_ttt(conn->sess);
        hdr->ttt                = cpu_to_be32(r2t->targ_xfer_tag);
        hdr->statsn             = cpu_to_be32(conn->stat_sn);
        hdr->exp_cmdsn          = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3393,7 +3391,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 
 static int
 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
-                                 enum iscsit_transport_type network_transport)
+                                 enum iscsit_transport_type network_transport,
+                                 int skip_bytes, bool *completed)
 {
        char *payload = NULL;
        struct iscsi_conn *conn = cmd->conn;
@@ -3405,7 +3404,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
        unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
        unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
 
-       buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+       buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
                         SENDTARGETS_BUF_LIMIT);
 
        payload = kzalloc(buffer_len, GFP_KERNEL);
@@ -3484,9 +3483,16 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                                                end_of_buf = 1;
                                                goto eob;
                                        }
-                                       memcpy(payload + payload_len, buf, len);
-                                       payload_len += len;
-                                       target_name_printed = 1;
+
+                                       if (skip_bytes && len <= skip_bytes) {
+                                               skip_bytes -= len;
+                                       } else {
+                                               memcpy(payload + payload_len, buf, len);
+                                               payload_len += len;
+                                               target_name_printed = 1;
+                                               if (len > skip_bytes)
+                                                       skip_bytes = 0;
+                                       }
                                }
 
                                len = sprintf(buf, "TargetAddress="
@@ -3502,15 +3508,24 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                                        end_of_buf = 1;
                                        goto eob;
                                }
-                               memcpy(payload + payload_len, buf, len);
-                               payload_len += len;
+
+                               if (skip_bytes && len <= skip_bytes) {
+                                       skip_bytes -= len;
+                               } else {
+                                       memcpy(payload + payload_len, buf, len);
+                                       payload_len += len;
+                                       if (len > skip_bytes)
+                                               skip_bytes = 0;
+                               }
                        }
                        spin_unlock(&tpg->tpg_np_lock);
                }
                spin_unlock(&tiqn->tiqn_tpg_lock);
 eob:
-               if (end_of_buf)
+               if (end_of_buf) {
+                       *completed = false;
                        break;
+               }
 
                if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
                        break;
@@ -3528,13 +3543,23 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                      enum iscsit_transport_type network_transport)
 {
        int text_length, padding;
+       bool completed = true;
 
-       text_length = iscsit_build_sendtargets_response(cmd, network_transport);
+       text_length = iscsit_build_sendtargets_response(cmd, network_transport,
+                                                       cmd->read_data_done,
+                                                       &completed);
        if (text_length < 0)
                return text_length;
 
+       if (completed) {
+               hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+       } else {
+               hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+               cmd->read_data_done += text_length;
+               if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+                       cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+       }
        hdr->opcode = ISCSI_OP_TEXT_RSP;
-       hdr->flags |= ISCSI_FLAG_CMD_FINAL;
        padding = ((-text_length) & 3);
        hton24(hdr->dlength, text_length);
        hdr->itt = cmd->init_task_tag;
@@ -3543,21 +3568,25 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
        hdr->statsn = cpu_to_be32(cmd->stat_sn);
 
        iscsit_increment_maxcmdsn(cmd, conn->sess);
+       /*
+        * Reset maxcmdsn_inc in multi-part text payload exchanges to
+        * correctly increment MaxCmdSN for each response answering a
+        * non immediate text request with a valid CmdSN.
+        */
+       cmd->maxcmdsn_inc = 0;
        hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
        hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
 
-       pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
-               " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
-               text_length, conn->cid);
+       pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
+               " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
+               cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
+               !!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
+               !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
 
        return text_length + padding;
 }
 EXPORT_SYMBOL(iscsit_build_text_rsp);
 
-/*
- *     FIXME: Add support for F_BIT and C_BIT when the length is longer than
- *     MaxRecvDataSegmentLength.
- */
 static int iscsit_send_text_rsp(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
@@ -4021,9 +4050,15 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
                ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
                break;
        case ISCSI_OP_TEXT:
-               cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
-               if (!cmd)
-                       goto reject;
+               if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+                       cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+                       if (!cmd)
+                               goto reject;
+               } else {
+                       cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+                       if (!cmd)
+                               goto reject;
+               }
 
                ret = iscsit_handle_text_cmd(conn, cmd, buf);
                break;
index ab4915c0d933a07021b076cf15232b85dc9dcf08..47e249dccb5fe7d9652bea77bddc00b35dd98429 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
index 9059c1e0b26e559dc0c1f69b3b97625d7cb806e4..48384b675e624b9b04011fe03750c11170961ef1 100644 (file)
@@ -28,7 +28,7 @@
 #include <target/configfs_macros.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_erl0.h"
@@ -36,7 +36,7 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_configfs.h"
 
 struct target_fabric_configfs *lio_target_fabric_configfs;
@@ -674,12 +674,9 @@ static ssize_t lio_target_nacl_show_info(
                rb += sprintf(page+rb, "InitiatorAlias: %s\n",
                        sess->sess_ops->InitiatorAlias);
 
-               rb += sprintf(page+rb, "LIO Session ID: %u   "
-                       "ISID: 0x%02x %02x %02x %02x %02x %02x  "
-                       "TSIH: %hu  ", sess->sid,
-                       sess->isid[0], sess->isid[1], sess->isid[2],
-                       sess->isid[3], sess->isid[4], sess->isid[5],
-                       sess->tsih);
+               rb += sprintf(page+rb,
+                             "LIO Session ID: %u   ISID: 0x%6ph  TSIH: %hu  ",
+                             sess->sid, sess->isid, sess->tsih);
                rb += sprintf(page+rb, "SessionType: %s\n",
                                (sess->sess_ops->SessionType) ?
                                "Discovery" : "Normal");
@@ -1758,9 +1755,7 @@ static u32 lio_sess_get_initiator_sid(
        /*
         * iSCSI Initiator Session Identifier from RFC-3720.
         */
-       return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
-               sess->isid[0], sess->isid[1], sess->isid[2],
-               sess->isid[3], sess->isid[4], sess->isid[5]);
+       return snprintf(buf, size, "%6phN", sess->isid);
 }
 
 static int lio_queue_data_in(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
deleted file mode 100644 (file)
index cbcff38..0000000
+++ /dev/null
@@ -1,883 +0,0 @@
-#ifndef ISCSI_TARGET_CORE_H
-#define ISCSI_TARGET_CORE_H
-
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
-
-#define ISCSIT_VERSION                 "v4.1.0"
-#define ISCSI_MAX_DATASN_MISSING_COUNT 16
-#define ISCSI_TX_THREAD_TCP_TIMEOUT    2
-#define ISCSI_RX_THREAD_TCP_TIMEOUT    2
-#define SECONDS_FOR_ASYNC_LOGOUT       10
-#define SECONDS_FOR_ASYNC_TEXT         10
-#define SECONDS_FOR_LOGOUT_COMP                15
-#define WHITE_SPACE                    " \t\v\f\n\r"
-#define ISCSIT_MIN_TAGS                        16
-#define ISCSIT_EXTRA_TAGS              8
-#define ISCSIT_TCP_BACKLOG             256
-
-/* struct iscsi_node_attrib sanity values */
-#define NA_DATAOUT_TIMEOUT             3
-#define NA_DATAOUT_TIMEOUT_MAX         60
-#define NA_DATAOUT_TIMEOUT_MIX         2
-#define NA_DATAOUT_TIMEOUT_RETRIES     5
-#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
-#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
-#define NA_NOPIN_TIMEOUT               15
-#define NA_NOPIN_TIMEOUT_MAX           60
-#define NA_NOPIN_TIMEOUT_MIN           3
-#define NA_NOPIN_RESPONSE_TIMEOUT      30
-#define NA_NOPIN_RESPONSE_TIMEOUT_MAX  60
-#define NA_NOPIN_RESPONSE_TIMEOUT_MIN  3
-#define NA_RANDOM_DATAIN_PDU_OFFSETS   0
-#define NA_RANDOM_DATAIN_SEQ_OFFSETS   0
-#define NA_RANDOM_R2T_OFFSETS          0
-
-/* struct iscsi_tpg_attrib sanity values */
-#define TA_AUTHENTICATION              1
-#define TA_LOGIN_TIMEOUT               15
-#define TA_LOGIN_TIMEOUT_MAX           30
-#define TA_LOGIN_TIMEOUT_MIN           5
-#define TA_NETIF_TIMEOUT               2
-#define TA_NETIF_TIMEOUT_MAX           15
-#define TA_NETIF_TIMEOUT_MIN           2
-#define TA_GENERATE_NODE_ACLS          0
-#define TA_DEFAULT_CMDSN_DEPTH         64
-#define TA_DEFAULT_CMDSN_DEPTH_MAX     512
-#define TA_DEFAULT_CMDSN_DEPTH_MIN     1
-#define TA_CACHE_DYNAMIC_ACLS          0
-/* Enabled by default in demo mode (generic_node_acls=1) */
-#define TA_DEMO_MODE_WRITE_PROTECT     1
-/* Disabled by default in production mode w/ explict ACLs */
-#define TA_PROD_MODE_WRITE_PROTECT     0
-#define TA_DEMO_MODE_DISCOVERY         1
-#define TA_DEFAULT_ERL                 0
-#define TA_CACHE_CORE_NPS              0
-/* T10 protection information disabled by default */
-#define TA_DEFAULT_T10_PI              0
-
-#define ISCSI_IOV_DATA_BUFFER          5
-
-enum iscsit_transport_type {
-       ISCSI_TCP                               = 0,
-       ISCSI_SCTP_TCP                          = 1,
-       ISCSI_SCTP_UDP                          = 2,
-       ISCSI_IWARP_TCP                         = 3,
-       ISCSI_IWARP_SCTP                        = 4,
-       ISCSI_INFINIBAND                        = 5,
-};
-
-/* RFC-3720 7.1.4  Standard Connection State Diagram for a Target */
-enum target_conn_state_table {
-       TARG_CONN_STATE_FREE                    = 0x1,
-       TARG_CONN_STATE_XPT_UP                  = 0x3,
-       TARG_CONN_STATE_IN_LOGIN                = 0x4,
-       TARG_CONN_STATE_LOGGED_IN               = 0x5,
-       TARG_CONN_STATE_IN_LOGOUT               = 0x6,
-       TARG_CONN_STATE_LOGOUT_REQUESTED        = 0x7,
-       TARG_CONN_STATE_CLEANUP_WAIT            = 0x8,
-};
-
-/* RFC-3720 7.3.2  Session State Diagram for a Target */
-enum target_sess_state_table {
-       TARG_SESS_STATE_FREE                    = 0x1,
-       TARG_SESS_STATE_ACTIVE                  = 0x2,
-       TARG_SESS_STATE_LOGGED_IN               = 0x3,
-       TARG_SESS_STATE_FAILED                  = 0x4,
-       TARG_SESS_STATE_IN_CONTINUE             = 0x5,
-};
-
-/* struct iscsi_data_count->type */
-enum data_count_type {
-       ISCSI_RX_DATA   = 1,
-       ISCSI_TX_DATA   = 2,
-};
-
-/* struct iscsi_datain_req->dr_complete */
-enum datain_req_comp_table {
-       DATAIN_COMPLETE_NORMAL                  = 1,
-       DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
-       DATAIN_COMPLETE_CONNECTION_RECOVERY     = 3,
-};
-
-/* struct iscsi_datain_req->recovery */
-enum datain_req_rec_table {
-       DATAIN_WITHIN_COMMAND_RECOVERY          = 1,
-       DATAIN_CONNECTION_RECOVERY              = 2,
-};
-
-/* struct iscsi_portal_group->state */
-enum tpg_state_table {
-       TPG_STATE_FREE                          = 0,
-       TPG_STATE_ACTIVE                        = 1,
-       TPG_STATE_INACTIVE                      = 2,
-       TPG_STATE_COLD_RESET                    = 3,
-};
-
-/* struct iscsi_tiqn->tiqn_state */
-enum tiqn_state_table {
-       TIQN_STATE_ACTIVE                       = 1,
-       TIQN_STATE_SHUTDOWN                     = 2,
-};
-
-/* struct iscsi_cmd->cmd_flags */
-enum cmd_flags_table {
-       ICF_GOT_LAST_DATAOUT                    = 0x00000001,
-       ICF_GOT_DATACK_SNACK                    = 0x00000002,
-       ICF_NON_IMMEDIATE_UNSOLICITED_DATA      = 0x00000004,
-       ICF_SENT_LAST_R2T                       = 0x00000008,
-       ICF_WITHIN_COMMAND_RECOVERY             = 0x00000010,
-       ICF_CONTIG_MEMORY                       = 0x00000020,
-       ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
-       ICF_OOO_CMDSN                           = 0x00000080,
-       ICF_SENDTARGETS_ALL                     = 0x00000100,
-       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
-};
-
-/* struct iscsi_cmd->i_state */
-enum cmd_i_state_table {
-       ISTATE_NO_STATE                 = 0,
-       ISTATE_NEW_CMD                  = 1,
-       ISTATE_DEFERRED_CMD             = 2,
-       ISTATE_UNSOLICITED_DATA         = 3,
-       ISTATE_RECEIVE_DATAOUT          = 4,
-       ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
-       ISTATE_RECEIVED_LAST_DATAOUT    = 6,
-       ISTATE_WITHIN_DATAOUT_RECOVERY  = 7,
-       ISTATE_IN_CONNECTION_RECOVERY   = 8,
-       ISTATE_RECEIVED_TASKMGT         = 9,
-       ISTATE_SEND_ASYNCMSG            = 10,
-       ISTATE_SENT_ASYNCMSG            = 11,
-       ISTATE_SEND_DATAIN              = 12,
-       ISTATE_SEND_LAST_DATAIN         = 13,
-       ISTATE_SENT_LAST_DATAIN         = 14,
-       ISTATE_SEND_LOGOUTRSP           = 15,
-       ISTATE_SENT_LOGOUTRSP           = 16,
-       ISTATE_SEND_NOPIN               = 17,
-       ISTATE_SENT_NOPIN               = 18,
-       ISTATE_SEND_REJECT              = 19,
-       ISTATE_SENT_REJECT              = 20,
-       ISTATE_SEND_R2T                 = 21,
-       ISTATE_SENT_R2T                 = 22,
-       ISTATE_SEND_R2T_RECOVERY        = 23,
-       ISTATE_SENT_R2T_RECOVERY        = 24,
-       ISTATE_SEND_LAST_R2T            = 25,
-       ISTATE_SENT_LAST_R2T            = 26,
-       ISTATE_SEND_LAST_R2T_RECOVERY   = 27,
-       ISTATE_SENT_LAST_R2T_RECOVERY   = 28,
-       ISTATE_SEND_STATUS              = 29,
-       ISTATE_SEND_STATUS_BROKEN_PC    = 30,
-       ISTATE_SENT_STATUS              = 31,
-       ISTATE_SEND_STATUS_RECOVERY     = 32,
-       ISTATE_SENT_STATUS_RECOVERY     = 33,
-       ISTATE_SEND_TASKMGTRSP          = 34,
-       ISTATE_SENT_TASKMGTRSP          = 35,
-       ISTATE_SEND_TEXTRSP             = 36,
-       ISTATE_SENT_TEXTRSP             = 37,
-       ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
-       ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
-       ISTATE_SEND_NOPIN_NO_RESPONSE   = 40,
-       ISTATE_REMOVE                   = 41,
-       ISTATE_FREE                     = 42,
-};
-
-/* Used for iscsi_recover_cmdsn() return values */
-enum recover_cmdsn_ret_table {
-       CMDSN_ERROR_CANNOT_RECOVER      = -1,
-       CMDSN_NORMAL_OPERATION          = 0,
-       CMDSN_LOWER_THAN_EXP            = 1,
-       CMDSN_HIGHER_THAN_EXP           = 2,
-       CMDSN_MAXCMDSN_OVERRUN          = 3,
-};
-
-/* Used for iscsi_handle_immediate_data() return values */
-enum immedate_data_ret_table {
-       IMMEDIATE_DATA_CANNOT_RECOVER   = -1,
-       IMMEDIATE_DATA_NORMAL_OPERATION = 0,
-       IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
-};
-
-/* Used for iscsi_decide_dataout_action() return values */
-enum dataout_action_ret_table {
-       DATAOUT_CANNOT_RECOVER          = -1,
-       DATAOUT_NORMAL                  = 0,
-       DATAOUT_SEND_R2T                = 1,
-       DATAOUT_SEND_TO_TRANSPORT       = 2,
-       DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
-};
-
-/* Used for struct iscsi_node_auth->naf_flags */
-enum naf_flags_table {
-       NAF_USERID_SET                  = 0x01,
-       NAF_PASSWORD_SET                = 0x02,
-       NAF_USERID_IN_SET               = 0x04,
-       NAF_PASSWORD_IN_SET             = 0x08,
-};
-
-/* Used by various struct timer_list to manage iSCSI specific state */
-enum iscsi_timer_flags_table {
-       ISCSI_TF_RUNNING                = 0x01,
-       ISCSI_TF_STOP                   = 0x02,
-       ISCSI_TF_EXPIRED                = 0x04,
-};
-
-/* Used for struct iscsi_np->np_flags */
-enum np_flags_table {
-       NPF_IP_NETWORK          = 0x00,
-};
-
-/* Used for struct iscsi_np->np_thread_state */
-enum np_thread_state_table {
-       ISCSI_NP_THREAD_ACTIVE          = 1,
-       ISCSI_NP_THREAD_INACTIVE        = 2,
-       ISCSI_NP_THREAD_RESET           = 3,
-       ISCSI_NP_THREAD_SHUTDOWN        = 4,
-       ISCSI_NP_THREAD_EXIT            = 5,
-};
-
-struct iscsi_conn_ops {
-       u8      HeaderDigest;                   /* [0,1] == [None,CRC32C] */
-       u8      DataDigest;                     /* [0,1] == [None,CRC32C] */
-       u32     MaxRecvDataSegmentLength;       /* [512..2**24-1] */
-       u32     MaxXmitDataSegmentLength;       /* [512..2**24-1] */
-       u8      OFMarker;                       /* [0,1] == [No,Yes] */
-       u8      IFMarker;                       /* [0,1] == [No,Yes] */
-       u32     OFMarkInt;                      /* [1..65535] */
-       u32     IFMarkInt;                      /* [1..65535] */
-       /*
-        * iSER specific connection parameters
-        */
-       u32     InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
-       u32     TargetRecvDataSegmentLength;    /* [512..2**24-1] */
-};
-
-struct iscsi_sess_ops {
-       char    InitiatorName[224];
-       char    InitiatorAlias[256];
-       char    TargetName[224];
-       char    TargetAlias[256];
-       char    TargetAddress[256];
-       u16     TargetPortalGroupTag;           /* [0..65535] */
-       u16     MaxConnections;                 /* [1..65535] */
-       u8      InitialR2T;                     /* [0,1] == [No,Yes] */
-       u8      ImmediateData;                  /* [0,1] == [No,Yes] */
-       u32     MaxBurstLength;                 /* [512..2**24-1] */
-       u32     FirstBurstLength;               /* [512..2**24-1] */
-       u16     DefaultTime2Wait;               /* [0..3600] */
-       u16     DefaultTime2Retain;             /* [0..3600] */
-       u16     MaxOutstandingR2T;              /* [1..65535] */
-       u8      DataPDUInOrder;                 /* [0,1] == [No,Yes] */
-       u8      DataSequenceInOrder;            /* [0,1] == [No,Yes] */
-       u8      ErrorRecoveryLevel;             /* [0..2] */
-       u8      SessionType;                    /* [0,1] == [Normal,Discovery]*/
-       /*
-        * iSER specific session parameters
-        */
-       u8      RDMAExtensions;                 /* [0,1] == [No,Yes] */
-};
-
-struct iscsi_queue_req {
-       int                     state;
-       struct iscsi_cmd        *cmd;
-       struct list_head        qr_list;
-};
-
-struct iscsi_data_count {
-       int                     data_length;
-       int                     sync_and_steering;
-       enum data_count_type    type;
-       u32                     iov_count;
-       u32                     ss_iov_count;
-       u32                     ss_marker_count;
-       struct kvec             *iov;
-};
-
-struct iscsi_param_list {
-       bool                    iser;
-       struct list_head        param_list;
-       struct list_head        extra_response_list;
-};
-
-struct iscsi_datain_req {
-       enum datain_req_comp_table dr_complete;
-       int                     generate_recovery_values;
-       enum datain_req_rec_table recovery;
-       u32                     begrun;
-       u32                     runlength;
-       u32                     data_length;
-       u32                     data_offset;
-       u32                     data_sn;
-       u32                     next_burst_len;
-       u32                     read_data_done;
-       u32                     seq_send_order;
-       struct list_head        cmd_datain_node;
-} ____cacheline_aligned;
-
-struct iscsi_ooo_cmdsn {
-       u16                     cid;
-       u32                     batch_count;
-       u32                     cmdsn;
-       u32                     exp_cmdsn;
-       struct iscsi_cmd        *cmd;
-       struct list_head        ooo_list;
-} ____cacheline_aligned;
-
-struct iscsi_datain {
-       u8                      flags;
-       u32                     data_sn;
-       u32                     length;
-       u32                     offset;
-} ____cacheline_aligned;
-
-struct iscsi_r2t {
-       int                     seq_complete;
-       int                     recovery_r2t;
-       int                     sent_r2t;
-       u32                     r2t_sn;
-       u32                     offset;
-       u32                     targ_xfer_tag;
-       u32                     xfer_len;
-       struct list_head        r2t_list;
-} ____cacheline_aligned;
-
-struct iscsi_cmd {
-       enum iscsi_timer_flags_table dataout_timer_flags;
-       /* DataOUT timeout retries */
-       u8                      dataout_timeout_retries;
-       /* Within command recovery count */
-       u8                      error_recovery_count;
-       /* iSCSI dependent state for out or order CmdSNs */
-       enum cmd_i_state_table  deferred_i_state;
-       /* iSCSI dependent state */
-       enum cmd_i_state_table  i_state;
-       /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
-       u8                      immediate_cmd;
-       /* Immediate data present */
-       u8                      immediate_data;
-       /* iSCSI Opcode */
-       u8                      iscsi_opcode;
-       /* iSCSI Response Code */
-       u8                      iscsi_response;
-       /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
-       u8                      logout_reason;
-       /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
-       u8                      logout_response;
-       /* MaxCmdSN has been incremented */
-       u8                      maxcmdsn_inc;
-       /* Immediate Unsolicited Dataout */
-       u8                      unsolicited_data;
-       /* Reject reason code */
-       u8                      reject_reason;
-       /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
-       u16                     logout_cid;
-       /* Command flags */
-       enum cmd_flags_table    cmd_flags;
-       /* Initiator Task Tag assigned from Initiator */
-       itt_t                   init_task_tag;
-       /* Target Transfer Tag assigned from Target */
-       u32                     targ_xfer_tag;
-       /* CmdSN assigned from Initiator */
-       u32                     cmd_sn;
-       /* ExpStatSN assigned from Initiator */
-       u32                     exp_stat_sn;
-       /* StatSN assigned to this ITT */
-       u32                     stat_sn;
-       /* DataSN Counter */
-       u32                     data_sn;
-       /* R2TSN Counter */
-       u32                     r2t_sn;
-       /* Last DataSN acknowledged via DataAck SNACK */
-       u32                     acked_data_sn;
-       /* Used for echoing NOPOUT ping data */
-       u32                     buf_ptr_size;
-       /* Used to store DataDigest */
-       u32                     data_crc;
-       /* Counter for MaxOutstandingR2T */
-       u32                     outstanding_r2ts;
-       /* Next R2T Offset when DataSequenceInOrder=Yes */
-       u32                     r2t_offset;
-       /* Iovec current and orig count for iscsi_cmd->iov_data */
-       u32                     iov_data_count;
-       u32                     orig_iov_data_count;
-       /* Number of miscellaneous iovecs used for IP stack calls */
-       u32                     iov_misc_count;
-       /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
-       u32                     pdu_count;
-       /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
-       u32                     pdu_send_order;
-       /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
-       u32                     pdu_start;
-       /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
-       u32                     seq_send_order;
-       /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
-       u32                     seq_count;
-       /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
-       u32                     seq_no;
-       /* Lowest offset in current DataOUT sequence */
-       u32                     seq_start_offset;
-       /* Highest offset in current DataOUT sequence */
-       u32                     seq_end_offset;
-       /* Total size in bytes received so far of READ data */
-       u32                     read_data_done;
-       /* Total size in bytes received so far of WRITE data */
-       u32                     write_data_done;
-       /* Counter for FirstBurstLength key */
-       u32                     first_burst_len;
-       /* Counter for MaxBurstLength key */
-       u32                     next_burst_len;
-       /* Transfer size used for IP stack calls */
-       u32                     tx_size;
-       /* Buffer used for various purposes */
-       void                    *buf_ptr;
-       /* Used by SendTargets=[iqn.,eui.] discovery */
-       void                    *text_in_ptr;
-       /* See include/linux/dma-mapping.h */
-       enum dma_data_direction data_direction;
-       /* iSCSI PDU Header + CRC */
-       unsigned char           pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
-       /* Number of times struct iscsi_cmd is present in immediate queue */
-       atomic_t                immed_queue_count;
-       atomic_t                response_queue_count;
-       spinlock_t              datain_lock;
-       spinlock_t              dataout_timeout_lock;
-       /* spinlock for protecting struct iscsi_cmd->i_state */
-       spinlock_t              istate_lock;
-       /* spinlock for adding within command recovery entries */
-       spinlock_t              error_lock;
-       /* spinlock for adding R2Ts */
-       spinlock_t              r2t_lock;
-       /* DataIN List */
-       struct list_head        datain_list;
-       /* R2T List */
-       struct list_head        cmd_r2t_list;
-       /* Timer for DataOUT */
-       struct timer_list       dataout_timer;
-       /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
-       struct kvec             *iov_data;
-       /* Iovecs for miscellaneous purposes */
-#define ISCSI_MISC_IOVECS                      5
-       struct kvec             iov_misc[ISCSI_MISC_IOVECS];
-       /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
-       struct iscsi_pdu        *pdu_list;
-       /* Current struct iscsi_pdu used for DataPDUInOrder=No */
-       struct iscsi_pdu        *pdu_ptr;
-       /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
-       struct iscsi_seq        *seq_list;
-       /* Current struct iscsi_seq used for DataSequenceInOrder=No */
-       struct iscsi_seq        *seq_ptr;
-       /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
-       struct iscsi_tmr_req    *tmr_req;
-       /* Connection this command is alligient to */
-       struct iscsi_conn       *conn;
-       /* Pointer to connection recovery entry */
-       struct iscsi_conn_recovery *cr;
-       /* Session the command is part of,  used for connection recovery */
-       struct iscsi_session    *sess;
-       /* list_head for connection list */
-       struct list_head        i_conn_node;
-       /* The TCM I/O descriptor that is accessed via container_of() */
-       struct se_cmd           se_cmd;
-       /* Sense buffer that will be mapped into outgoing status */
-#define ISCSI_SENSE_BUFFER_LEN          (TRANSPORT_SENSE_BUFFER + 2)
-       unsigned char           sense_buffer[ISCSI_SENSE_BUFFER_LEN];
-
-       u32                     padding;
-       u8                      pad_bytes[4];
-
-       struct scatterlist      *first_data_sg;
-       u32                     first_data_sg_off;
-       u32                     kmapped_nents;
-       sense_reason_t          sense_reason;
-}  ____cacheline_aligned;
-
-struct iscsi_tmr_req {
-       bool                    task_reassign:1;
-       u32                     exp_data_sn;
-       struct iscsi_cmd        *ref_cmd;
-       struct iscsi_conn_recovery *conn_recovery;
-       struct se_tmr_req       *se_tmr_req;
-};
-
-struct iscsi_conn {
-       wait_queue_head_t       queues_wq;
-       /* Authentication Successful for this connection */
-       u8                      auth_complete;
-       /* State connection is currently in */
-       u8                      conn_state;
-       u8                      conn_logout_reason;
-       u8                      network_transport;
-       enum iscsi_timer_flags_table nopin_timer_flags;
-       enum iscsi_timer_flags_table nopin_response_timer_flags;
-       /* Used to know what thread encountered a transport failure */
-       u8                      which_thread;
-       /* connection id assigned by the Initiator */
-       u16                     cid;
-       /* Remote TCP Port */
-       u16                     login_port;
-       u16                     local_port;
-       int                     net_size;
-       int                     login_family;
-       u32                     auth_id;
-       u32                     conn_flags;
-       /* Used for iscsi_tx_login_rsp() */
-       itt_t                   login_itt;
-       u32                     exp_statsn;
-       /* Per connection status sequence number */
-       u32                     stat_sn;
-       /* IFMarkInt's Current Value */
-       u32                     if_marker;
-       /* OFMarkInt's Current Value */
-       u32                     of_marker;
-       /* Used for calculating OFMarker offset to next PDU */
-       u32                     of_marker_offset;
-#define IPV6_ADDRESS_SPACE                             48
-       unsigned char           login_ip[IPV6_ADDRESS_SPACE];
-       unsigned char           local_ip[IPV6_ADDRESS_SPACE];
-       int                     conn_usage_count;
-       int                     conn_waiting_on_uc;
-       atomic_t                check_immediate_queue;
-       atomic_t                conn_logout_remove;
-       atomic_t                connection_exit;
-       atomic_t                connection_recovery;
-       atomic_t                connection_reinstatement;
-       atomic_t                connection_wait_rcfr;
-       atomic_t                sleep_on_conn_wait_comp;
-       atomic_t                transport_failed;
-       struct completion       conn_post_wait_comp;
-       struct completion       conn_wait_comp;
-       struct completion       conn_wait_rcfr_comp;
-       struct completion       conn_waiting_on_uc_comp;
-       struct completion       conn_logout_comp;
-       struct completion       tx_half_close_comp;
-       struct completion       rx_half_close_comp;
-       /* socket used by this connection */
-       struct socket           *sock;
-       void                    (*orig_data_ready)(struct sock *);
-       void                    (*orig_state_change)(struct sock *);
-#define LOGIN_FLAGS_READ_ACTIVE                1
-#define LOGIN_FLAGS_CLOSED             2
-#define LOGIN_FLAGS_READY              4
-       unsigned long           login_flags;
-       struct delayed_work     login_work;
-       struct delayed_work     login_cleanup_work;
-       struct iscsi_login      *login;
-       struct timer_list       nopin_timer;
-       struct timer_list       nopin_response_timer;
-       struct timer_list       transport_timer;
-       struct task_struct      *login_kworker;
-       /* Spinlock used for add/deleting cmd's from conn_cmd_list */
-       spinlock_t              cmd_lock;
-       spinlock_t              conn_usage_lock;
-       spinlock_t              immed_queue_lock;
-       spinlock_t              nopin_timer_lock;
-       spinlock_t              response_queue_lock;
-       spinlock_t              state_lock;
-       /* libcrypto RX and TX contexts for crc32c */
-       struct hash_desc        conn_rx_hash;
-       struct hash_desc        conn_tx_hash;
-       /* Used for scheduling TX and RX connection kthreads */
-       cpumask_var_t           conn_cpumask;
-       unsigned int            conn_rx_reset_cpumask:1;
-       unsigned int            conn_tx_reset_cpumask:1;
-       /* list_head of struct iscsi_cmd for this connection */
-       struct list_head        conn_cmd_list;
-       struct list_head        immed_queue_list;
-       struct list_head        response_queue_list;
-       struct iscsi_conn_ops   *conn_ops;
-       struct iscsi_login      *conn_login;
-       struct iscsit_transport *conn_transport;
-       struct iscsi_param_list *param_list;
-       /* Used for per connection auth state machine */
-       void                    *auth_protocol;
-       void                    *context;
-       struct iscsi_login_thread_s *login_thread;
-       struct iscsi_portal_group *tpg;
-       struct iscsi_tpg_np     *tpg_np;
-       /* Pointer to parent session */
-       struct iscsi_session    *sess;
-       /* Pointer to thread_set in use for this conn's threads */
-       struct iscsi_thread_set *thread_set;
-       /* list_head for session connection list */
-       struct list_head        conn_list;
-} ____cacheline_aligned;
-
-struct iscsi_conn_recovery {
-       u16                     cid;
-       u32                     cmd_count;
-       u32                     maxrecvdatasegmentlength;
-       u32                     maxxmitdatasegmentlength;
-       int                     ready_for_reallegiance;
-       struct list_head        conn_recovery_cmd_list;
-       spinlock_t              conn_recovery_cmd_lock;
-       struct timer_list       time2retain_timer;
-       struct iscsi_session    *sess;
-       struct list_head        cr_list;
-}  ____cacheline_aligned;
-
-struct iscsi_session {
-       u8                      initiator_vendor;
-       u8                      isid[6];
-       enum iscsi_timer_flags_table time2retain_timer_flags;
-       u8                      version_active;
-       u16                     cid_called;
-       u16                     conn_recovery_count;
-       u16                     tsih;
-       /* state session is currently in */
-       u32                     session_state;
-       /* session wide counter: initiator assigned task tag */
-       itt_t                   init_task_tag;
-       /* session wide counter: target assigned task tag */
-       u32                     targ_xfer_tag;
-       u32                     cmdsn_window;
-
-       /* protects cmdsn values */
-       struct mutex            cmdsn_mutex;
-       /* session wide counter: expected command sequence number */
-       u32                     exp_cmd_sn;
-       /* session wide counter: maximum allowed command sequence number */
-       u32                     max_cmd_sn;
-       struct list_head        sess_ooo_cmdsn_list;
-
-       /* LIO specific session ID */
-       u32                     sid;
-       char                    auth_type[8];
-       /* unique within the target */
-       int                     session_index;
-       /* Used for session reference counting */
-       int                     session_usage_count;
-       int                     session_waiting_on_uc;
-       atomic_long_t           cmd_pdus;
-       atomic_long_t           rsp_pdus;
-       atomic_long_t           tx_data_octets;
-       atomic_long_t           rx_data_octets;
-       atomic_long_t           conn_digest_errors;
-       atomic_long_t           conn_timeout_errors;
-       u64                     creation_time;
-       /* Number of active connections */
-       atomic_t                nconn;
-       atomic_t                session_continuation;
-       atomic_t                session_fall_back_to_erl0;
-       atomic_t                session_logout;
-       atomic_t                session_reinstatement;
-       atomic_t                session_stop_active;
-       atomic_t                sleep_on_sess_wait_comp;
-       /* connection list */
-       struct list_head        sess_conn_list;
-       struct list_head        cr_active_list;
-       struct list_head        cr_inactive_list;
-       spinlock_t              conn_lock;
-       spinlock_t              cr_a_lock;
-       spinlock_t              cr_i_lock;
-       spinlock_t              session_usage_lock;
-       spinlock_t              ttt_lock;
-       struct completion       async_msg_comp;
-       struct completion       reinstatement_comp;
-       struct completion       session_wait_comp;
-       struct completion       session_waiting_on_uc_comp;
-       struct timer_list       time2retain_timer;
-       struct iscsi_sess_ops   *sess_ops;
-       struct se_session       *se_sess;
-       struct iscsi_portal_group *tpg;
-} ____cacheline_aligned;
-
-struct iscsi_login {
-       u8 auth_complete;
-       u8 checked_for_existing;
-       u8 current_stage;
-       u8 leading_connection;
-       u8 first_request;
-       u8 version_min;
-       u8 version_max;
-       u8 login_complete;
-       u8 login_failed;
-       bool zero_tsih;
-       char isid[6];
-       u32 cmd_sn;
-       itt_t init_task_tag;
-       u32 initial_exp_statsn;
-       u32 rsp_length;
-       u16 cid;
-       u16 tsih;
-       char req[ISCSI_HDR_LEN];
-       char rsp[ISCSI_HDR_LEN];
-       char *req_buf;
-       char *rsp_buf;
-       struct iscsi_conn *conn;
-       struct iscsi_np *np;
-} ____cacheline_aligned;
-
-struct iscsi_node_attrib {
-       u32                     dataout_timeout;
-       u32                     dataout_timeout_retries;
-       u32                     default_erl;
-       u32                     nopin_timeout;
-       u32                     nopin_response_timeout;
-       u32                     random_datain_pdu_offsets;
-       u32                     random_datain_seq_offsets;
-       u32                     random_r2t_offsets;
-       u32                     tmr_cold_reset;
-       u32                     tmr_warm_reset;
-       struct iscsi_node_acl *nacl;
-};
-
-struct se_dev_entry_s;
-
-struct iscsi_node_auth {
-       enum naf_flags_table    naf_flags;
-       int                     authenticate_target;
-       /* Used for iscsit_global->discovery_auth,
-        * set to zero (auth disabled) by default */
-       int                     enforce_discovery_auth;
-#define MAX_USER_LEN                           256
-#define MAX_PASS_LEN                           256
-       char                    userid[MAX_USER_LEN];
-       char                    password[MAX_PASS_LEN];
-       char                    userid_mutual[MAX_USER_LEN];
-       char                    password_mutual[MAX_PASS_LEN];
-};
-
-#include "iscsi_target_stat.h"
-
-struct iscsi_node_stat_grps {
-       struct config_group     iscsi_sess_stats_group;
-       struct config_group     iscsi_conn_stats_group;
-};
-
-struct iscsi_node_acl {
-       struct iscsi_node_attrib node_attrib;
-       struct iscsi_node_auth  node_auth;
-       struct iscsi_node_stat_grps node_stat_grps;
-       struct se_node_acl      se_node_acl;
-};
-
-struct iscsi_tpg_attrib {
-       u32                     authentication;
-       u32                     login_timeout;
-       u32                     netif_timeout;
-       u32                     generate_node_acls;
-       u32                     cache_dynamic_acls;
-       u32                     default_cmdsn_depth;
-       u32                     demo_mode_write_protect;
-       u32                     prod_mode_write_protect;
-       u32                     demo_mode_discovery;
-       u32                     default_erl;
-       u8                      t10_pi;
-       struct iscsi_portal_group *tpg;
-};
-
-struct iscsi_np {
-       int                     np_network_transport;
-       int                     np_ip_proto;
-       int                     np_sock_type;
-       enum np_thread_state_table np_thread_state;
-       bool                    enabled;
-       enum iscsi_timer_flags_table np_login_timer_flags;
-       u32                     np_exports;
-       enum np_flags_table     np_flags;
-       unsigned char           np_ip[IPV6_ADDRESS_SPACE];
-       u16                     np_port;
-       spinlock_t              np_thread_lock;
-       struct completion       np_restart_comp;
-       struct socket           *np_socket;
-       struct __kernel_sockaddr_storage np_sockaddr;
-       struct task_struct      *np_thread;
-       struct timer_list       np_login_timer;
-       void                    *np_context;
-       struct iscsit_transport *np_transport;
-       struct list_head        np_list;
-} ____cacheline_aligned;
-
-struct iscsi_tpg_np {
-       struct iscsi_np         *tpg_np;
-       struct iscsi_portal_group *tpg;
-       struct iscsi_tpg_np     *tpg_np_parent;
-       struct list_head        tpg_np_list;
-       struct list_head        tpg_np_child_list;
-       struct list_head        tpg_np_parent_list;
-       struct se_tpg_np        se_tpg_np;
-       spinlock_t              tpg_np_parent_lock;
-       struct completion       tpg_np_comp;
-       struct kref             tpg_np_kref;
-};
-
-struct iscsi_portal_group {
-       unsigned char           tpg_chap_id;
-       /* TPG State */
-       enum tpg_state_table    tpg_state;
-       /* Target Portal Group Tag */
-       u16                     tpgt;
-       /* Id assigned to target sessions */
-       u16                     ntsih;
-       /* Number of active sessions */
-       u32                     nsessions;
-       /* Number of Network Portals available for this TPG */
-       u32                     num_tpg_nps;
-       /* Per TPG LIO specific session ID. */
-       u32                     sid;
-       /* Spinlock for adding/removing Network Portals */
-       spinlock_t              tpg_np_lock;
-       spinlock_t              tpg_state_lock;
-       struct se_portal_group tpg_se_tpg;
-       struct mutex            tpg_access_lock;
-       struct semaphore        np_login_sem;
-       struct iscsi_tpg_attrib tpg_attrib;
-       struct iscsi_node_auth  tpg_demo_auth;
-       /* Pointer to default list of iSCSI parameters for TPG */
-       struct iscsi_param_list *param_list;
-       struct iscsi_tiqn       *tpg_tiqn;
-       struct list_head        tpg_gnp_list;
-       struct list_head        tpg_list;
-} ____cacheline_aligned;
-
-struct iscsi_wwn_stat_grps {
-       struct config_group     iscsi_stat_group;
-       struct config_group     iscsi_instance_group;
-       struct config_group     iscsi_sess_err_group;
-       struct config_group     iscsi_tgt_attr_group;
-       struct config_group     iscsi_login_stats_group;
-       struct config_group     iscsi_logout_stats_group;
-};
-
-struct iscsi_tiqn {
-#define ISCSI_IQN_LEN                          224
-       unsigned char           tiqn[ISCSI_IQN_LEN];
-       enum tiqn_state_table   tiqn_state;
-       int                     tiqn_access_count;
-       u32                     tiqn_active_tpgs;
-       u32                     tiqn_ntpgs;
-       u32                     tiqn_num_tpg_nps;
-       u32                     tiqn_nsessions;
-       struct list_head        tiqn_list;
-       struct list_head        tiqn_tpg_list;
-       spinlock_t              tiqn_state_lock;
-       spinlock_t              tiqn_tpg_lock;
-       struct se_wwn           tiqn_wwn;
-       struct iscsi_wwn_stat_grps tiqn_stat_grps;
-       int                     tiqn_index;
-       struct iscsi_sess_err_stats  sess_err_stats;
-       struct iscsi_login_stats     login_stats;
-       struct iscsi_logout_stats    logout_stats;
-} ____cacheline_aligned;
-
-struct iscsit_global {
-       /* In core shutdown */
-       u32                     in_shutdown;
-       u32                     active_ts;
-       /* Unique identifier used for the authentication daemon */
-       u32                     auth_id;
-       u32                     inactive_ts;
-       /* Thread Set bitmap count */
-       int                     ts_bitmap_count;
-       /* Thread Set bitmap pointer */
-       unsigned long           *ts_bitmap;
-       /* Used for iSCSI discovery session authentication */
-       struct iscsi_node_acl   discovery_acl;
-       struct iscsi_portal_group       *discovery_tpg;
-};
-
-#endif /* ISCSI_TARGET_CORE_H */
index e93d5a7a3f8168b9330cbd2ff6238f7a588c88f4..fb3b52b124ac3772597a90ccc46cacd1acea825b 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <scsi/iscsi_proto.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
 #include "iscsi_target_util.h"
index 7087c736daa520fd22306e6cc39000b7c9aa08c0..34c3cd1b05ce8a40d9911da3815bc4a44b8a479c 100644 (file)
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
index a0ae5fc0ad75b5079fc7932f6dacbb19b2c68a64..1c197bad6132be98cd267b6ea666fc24e41e00a9 100644 (file)
@@ -21,7 +21,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_transport.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
 #include "iscsi_target_erl0.h"
@@ -939,7 +940,8 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 
        if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
                spin_unlock_bh(&conn->state_lock);
-               iscsit_close_connection(conn);
+               if (conn->conn_transport->transport_type == ISCSI_TCP)
+                       iscsit_close_connection(conn);
                return;
        }
 
index cda4d80cfaef999e45e4ac8a6b894513a3a44ad6..2e561deb30a2b6cd53f186b0e388311da35ad439 100644 (file)
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
index 4ca8fd2a70db4c05597f6b4bfbac171ce58ea8c7..e24f1c7c5862d4af2f0ae53efb3e981f153080db 100644 (file)
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target_erl0.h"
index 713c0c1877ab8d16bb999ab6738b058e11c82ba9..153fb66ac1b83693b2a7ea18580ddd4c94f9b5a9 100644 (file)
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_nego.h"
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_erl2.h"
 #include "iscsi_target_login.h"
-#include "iscsi_target_stat.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
index 62a095f36bf2f78b77d2a9b75cea99b9c2f14ecb..8c02fa34716fae5a40dbf8cb09357bba5df2e7bd 100644 (file)
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nego.h"
index 16454a922e2ba9ff2bc532cb0b896997b991a02e..208cca8a363c86de490aed4dcd1103fcd74b4ecc 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <target/target_core_base.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
index 18c29260b4a213d959463b41083abe6555166985..d4f9e96456978eab75e280e123308db09f6e95d2 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <linux/slab.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
 
index ca41b583f2f6d048927b318d69fd56ae9907c015..e446a09c886b1a2ca1344d87f755806b237fe406 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
index 10339551030767cd64428686bcb66e0a186c530d..5e1349a3b1438ece26d986f31608bd6912393371 100644 (file)
 #include <target/target_core_base.h>
 #include <target/configfs_macros.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
deleted file mode 100644 (file)
index 3ff76b4..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef ISCSI_TARGET_STAT_H
-#define ISCSI_TARGET_STAT_H
-
-/*
- * For struct iscsi_tiqn->tiqn_wwn default groups
- */
-extern struct config_item_type iscsi_stat_instance_cit;
-extern struct config_item_type iscsi_stat_sess_err_cit;
-extern struct config_item_type iscsi_stat_tgt_attr_cit;
-extern struct config_item_type iscsi_stat_login_cit;
-extern struct config_item_type iscsi_stat_logout_cit;
-
-/*
- * For struct iscsi_session->se_sess default groups
- */
-extern struct config_item_type iscsi_stat_sess_cit;
-
-/* iSCSI session error types */
-#define ISCSI_SESS_ERR_UNKNOWN         0
-#define ISCSI_SESS_ERR_DIGEST          1
-#define ISCSI_SESS_ERR_CXN_TIMEOUT     2
-#define ISCSI_SESS_ERR_PDU_FORMAT      3
-
-/* iSCSI session error stats */
-struct iscsi_sess_err_stats {
-       spinlock_t      lock;
-       u32             digest_errors;
-       u32             cxn_timeout_errors;
-       u32             pdu_format_errors;
-       u32             last_sess_failure_type;
-       char            last_sess_fail_rem_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI login failure types (sub oids) */
-#define ISCSI_LOGIN_FAIL_OTHER         2
-#define ISCSI_LOGIN_FAIL_REDIRECT      3
-#define ISCSI_LOGIN_FAIL_AUTHORIZE     4
-#define ISCSI_LOGIN_FAIL_AUTHENTICATE  5
-#define ISCSI_LOGIN_FAIL_NEGOTIATE     6
-
-/* iSCSI login stats */
-struct iscsi_login_stats {
-       spinlock_t      lock;
-       u32             accepts;
-       u32             other_fails;
-       u32             redirects;
-       u32             authorize_fails;
-       u32             authenticate_fails;
-       u32             negotiate_fails;        /* used for notifications */
-       u64             last_fail_time;         /* time stamp (jiffies) */
-       u32             last_fail_type;
-       int             last_intr_fail_ip_family;
-       unsigned char   last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
-       char            last_intr_fail_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI logout stats */
-struct iscsi_logout_stats {
-       spinlock_t      lock;
-       u32             normal_logouts;
-       u32             abnormal_logouts;
-} ____cacheline_aligned;
-
-#endif   /*** ISCSI_TARGET_STAT_H ***/
index 78404b1cc0bf311eb80b738d68eacca99ea446d0..b0224a77e26d3aff2549b71dd4b8c6a816213557 100644 (file)
@@ -23,7 +23,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
index 9053a3c0c6e51675faf45bfbbfd990d48752f67d..bdd127c0e3aed1c718f0bd8b7d656a2c3aefcb71 100644 (file)
@@ -20,7 +20,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nodeattrib.h"
index 601e9cc61e98e754f9a4927f8be3f0ae6d2fc3c3..26aa509964737cbe570f0b87492bc30b8ec4111b 100644 (file)
 #include <linux/list.h>
 #include <linux/bitmap.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target.h"
 
-static LIST_HEAD(active_ts_list);
 static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
 static DEFINE_SPINLOCK(inactive_ts_lock);
 static DEFINE_SPINLOCK(ts_bitmap_lock);
 
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
-       spin_lock(&active_ts_lock);
-       list_add_tail(&ts->ts_list, &active_ts_list);
-       iscsit_global->active_ts++;
-       spin_unlock(&active_ts_lock);
-}
-
 static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
 {
+       if (!list_empty(&ts->ts_list)) {
+               WARN_ON(1);
+               return;
+       }
        spin_lock(&inactive_ts_lock);
        list_add_tail(&ts->ts_list, &inactive_ts_list);
        iscsit_global->inactive_ts++;
        spin_unlock(&inactive_ts_lock);
 }
 
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
-       spin_lock(&active_ts_lock);
-       list_del(&ts->ts_list);
-       iscsit_global->active_ts--;
-       spin_unlock(&active_ts_lock);
-}
-
 static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 {
        struct iscsi_thread_set *ts;
@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 
        ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
-       list_del(&ts->ts_list);
+       list_del_init(&ts->ts_list);
        iscsit_global->inactive_ts--;
        spin_unlock(&inactive_ts_lock);
 
@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
 
 void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
 {
-       iscsi_add_ts_to_active_list(ts);
-
        spin_lock_bh(&ts->ts_state_lock);
        conn->thread_set = ts;
        ts->conn = conn;
@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
 
        if (ts->delay_inactive && (--ts->thread_count == 0)) {
                spin_unlock_bh(&ts->ts_state_lock);
-               iscsi_del_ts_from_active_list(ts);
 
                if (!iscsit_global->in_shutdown)
                        iscsi_deallocate_extra_thread_sets();
@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
 
        if (ts->delay_inactive && (--ts->thread_count == 0)) {
                spin_unlock_bh(&ts->ts_state_lock);
-               iscsi_del_ts_from_active_list(ts);
 
                if (!iscsit_global->in_shutdown)
                        iscsi_deallocate_extra_thread_sets();
index bcd88ec99793ba554496369d8853ee391cef8c11..390df8ed72b26738c53073180063c42847850585 100644 (file)
@@ -25,7 +25,7 @@
 #include <target/target_core_configfs.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
@@ -390,6 +390,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
                        init_task_tag, conn->cid);
        return NULL;
 }
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
 
 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
        struct iscsi_conn *conn,
@@ -939,13 +940,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
        state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
                                ISTATE_SEND_NOPIN_NO_RESPONSE;
        cmd->init_task_tag = RESERVED_ITT;
-       spin_lock_bh(&conn->sess->ttt_lock);
-       cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
-                       0xFFFFFFFF;
-       if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
-               cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-       spin_unlock_bh(&conn->sess->ttt_lock);
-
+       cmd->targ_xfer_tag = (want_response) ?
+                            session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);
index a68508c4fec862b325c6a5015b9493871f166e08..1ab754a671ff301977a90c90246ae5a223352051 100644 (file)
@@ -16,7 +16,6 @@ extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
 extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                               unsigned char * ,__be32 cmdsn);
 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
                        itt_t, u32);
 extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
index d836de200a03bcf24be54004df89c7d6d5039030..44620fb6bd45e96eae5b13dac4b9582a0d86c85e 100644 (file)
@@ -494,6 +494,11 @@ fd_execute_write_same(struct se_cmd *cmd)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }
+       if (cmd->prot_op) {
+               pr_err("WRITE_SAME: Protection information with FILEIO"
+                      " backends not supported\n");
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        sg = &cmd->t_data_sg[0];
 
        if (cmd->t_data_nents > 1 ||
index 78346b850968ed8da28d88f35cf6a3ac15512a1b..d4a4b0fb444a12907fac835f6a3db03600b722fa 100644 (file)
@@ -464,6 +464,11 @@ iblock_execute_write_same(struct se_cmd *cmd)
        sector_t block_lba = cmd->t_task_lba;
        sector_t sectors = sbc_get_write_same_sectors(cmd);
 
+       if (cmd->prot_op) {
+               pr_err("WRITE_SAME: Protection information with IBLOCK"
+                      " backends not supported\n");
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        sg = &cmd->t_data_sg[0];
 
        if (cmd->t_data_nents > 1 ||
index 283cf786ef98be3d0594e847cc9749a072986b80..2de6fb8cee8d83b8338472ee2a51dd02c078e833 100644 (file)
@@ -1874,8 +1874,8 @@ static int core_scsi3_update_aptpl_buf(
                }
 
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-                       pr_err("Unable to update renaming"
-                               " APTPL metadata\n");
+                       pr_err("Unable to update renaming APTPL metadata,"
+                              " reallocating larger buffer\n");
                        ret = -EMSGSIZE;
                        goto out;
                }
@@ -1892,8 +1892,8 @@ static int core_scsi3_update_aptpl_buf(
                        lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-                       pr_err("Unable to update renaming"
-                               " APTPL metadata\n");
+                       pr_err("Unable to update renaming APTPL metadata,"
+                              " reallocating larger buffer\n");
                        ret = -EMSGSIZE;
                        goto out;
                }
@@ -1956,7 +1956,7 @@ static int __core_scsi3_write_aptpl_to_file(
 static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
 {
        unsigned char *buf;
-       int rc;
+       int rc, len = PR_APTPL_BUF_LEN;
 
        if (!aptpl) {
                char *null_buf = "No Registrations or Reservations\n";
@@ -1970,25 +1970,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
 
                return 0;
        }
-
-       buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+       buf = vzalloc(len);
        if (!buf)
                return TCM_OUT_OF_RESOURCES;
 
-       rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+       rc = core_scsi3_update_aptpl_buf(dev, buf, len);
        if (rc < 0) {
-               kfree(buf);
-               return TCM_OUT_OF_RESOURCES;
+               vfree(buf);
+               len *= 2;
+               goto retry;
        }
 
        rc = __core_scsi3_write_aptpl_to_file(dev, buf);
        if (rc != 0) {
                pr_err("SPC-3 PR: Could not update APTPL\n");
-               kfree(buf);
+               vfree(buf);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        dev->t10_pr.pr_aptpl_active = 1;
-       kfree(buf);
+       vfree(buf);
        pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
        return 0;
 }
index cd4bed7b27579b14a0f6e517eed23dacb6c1fe02..9a2f9d3a6e70514ad9351ab6e144709e8b95f91d 100644 (file)
@@ -36,6 +36,9 @@
 #include "target_core_ua.h"
 #include "target_core_alua.h"
 
+static sense_reason_t
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
 {
@@ -251,7 +254,10 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 static sense_reason_t
 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
+       struct se_device *dev = cmd->se_dev;
+       sector_t end_lba = dev->transport->get_blocks(dev) + 1;
        unsigned int sectors = sbc_get_write_same_sectors(cmd);
+       sense_reason_t ret;
 
        if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
                pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -264,6 +270,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
                        sectors, cmd->se_dev->dev_attrib.max_write_same_len);
                return TCM_INVALID_CDB_FIELD;
        }
+       /*
+        * Sanity check for LBA wrap and request past end of device.
+        */
+       if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+           ((cmd->t_task_lba + sectors) > end_lba)) {
+               pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+                      (unsigned long long)end_lba, cmd->t_task_lba, sectors);
+               return TCM_ADDRESS_OUT_OF_RANGE;
+       }
+
        /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
        if (flags[0] & 0x10) {
                pr_warn("WRITE SAME with ANCHOR not supported\n");
@@ -277,12 +293,21 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
                if (!ops->execute_write_same_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+               if (!dev->dev_attrib.emulate_tpws) {
+                       pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
+                              " has emulate_tpws disabled\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
                cmd->execute_cmd = ops->execute_write_same_unmap;
                return 0;
        }
        if (!ops->execute_write_same)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+       ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+       if (ret)
+               return ret;
+
        cmd->execute_cmd = ops->execute_write_same;
        return 0;
 }
@@ -614,14 +639,21 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
        return 0;
 }
 
-static bool
+static sense_reason_t
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
               u32 sectors, bool is_write)
 {
        u8 protect = cdb[1] >> 5;
 
-       if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
-               return true;
+       if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
+               if (protect && !dev->dev_attrib.pi_prot_type) {
+                       pr_err("CDB contains protect bit, but device does not"
+                              " advertise PROTECT=1 feature bit\n");
+                       return TCM_INVALID_CDB_FIELD;
+               }
+               if (cmd->prot_pto)
+                       return TCM_NO_SENSE;
+       }
 
        switch (dev->dev_attrib.pi_prot_type) {
        case TARGET_DIF_TYPE3_PROT:
@@ -629,7 +661,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                break;
        case TARGET_DIF_TYPE2_PROT:
                if (protect)
-                       return false;
+                       return TCM_INVALID_CDB_FIELD;
 
                cmd->reftag_seed = cmd->t_task_lba;
                break;
@@ -638,12 +670,12 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                break;
        case TARGET_DIF_TYPE0_PROT:
        default:
-               return true;
+               return TCM_NO_SENSE;
        }
 
        if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
                                   is_write, cmd))
-               return false;
+               return TCM_INVALID_CDB_FIELD;
 
        cmd->prot_type = dev->dev_attrib.pi_prot_type;
        cmd->prot_length = dev->prot_length * sectors;
@@ -662,7 +694,30 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
                 cmd->prot_op, cmd->prot_checks);
 
-       return true;
+       return TCM_NO_SENSE;
+}
+
+static int
+sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+       if (cdb[1] & 0x10) {
+               if (!dev->dev_attrib.emulate_dpo) {
+                       pr_err("Got CDB: 0x%02x with DPO bit set, but device"
+                              " does not advertise support for DPO\n", cdb[0]);
+                       return -EINVAL;
+               }
+       }
+       if (cdb[1] & 0x8) {
+               if (!dev->dev_attrib.emulate_fua_write ||
+                   !dev->dev_attrib.emulate_write_cache) {
+                       pr_err("Got CDB: 0x%02x with FUA bit set, but device"
+                              " does not advertise support for FUA write\n",
+                              cdb[0]);
+                       return -EINVAL;
+               }
+               cmd->se_cmd_flags |= SCF_FUA;
+       }
+       return 0;
 }
 
 sense_reason_t
@@ -686,8 +741,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -697,8 +756,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -708,8 +771,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -727,11 +794,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -740,11 +809,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -753,11 +824,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -768,6 +841,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return TCM_INVALID_CDB_FIELD;
                sectors = transport_get_sectors_10(cdb);
 
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
                cmd->t_task_lba = transport_lba_32(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 
@@ -777,8 +853,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                cmd->transport_complete_callback = &xdreadwrite_callback;
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
        {
@@ -787,6 +861,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                case XDWRITEREAD_32:
                        sectors = transport_get_sectors_32(cdb);
 
+                       if (sbc_check_dpofua(dev, cmd, cdb))
+                               return TCM_INVALID_CDB_FIELD;
                        /*
                         * Use WRITE_32 and READ_32 opcodes for the emulated
                         * XDWRITE_READ_32 logic.
@@ -801,8 +877,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        cmd->execute_rw = ops->execute_rw;
                        cmd->execute_cmd = sbc_execute_rw;
                        cmd->transport_complete_callback = &xdreadwrite_callback;
-                       if (cdb[1] & 0x8)
-                               cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb);
@@ -888,6 +962,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (!ops->execute_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+               if (!dev->dev_attrib.emulate_tpu) {
+                       pr_err("Got UNMAP, but backend device has"
+                              " emulate_tpu disabled\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
                size = get_unaligned_be16(&cdb[7]);
                cmd->execute_cmd = ops->execute_unmap;
                break;
@@ -955,7 +1034,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                unsigned long long end_lba;
 check_lba:
                end_lba = dev->transport->get_blocks(dev) + 1;
-               if (cmd->t_task_lba + sectors > end_lba) {
+               if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+                   ((cmd->t_task_lba + sectors) > end_lba)) {
                        pr_err("cmd exceeds last lba %llu "
                                "(lba %llu, sectors %u)\n",
                                end_lba, cmd->t_task_lba, sectors);
index 4c71657da56ab3cdc96b5c1f8d784722f10e2c1d..460e9310947399661ce1fbd6ab4ed3e86b4a47e1 100644 (file)
@@ -647,7 +647,7 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
         * support the use of the WRITE SAME (16) command to unmap LBAs.
         */
        if (dev->dev_attrib.emulate_tpws != 0)
-               buf[5] |= 0x40;
+               buf[5] |= 0x40 | 0x20;
 
        return 0;
 }
index d4413698a85f9738d226d5f82793e9d3c6f46abe..ba77a34f659f180357d22ce98c602b77c83882f2 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_INT340X_THERMAL)  += int3400_thermal.o
+obj-$(CONFIG_INT340X_THERMAL)  += int340x_thermal_zone.o
 obj-$(CONFIG_INT340X_THERMAL)  += int3402_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)  += int3403_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)  += processor_thermal_device.o
index 65a98a97df071cdf343776bc1e959dc9808dbbc4..031018e7a65bd72a4ec8ba3a452e16f455e5f3a1 100644 (file)
 
 enum int3400_thermal_uuid {
        INT3400_THERMAL_PASSIVE_1,
-       INT3400_THERMAL_PASSIVE_2,
        INT3400_THERMAL_ACTIVE,
        INT3400_THERMAL_CRITICAL,
-       INT3400_THERMAL_COOLING_MODE,
        INT3400_THERMAL_MAXIMUM_UUID,
 };
 
 static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
        "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
-       "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
        "3A95C389-E4B8-4629-A526-C52C88626BAE",
        "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
-       "16CAF1B7-DD38-40ed-B1C1-1B8A1913D531",
 };
 
 struct int3400_thermal_priv {
@@ -266,13 +262,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
        result = acpi_parse_art(priv->adev->handle, &priv->art_count,
                                &priv->arts, true);
        if (result)
-               goto free_priv;
-
+               dev_dbg(&pdev->dev, "_ART table parsing error\n");
 
        result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
                                &priv->trts, true);
        if (result)
-               goto free_art;
+               dev_dbg(&pdev->dev, "_TRT table parsing error\n");
 
        platform_set_drvdata(pdev, priv);
 
@@ -285,7 +280,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
                                                &int3400_thermal_params, 0, 0);
        if (IS_ERR(priv->thermal)) {
                result = PTR_ERR(priv->thermal);
-               goto free_trt;
+               goto free_art_trt;
        }
 
        priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
@@ -299,9 +294,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
 
 free_zone:
        thermal_zone_device_unregister(priv->thermal);
-free_trt:
+free_art_trt:
        kfree(priv->trts);
-free_art:
        kfree(priv->arts);
 free_priv:
        kfree(priv);
index c5cbc3af3a0539260218492bc5aaa6199d8d5307..69df3d960303170070d3a35f2037602129fbb613 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
 #include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
 
-#define ACPI_ACTIVE_COOLING_MAX_NR 10
-
-struct active_trip {
-       unsigned long temp;
-       int id;
-       bool valid;
-};
+#define INT3402_PERF_CHANGED_EVENT     0x80
+#define INT3402_THERMAL_EVENT          0x90
 
 struct int3402_thermal_data {
-       unsigned long *aux_trips;
-       int aux_trip_nr;
-       unsigned long psv_temp;
-       int psv_trip_id;
-       unsigned long crt_temp;
-       int crt_trip_id;
-       unsigned long hot_temp;
-       int hot_trip_id;
-       struct active_trip act_trips[ACPI_ACTIVE_COOLING_MAX_NR];
        acpi_handle *handle;
+       struct int34x_thermal_zone *int340x_zone;
 };
 
-static int int3402_thermal_get_zone_temp(struct thermal_zone_device *zone,
-                                        unsigned long *temp)
-{
-       struct int3402_thermal_data *d = zone->devdata;
-       unsigned long long tmp;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(d->handle, "_TMP", NULL, &tmp);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
-
-       /* _TMP returns the temperature in tenths of degrees Kelvin */
-       *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
-
-       return 0;
-}
-
-static int int3402_thermal_get_trip_temp(struct thermal_zone_device *zone,
-                                        int trip, unsigned long *temp)
+static void int3402_notify(acpi_handle handle, u32 event, void *data)
 {
-       struct int3402_thermal_data *d = zone->devdata;
-       int i;
-
-       if (trip < d->aux_trip_nr)
-               *temp = d->aux_trips[trip];
-       else if (trip == d->crt_trip_id)
-               *temp = d->crt_temp;
-       else if (trip == d->psv_trip_id)
-               *temp = d->psv_temp;
-       else if (trip == d->hot_trip_id)
-               *temp = d->hot_temp;
-       else {
-               for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-                       if (d->act_trips[i].valid &&
-                           d->act_trips[i].id == trip) {
-                               *temp = d->act_trips[i].temp;
-                               break;
-                       }
-               }
-               if (i == ACPI_ACTIVE_COOLING_MAX_NR)
-                       return -EINVAL;
+       struct int3402_thermal_data *priv = data;
+
+       if (!priv)
+               return;
+
+       switch (event) {
+       case INT3402_PERF_CHANGED_EVENT:
+               break;
+       case INT3402_THERMAL_EVENT:
+               int340x_thermal_zone_device_update(priv->int340x_zone);
+               break;
+       default:
+               break;
        }
-       return 0;
-}
-
-static int int3402_thermal_get_trip_type(struct thermal_zone_device *zone,
-                                        int trip, enum thermal_trip_type *type)
-{
-       struct int3402_thermal_data *d = zone->devdata;
-       int i;
-
-       if (trip < d->aux_trip_nr)
-               *type = THERMAL_TRIP_PASSIVE;
-       else if (trip == d->crt_trip_id)
-               *type = THERMAL_TRIP_CRITICAL;
-       else if (trip == d->hot_trip_id)
-               *type = THERMAL_TRIP_HOT;
-       else if (trip == d->psv_trip_id)
-               *type = THERMAL_TRIP_PASSIVE;
-       else {
-               for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-                       if (d->act_trips[i].valid &&
-                           d->act_trips[i].id == trip) {
-                               *type = THERMAL_TRIP_ACTIVE;
-                               break;
-                       }
-               }
-               if (i == ACPI_ACTIVE_COOLING_MAX_NR)
-                       return -EINVAL;
-       }
-       return 0;
-}
-
-static int int3402_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip,
-                                 unsigned long temp)
-{
-       struct int3402_thermal_data *d = zone->devdata;
-       acpi_status status;
-       char name[10];
-
-       snprintf(name, sizeof(name), "PAT%d", trip);
-       status = acpi_execute_simple_method(d->handle, name,
-                       MILLICELSIUS_TO_DECI_KELVIN(temp));
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       d->aux_trips[trip] = temp;
-       return 0;
-}
-
-static struct thermal_zone_device_ops int3402_thermal_zone_ops = {
-       .get_temp       = int3402_thermal_get_zone_temp,
-       .get_trip_temp  = int3402_thermal_get_trip_temp,
-       .get_trip_type  = int3402_thermal_get_trip_type,
-       .set_trip_temp  = int3402_thermal_set_trip_temp,
-};
-
-static struct thermal_zone_params int3402_thermal_params = {
-       .governor_name = "user_space",
-       .no_hwmon = true,
-};
-
-static int int3402_thermal_get_temp(acpi_handle handle, char *name,
-                                   unsigned long *temp)
-{
-       unsigned long long r;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(handle, name, NULL, &r);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
-       return 0;
 }
 
 static int int3402_thermal_probe(struct platform_device *pdev)
 {
        struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
        struct int3402_thermal_data *d;
-       struct thermal_zone_device *zone;
-       acpi_status status;
-       unsigned long long trip_cnt;
-       int trip_mask = 0, i;
+       int ret;
 
        if (!acpi_has_method(adev->handle, "_TMP"))
                return -ENODEV;
@@ -168,54 +55,33 @@ static int int3402_thermal_probe(struct platform_device *pdev)
        if (!d)
                return -ENOMEM;
 
-       status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
-       if (ACPI_FAILURE(status))
-               trip_cnt = 0;
-       else {
-               d->aux_trips = devm_kzalloc(&pdev->dev,
-                               sizeof(*d->aux_trips) * trip_cnt, GFP_KERNEL);
-               if (!d->aux_trips)
-                       return -ENOMEM;
-               trip_mask = trip_cnt - 1;
-               d->handle = adev->handle;
-               d->aux_trip_nr = trip_cnt;
-       }
-
-       d->crt_trip_id = -1;
-       if (!int3402_thermal_get_temp(adev->handle, "_CRT", &d->crt_temp))
-               d->crt_trip_id = trip_cnt++;
-       d->hot_trip_id = -1;
-       if (!int3402_thermal_get_temp(adev->handle, "_HOT", &d->hot_temp))
-               d->hot_trip_id = trip_cnt++;
-       d->psv_trip_id = -1;
-       if (!int3402_thermal_get_temp(adev->handle, "_PSV", &d->psv_temp))
-               d->psv_trip_id = trip_cnt++;
-       for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-               char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
-               if (int3402_thermal_get_temp(adev->handle, name,
-                                            &d->act_trips[i].temp))
-                       break;
-               d->act_trips[i].id = trip_cnt++;
-               d->act_trips[i].valid = true;
+       d->int340x_zone = int340x_thermal_zone_add(adev, NULL);
+       if (IS_ERR(d->int340x_zone))
+               return PTR_ERR(d->int340x_zone);
+
+       ret = acpi_install_notify_handler(adev->handle,
+                                         ACPI_DEVICE_NOTIFY,
+                                         int3402_notify,
+                                         d);
+       if (ret) {
+               int340x_thermal_zone_remove(d->int340x_zone);
+               return ret;
        }
 
-       zone = thermal_zone_device_register(acpi_device_bid(adev), trip_cnt,
-                                           trip_mask, d,
-                                           &int3402_thermal_zone_ops,
-                                           &int3402_thermal_params,
-                                           0, 0);
-       if (IS_ERR(zone))
-               return PTR_ERR(zone);
-       platform_set_drvdata(pdev, zone);
+       d->handle = adev->handle;
+       platform_set_drvdata(pdev, d);
 
        return 0;
 }
 
 static int int3402_thermal_remove(struct platform_device *pdev)
 {
-       struct thermal_zone_device *zone = platform_get_drvdata(pdev);
+       struct int3402_thermal_data *d = platform_get_drvdata(pdev);
+
+       acpi_remove_notify_handler(d->handle,
+                                  ACPI_DEVICE_NOTIFY, int3402_notify);
+       int340x_thermal_zone_remove(d->int340x_zone);
 
-       thermal_zone_device_unregister(zone);
        return 0;
 }
 
index 0faf500d8a77874d7c1b6c8a1b3e1195fc9e8065..50a7a08e3a15ee1e351953bd5a9b8308d8c9602f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/acpi.h>
 #include <linux/thermal.h>
 #include <linux/platform_device.h>
+#include "int340x_thermal_zone.h"
 
 #define INT3403_TYPE_SENSOR            0x03
 #define INT3403_TYPE_CHARGER           0x0B
 #define INT3403_PERF_CHANGED_EVENT     0x80
 #define INT3403_THERMAL_EVENT          0x90
 
-#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
-#define KELVIN_OFFSET  2732
-#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
-
+/* Preserved structure for future expandbility */
 struct int3403_sensor {
-       struct thermal_zone_device *tzone;
-       unsigned long *thresholds;
-       unsigned long   crit_temp;
-       int             crit_trip_id;
-       unsigned long   psv_temp;
-       int             psv_trip_id;
-
+       struct int34x_thermal_zone *int340x_zone;
 };
 
 struct int3403_performance_state {
@@ -63,126 +55,6 @@ struct int3403_priv {
        void *priv;
 };
 
-static int sys_get_curr_temp(struct thermal_zone_device *tzone,
-                               unsigned long *temp)
-{
-       struct int3403_priv *priv = tzone->devdata;
-       struct acpi_device *device = priv->adev;
-       unsigned long long tmp;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
-
-       return 0;
-}
-
-static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
-               int trip, unsigned long *temp)
-{
-       struct int3403_priv *priv = tzone->devdata;
-       struct acpi_device *device = priv->adev;
-       unsigned long long hyst;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       /*
-        * Thermal hysteresis represents a temperature difference.
-        * Kelvin and Celsius have same degree size. So the
-        * conversion here between tenths of degree Kelvin unit
-        * and Milli-Celsius unit is just to multiply 100.
-        */
-       *temp = hyst * 100;
-
-       return 0;
-}
-
-static int sys_get_trip_temp(struct thermal_zone_device *tzone,
-               int trip, unsigned long *temp)
-{
-       struct int3403_priv *priv = tzone->devdata;
-       struct int3403_sensor *obj = priv->priv;
-
-       if (priv->type != INT3403_TYPE_SENSOR || !obj)
-               return -EINVAL;
-
-       if (trip == obj->crit_trip_id)
-               *temp = obj->crit_temp;
-       else if (trip == obj->psv_trip_id)
-               *temp = obj->psv_temp;
-       else {
-               /*
-                * get_trip_temp is a mandatory callback but
-                * PATx method doesn't return any value, so return
-                * cached value, which was last set from user space
-                */
-               *temp = obj->thresholds[trip];
-       }
-
-       return 0;
-}
-
-static int sys_get_trip_type(struct thermal_zone_device *thermal,
-               int trip, enum thermal_trip_type *type)
-{
-       struct int3403_priv *priv = thermal->devdata;
-       struct int3403_sensor *obj = priv->priv;
-
-       /* Mandatory callback, may not mean much here */
-       if (trip == obj->crit_trip_id)
-               *type = THERMAL_TRIP_CRITICAL;
-       else
-               *type = THERMAL_TRIP_PASSIVE;
-
-       return 0;
-}
-
-int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
-                                                       unsigned long temp)
-{
-       struct int3403_priv *priv = tzone->devdata;
-       struct acpi_device *device = priv->adev;
-       struct int3403_sensor *obj = priv->priv;
-       acpi_status status;
-       char name[10];
-       int ret = 0;
-
-       snprintf(name, sizeof(name), "PAT%d", trip);
-       if (acpi_has_method(device->handle, name)) {
-               status = acpi_execute_simple_method(device->handle, name,
-                               MILLI_CELSIUS_TO_DECI_KELVIN(temp,
-                                                       KELVIN_OFFSET));
-               if (ACPI_FAILURE(status))
-                       ret = -EIO;
-               else
-                       obj->thresholds[trip] = temp;
-       } else {
-               ret = -EIO;
-               dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
-       }
-
-       return ret;
-}
-
-static struct thermal_zone_device_ops tzone_ops = {
-       .get_temp = sys_get_curr_temp,
-       .get_trip_temp = sys_get_trip_temp,
-       .get_trip_type = sys_get_trip_type,
-       .set_trip_temp = sys_set_trip_temp,
-       .get_trip_hyst =  sys_get_trip_hyst,
-};
-
-static struct thermal_zone_params int3403_thermal_params = {
-       .governor_name = "user_space",
-       .no_hwmon = true,
-};
-
 static void int3403_notify(acpi_handle handle,
                u32 event, void *data)
 {
@@ -200,7 +72,7 @@ static void int3403_notify(acpi_handle handle,
        case INT3403_PERF_CHANGED_EVENT:
                break;
        case INT3403_THERMAL_EVENT:
-               thermal_zone_device_update(obj->tzone);
+               int340x_thermal_zone_device_update(obj->int340x_zone);
                break;
        default:
                dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
@@ -208,41 +80,10 @@ static void int3403_notify(acpi_handle handle,
        }
 }
 
-static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
-{
-       unsigned long long crt;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
-
-       return 0;
-}
-
-static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
-{
-       unsigned long long psv;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
-
-       return 0;
-}
-
 static int int3403_sensor_add(struct int3403_priv *priv)
 {
        int result = 0;
-       acpi_status status;
        struct int3403_sensor *obj;
-       unsigned long long trip_cnt;
-       int trip_mask = 0;
 
        obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
        if (!obj)
@@ -250,39 +91,9 @@ static int int3403_sensor_add(struct int3403_priv *priv)
 
        priv->priv = obj;
 
-       status = acpi_evaluate_integer(priv->adev->handle, "PATC", NULL,
-                                               &trip_cnt);
-       if (ACPI_FAILURE(status))
-               trip_cnt = 0;
-
-       if (trip_cnt) {
-               /* We have to cache, thresholds can't be readback */
-               obj->thresholds = devm_kzalloc(&priv->pdev->dev,
-                                       sizeof(*obj->thresholds) * trip_cnt,
-                                       GFP_KERNEL);
-               if (!obj->thresholds) {
-                       result = -ENOMEM;
-                       goto err_free_obj;
-               }
-               trip_mask = BIT(trip_cnt) - 1;
-       }
-
-       obj->psv_trip_id = -1;
-       if (!sys_get_trip_psv(priv->adev, &obj->psv_temp))
-               obj->psv_trip_id = trip_cnt++;
-
-       obj->crit_trip_id = -1;
-       if (!sys_get_trip_crt(priv->adev, &obj->crit_temp))
-               obj->crit_trip_id = trip_cnt++;
-
-       obj->tzone = thermal_zone_device_register(acpi_device_bid(priv->adev),
-                               trip_cnt, trip_mask, priv, &tzone_ops,
-                               &int3403_thermal_params, 0, 0);
-       if (IS_ERR(obj->tzone)) {
-               result = PTR_ERR(obj->tzone);
-               obj->tzone = NULL;
-               goto err_free_obj;
-       }
+       obj->int340x_zone = int340x_thermal_zone_add(priv->adev, NULL);
+       if (IS_ERR(obj->int340x_zone))
+               return PTR_ERR(obj->int340x_zone);
 
        result = acpi_install_notify_handler(priv->adev->handle,
                        ACPI_DEVICE_NOTIFY, int3403_notify,
@@ -293,7 +104,7 @@ static int int3403_sensor_add(struct int3403_priv *priv)
        return 0;
 
  err_free_obj:
-       thermal_zone_device_unregister(obj->tzone);
+       int340x_thermal_zone_remove(obj->int340x_zone);
        return result;
 }
 
@@ -303,7 +114,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
 
        acpi_remove_notify_handler(priv->adev->handle,
                                   ACPI_DEVICE_NOTIFY, int3403_notify);
-       thermal_zone_device_unregister(obj->tzone);
+       int340x_thermal_zone_remove(obj->int340x_zone);
+
        return 0;
 }
 
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
new file mode 100644 (file)
index 0000000..f88b088
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * int340x_thermal_zone.c
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
+
+static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
+                                        unsigned long *temp)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       unsigned long long tmp;
+       acpi_status status;
+
+       if (d->override_ops && d->override_ops->get_temp)
+               return d->override_ops->get_temp(zone, temp);
+
+       status = acpi_evaluate_integer(d->adev->handle, "_TMP", NULL, &tmp);
+       if (ACPI_FAILURE(status))
+               return -EIO;
+
+       if (d->lpat_table) {
+               int conv_temp;
+
+               conv_temp = acpi_lpat_raw_to_temp(d->lpat_table, (int)tmp);
+               if (conv_temp < 0)
+                       return conv_temp;
+
+               *temp = (unsigned long)conv_temp * 10;
+       } else
+               /* _TMP returns the temperature in tenths of degrees Kelvin */
+               *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
+
+       return 0;
+}
+
+static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+                                        int trip, unsigned long *temp)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       int i;
+
+       if (d->override_ops && d->override_ops->get_trip_temp)
+               return d->override_ops->get_trip_temp(zone, trip, temp);
+
+       if (trip < d->aux_trip_nr)
+               *temp = d->aux_trips[trip];
+       else if (trip == d->crt_trip_id)
+               *temp = d->crt_temp;
+       else if (trip == d->psv_trip_id)
+               *temp = d->psv_temp;
+       else if (trip == d->hot_trip_id)
+               *temp = d->hot_temp;
+       else {
+               for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+                       if (d->act_trips[i].valid &&
+                           d->act_trips[i].id == trip) {
+                               *temp = d->act_trips[i].temp;
+                               break;
+                       }
+               }
+               if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+                                        int trip,
+                                        enum thermal_trip_type *type)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       int i;
+
+       if (d->override_ops && d->override_ops->get_trip_type)
+               return d->override_ops->get_trip_type(zone, trip, type);
+
+       if (trip < d->aux_trip_nr)
+               *type = THERMAL_TRIP_PASSIVE;
+       else if (trip == d->crt_trip_id)
+               *type = THERMAL_TRIP_CRITICAL;
+       else if (trip == d->hot_trip_id)
+               *type = THERMAL_TRIP_HOT;
+       else if (trip == d->psv_trip_id)
+               *type = THERMAL_TRIP_PASSIVE;
+       else {
+               for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+                       if (d->act_trips[i].valid &&
+                           d->act_trips[i].id == trip) {
+                               *type = THERMAL_TRIP_ACTIVE;
+                               break;
+                       }
+               }
+               if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
+                                     int trip, unsigned long temp)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       acpi_status status;
+       char name[10];
+
+       if (d->override_ops && d->override_ops->set_trip_temp)
+               return d->override_ops->set_trip_temp(zone, trip, temp);
+
+       snprintf(name, sizeof(name), "PAT%d", trip);
+       status = acpi_execute_simple_method(d->adev->handle, name,
+                       MILLICELSIUS_TO_DECI_KELVIN(temp));
+       if (ACPI_FAILURE(status))
+               return -EIO;
+
+       d->aux_trips[trip] = temp;
+
+       return 0;
+}
+
+
+static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
+               int trip, unsigned long *temp)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       acpi_status status;
+       unsigned long long hyst;
+
+       if (d->override_ops && d->override_ops->get_trip_hyst)
+               return d->override_ops->get_trip_hyst(zone, trip, temp);
+
+       status = acpi_evaluate_integer(d->adev->handle, "GTSH", NULL, &hyst);
+       if (ACPI_FAILURE(status))
+               return -EIO;
+
+       *temp = hyst * 100;
+
+       return 0;
+}
+
+static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
+       .get_temp       = int340x_thermal_get_zone_temp,
+       .get_trip_temp  = int340x_thermal_get_trip_temp,
+       .get_trip_type  = int340x_thermal_get_trip_type,
+       .set_trip_temp  = int340x_thermal_set_trip_temp,
+       .get_trip_hyst =  int340x_thermal_get_trip_hyst,
+};
+
+static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
+                                     unsigned long *temp)
+{
+       unsigned long long r;
+       acpi_status status;
+
+       status = acpi_evaluate_integer(handle, name, NULL, &r);
+       if (ACPI_FAILURE(status))
+               return -EIO;
+
+       *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+
+       return 0;
+}
+
+static struct thermal_zone_params int340x_thermal_params = {
+       .governor_name = "user_space",
+       .no_hwmon = true,
+};
+
+struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+                               struct thermal_zone_device_ops *override_ops)
+{
+       struct int34x_thermal_zone *int34x_thermal_zone;
+       acpi_status status;
+       unsigned long long trip_cnt;
+       int trip_mask = 0, i;
+       int ret;
+
+       int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone),
+                                     GFP_KERNEL);
+       if (!int34x_thermal_zone)
+               return ERR_PTR(-ENOMEM);
+
+       int34x_thermal_zone->adev = adev;
+       int34x_thermal_zone->override_ops = override_ops;
+
+       status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
+       if (ACPI_FAILURE(status))
+               trip_cnt = 0;
+       else {
+               int34x_thermal_zone->aux_trips = kzalloc(
+                               sizeof(*int34x_thermal_zone->aux_trips) *
+                               trip_cnt, GFP_KERNEL);
+               if (!int34x_thermal_zone->aux_trips) {
+                       ret = -ENOMEM;
+                       goto free_mem;
+               }
+               trip_mask = BIT(trip_cnt) - 1;
+               int34x_thermal_zone->aux_trip_nr = trip_cnt;
+       }
+
+       int34x_thermal_zone->crt_trip_id = -1;
+       if (!int340x_thermal_get_trip_config(adev->handle, "_CRT",
+                                            &int34x_thermal_zone->crt_temp))
+               int34x_thermal_zone->crt_trip_id = trip_cnt++;
+       int34x_thermal_zone->hot_trip_id = -1;
+       if (!int340x_thermal_get_trip_config(adev->handle, "_HOT",
+                                            &int34x_thermal_zone->hot_temp))
+               int34x_thermal_zone->hot_trip_id = trip_cnt++;
+       int34x_thermal_zone->psv_trip_id = -1;
+       if (!int340x_thermal_get_trip_config(adev->handle, "_PSV",
+                                            &int34x_thermal_zone->psv_temp))
+               int34x_thermal_zone->psv_trip_id = trip_cnt++;
+       for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+               char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
+
+               if (int340x_thermal_get_trip_config(adev->handle, name,
+                               &int34x_thermal_zone->act_trips[i].temp))
+                       break;
+
+               int34x_thermal_zone->act_trips[i].id = trip_cnt++;
+               int34x_thermal_zone->act_trips[i].valid = true;
+       }
+       int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table(
+                                                               adev->handle);
+
+       int34x_thermal_zone->zone = thermal_zone_device_register(
+                                               acpi_device_bid(adev),
+                                               trip_cnt,
+                                               trip_mask, int34x_thermal_zone,
+                                               &int340x_thermal_zone_ops,
+                                               &int340x_thermal_params,
+                                               0, 0);
+       if (IS_ERR(int34x_thermal_zone->zone)) {
+               ret = PTR_ERR(int34x_thermal_zone->zone);
+               goto free_lpat;
+       }
+
+       return int34x_thermal_zone;
+
+free_lpat:
+       acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+free_mem:
+       kfree(int34x_thermal_zone);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_zone_add);
+
+void int340x_thermal_zone_remove(struct int34x_thermal_zone
+                                *int34x_thermal_zone)
+{
+       thermal_zone_device_unregister(int34x_thermal_zone->zone);
+       acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+       kfree(int34x_thermal_zone);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+
+MODULE_AUTHOR("Aaron Lu <aaron.lu@intel.com>");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Intel INT340x common thermal zone handler");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
new file mode 100644 (file)
index 0000000..9f38ab7
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * int340x_thermal_zone.h
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __INT340X_THERMAL_ZONE_H__
+#define __INT340X_THERMAL_ZONE_H__
+
+#include <acpi/acpi_lpat.h>
+
+#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT     10
+
+struct active_trip {
+       unsigned long temp;
+       int id;
+       bool valid;
+};
+
+struct int34x_thermal_zone {
+       struct acpi_device *adev;
+       struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
+       unsigned long *aux_trips;
+       int aux_trip_nr;
+       unsigned long psv_temp;
+       int psv_trip_id;
+       unsigned long crt_temp;
+       int crt_trip_id;
+       unsigned long hot_temp;
+       int hot_trip_id;
+       struct thermal_zone_device *zone;
+       struct thermal_zone_device_ops *override_ops;
+       void *priv_data;
+       struct acpi_lpat_conversion_table *lpat_table;
+};
+
+struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
+                               struct thermal_zone_device_ops *override_ops);
+void int340x_thermal_zone_remove(struct int34x_thermal_zone *);
+
+static inline void int340x_thermal_zone_set_priv_data(
+                       struct int34x_thermal_zone *tzone, void *priv_data)
+{
+       tzone->priv_data = priv_data;
+}
+
+static inline void *int340x_thermal_zone_get_priv_data(
+                       struct int34x_thermal_zone *tzone)
+{
+       return tzone->priv_data;
+}
+
+static inline void int340x_thermal_zone_device_update(
+                       struct int34x_thermal_zone *tzone)
+{
+       thermal_zone_device_update(tzone->zone);
+}
+
+#endif
index 0fe5dbbea9687053b835ee12eae553930b5ce1f9..5e8d8e91ea6d9d6056c714b16a3eafd9d09f835e 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
+#include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
 
 /* Broadwell-U/HSB thermal reporting device */
 #define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
@@ -39,6 +41,7 @@ struct proc_thermal_device {
        struct device *dev;
        struct acpi_device *adev;
        struct power_config power_limits[2];
+       struct int34x_thermal_zone *int340x_zone;
 };
 
 enum proc_thermal_emum_mode_type {
@@ -117,6 +120,72 @@ static struct attribute_group power_limit_attribute_group = {
        .name = "power_limits"
 };
 
+static int stored_tjmax; /* since it is fixed, we can have local storage */
+
+static int get_tjmax(void)
+{
+       u32 eax, edx;
+       u32 val;
+       int err;
+
+       err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+       if (err)
+               return err;
+
+       val = (eax >> 16) & 0xff;
+       if (val)
+               return val;
+
+       return -EINVAL;
+}
+
+static int read_temp_msr(unsigned long *temp)
+{
+       int cpu;
+       u32 eax, edx;
+       int err;
+       unsigned long curr_temp_off = 0;
+
+       *temp = 0;
+
+       for_each_online_cpu(cpu) {
+               err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax,
+                                       &edx);
+               if (err)
+                       goto err_ret;
+               else {
+                       if (eax & 0x80000000) {
+                               curr_temp_off = (eax >> 16) & 0x7f;
+                               if (!*temp || curr_temp_off < *temp)
+                                       *temp = curr_temp_off;
+                       } else {
+                               err = -EINVAL;
+                               goto err_ret;
+                       }
+               }
+       }
+
+       return 0;
+err_ret:
+       return err;
+}
+
+static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
+                                        unsigned long *temp)
+{
+       int ret;
+
+       ret = read_temp_msr(temp);
+       if (!ret)
+               *temp = (stored_tjmax - *temp) * 1000;
+
+       return ret;
+}
+
+static struct thermal_zone_device_ops proc_thermal_local_ops = {
+       .get_temp       = proc_thermal_get_zone_temp,
+};
+
 static int proc_thermal_add(struct device *dev,
                            struct proc_thermal_device **priv)
 {
@@ -126,6 +195,8 @@ static int proc_thermal_add(struct device *dev,
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *elements, *ppcc;
        union acpi_object *p;
+       unsigned long long tmp;
+       struct thermal_zone_device_ops *ops = NULL;
        int i;
        int ret;
 
@@ -178,6 +249,24 @@ static int proc_thermal_add(struct device *dev,
 
        ret = sysfs_create_group(&dev->kobj,
                                 &power_limit_attribute_group);
+       if (ret)
+               goto free_buffer;
+
+       status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
+       if (ACPI_FAILURE(status)) {
+               /* there is no _TMP method, add local method */
+               stored_tjmax = get_tjmax();
+               if (stored_tjmax > 0)
+                       ops = &proc_thermal_local_ops;
+       }
+
+       proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
+       if (IS_ERR(proc_priv->int340x_zone)) {
+               sysfs_remove_group(&proc_priv->dev->kobj,
+                          &power_limit_attribute_group);
+               ret = PTR_ERR(proc_priv->int340x_zone);
+       } else
+               ret = 0;
 
 free_buffer:
        kfree(buf.pointer);
@@ -185,8 +274,9 @@ free_buffer:
        return ret;
 }
 
-void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
 {
+       int340x_thermal_zone_remove(proc_priv->int340x_zone);
        sysfs_remove_group(&proc_priv->dev->kobj,
                           &power_limit_attribute_group);
 }
index 6ceebd659dd400423c0640b1d0911da36b441b74..12623bc02f46679674d9bd1c3f1574fc21b57c37 100644 (file)
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x45},
        { X86_VENDOR_INTEL, 6, 0x46},
        { X86_VENDOR_INTEL, 6, 0x4c},
+       { X86_VENDOR_INTEL, 6, 0x4d},
        { X86_VENDOR_INTEL, 6, 0x56},
        {}
 };
index 5580f5b24eb9f59a76ac948c75fa56ca20376323..9013505e43b7358bb98c2b5f4e7b134ae0ce143d 100644 (file)
@@ -309,10 +309,13 @@ static int soc_dts_enable(int id)
        return ret;
 }
 
-static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max)
+static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max,
+                                             bool notification_support)
 {
        struct soc_sensor_entry *aux_entry;
        char name[10];
+       int trip_count = 0;
+       int trip_mask = 0;
        int err;
 
        aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
@@ -332,11 +335,16 @@ static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max)
        aux_entry->tj_max = tj_max;
        aux_entry->temp_mask = 0x00FF << (id * 8);
        aux_entry->temp_shift = id * 8;
+       if (notification_support) {
+               trip_count = SOC_MAX_DTS_TRIPS;
+               trip_mask = 0x02;
+       }
        snprintf(name, sizeof(name), "soc_dts%d", id);
        aux_entry->tzone = thermal_zone_device_register(name,
-                       SOC_MAX_DTS_TRIPS,
-                       0x02,
-                       aux_entry, &tzone_ops, NULL, 0, 0);
+                                                       trip_count,
+                                                       trip_mask,
+                                                       aux_entry, &tzone_ops,
+                                                       NULL, 0, 0);
        if (IS_ERR(aux_entry->tzone)) {
                err = PTR_ERR(aux_entry->tzone);
                goto err_ret;
@@ -402,6 +410,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
 
 static const struct x86_cpu_id soc_thermal_ids[] = {
        { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+       { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x4c, 0, 0},
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
@@ -420,8 +429,11 @@ static int __init intel_soc_thermal_init(void)
        if (get_tj_max(&tj_max))
                return -EINVAL;
 
+       soc_dts_thres_irq = (int)match_cpu->driver_data;
+
        for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
-               soc_dts[i] = alloc_soc_dts(i, tj_max);
+               soc_dts[i] = alloc_soc_dts(i, tj_max,
+                                          soc_dts_thres_irq ? true : false);
                if (IS_ERR(soc_dts[i])) {
                        err = PTR_ERR(soc_dts[i]);
                        goto err_free;
@@ -430,15 +442,15 @@ static int __init intel_soc_thermal_init(void)
 
        spin_lock_init(&intr_notify_lock);
 
-       soc_dts_thres_irq = (int)match_cpu->driver_data;
-
-       err = request_threaded_irq(soc_dts_thres_irq, NULL,
-                                       soc_irq_thread_fn,
-                                       IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-                                       "soc_dts", soc_dts);
-       if (err) {
-               pr_err("request_threaded_irq ret %d\n", err);
-               goto err_free;
+       if (soc_dts_thres_irq) {
+               err = request_threaded_irq(soc_dts_thres_irq, NULL,
+                                          soc_irq_thread_fn,
+                                          IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                          "soc_dts", soc_dts);
+               if (err) {
+                       pr_err("request_threaded_irq ret %d\n", err);
+                       goto err_free;
+               }
        }
 
        for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
@@ -451,7 +463,8 @@ static int __init intel_soc_thermal_init(void)
 
 err_trip_temp:
        i = SOC_MAX_DTS_SENSORS;
-       free_irq(soc_dts_thres_irq, soc_dts);
+       if (soc_dts_thres_irq)
+               free_irq(soc_dts_thres_irq, soc_dts);
 err_free:
        while (--i >= 0)
                free_soc_dts(soc_dts[i]);
@@ -466,7 +479,8 @@ static void __exit intel_soc_thermal_exit(void)
        for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
                update_trip_temp(soc_dts[i], 0, 0);
 
-       free_irq(soc_dts_thres_irq, soc_dts);
+       if (soc_dts_thres_irq)
+               free_irq(soc_dts_thres_irq, soc_dts);
 
        for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
                free_soc_dts(soc_dts[i]);
index d717f3dab6f1410fc955daefb0497c2096298b56..668fb1bdea9eff9b0443453430f1de92335c4104 100644 (file)
@@ -497,6 +497,9 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
                if (sensor_specs.np == sensor_np && id == sensor_id) {
                        tzd = thermal_zone_of_add_sensor(child, sensor_np,
                                                         data, ops);
+                       if (!IS_ERR(tzd))
+                               tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
+
                        of_node_put(sensor_specs.np);
                        of_node_put(child);
                        goto exit;
index 2580a4872f90febeb5af00136e16054bb59e4903..fe4e767018c4cf73afa3c53852b6d48191e2a81e 100644 (file)
@@ -387,21 +387,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (irq) {
-               int ret;
-
                /*
                 * platform has IRQ support.
                 * Then, driver uses common registers
-                */
-
-               ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
-                                      dev_name(dev), common);
-               if (ret) {
-                       dev_err(dev, "irq request failed\n ");
-                       return ret;
-               }
-
-               /*
                 * rcar_has_irq_support() will be enabled
                 */
                res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
@@ -456,8 +444,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        }
 
        /* enable temperature comparation */
-       if (irq)
+       if (irq) {
+               ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+                                      dev_name(dev), common);
+               if (ret) {
+                       dev_err(dev, "irq request failed\n ");
+                       goto error_unregister;
+               }
+
                rcar_thermal_common_write(common, ENR, enr_bits);
+       }
 
        platform_set_drvdata(pdev, common);
 
@@ -467,9 +463,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
 error_unregister:
        rcar_thermal_for_each_priv(priv, common) {
-               thermal_zone_device_unregister(priv->zone);
                if (rcar_has_irq_support(priv))
                        rcar_thermal_irq_disable(priv);
+               thermal_zone_device_unregister(priv->zone);
        }
 
        pm_runtime_put(dev);
@@ -485,9 +481,9 @@ static int rcar_thermal_remove(struct platform_device *pdev)
        struct rcar_thermal_priv *priv;
 
        rcar_thermal_for_each_priv(priv, common) {
-               thermal_zone_device_unregister(priv->zone);
                if (rcar_has_irq_support(priv))
                        rcar_thermal_irq_disable(priv);
+               thermal_zone_device_unregister(priv->zone);
        }
 
        pm_runtime_put(dev);
index 9c6ce548e36312f95ca49f6352cf4999a1ab0fe0..3aa46ac7cdbc33765a90279da09fd84507a09d6c 100644 (file)
@@ -193,19 +193,20 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
 
 static long rk_tsadcv2_code_to_temp(u32 code)
 {
-       int high, low, mid;
-
-       low = 0;
-       high = ARRAY_SIZE(v2_code_table) - 1;
-       mid = (high + low) / 2;
-
-       if (code > v2_code_table[low].code || code < v2_code_table[high].code)
-               return 125000; /* No code available, return max temperature */
+       unsigned int low = 0;
+       unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
+       unsigned int mid = (low + high) / 2;
+       unsigned int num;
+       unsigned long denom;
+
+       /* Invalid code, return -EAGAIN */
+       if (code > TSADCV2_DATA_MASK)
+               return -EAGAIN;
 
-       while (low <= high) {
-               if (code >= v2_code_table[mid].code && code <
-                   v2_code_table[mid - 1].code)
-                       return v2_code_table[mid].temp;
+       while (low <= high && mid) {
+               if (code >= v2_code_table[mid].code &&
+                   code < v2_code_table[mid - 1].code)
+                       break;
                else if (code < v2_code_table[mid].code)
                        low = mid + 1;
                else
@@ -213,7 +214,16 @@ static long rk_tsadcv2_code_to_temp(u32 code)
                mid = (low + high) / 2;
        }
 
-       return 125000;
+       /*
+        * The 5C granularity provided by the table is too much. Let's
+        * assume that the relationship between sensor readings and
+        * temperature between 2 table entries is linear and interpolate
+        * to produce less granular result.
+        */
+       num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp;
+       num *= v2_code_table[mid - 1].code - code;
+       denom = v2_code_table[mid - 1].code - v2_code_table[mid].code;
+       return v2_code_table[mid - 1].temp + (num / denom);
 }
 
 /**
index c43306ecc0abbb111dc4c6bfdda5201b64ae1738..c8e35c1a43dcfd19145a6d1e24b132b25b5c6169 100644 (file)
@@ -7,12 +7,3 @@ config EXYNOS_THERMAL
          the TMU, reports temperature and handles cooling action if defined.
          This driver uses the Exynos core thermal APIs and TMU configuration
          data from the supported SoCs.
-
-config EXYNOS_THERMAL_CORE
-       bool "Core thermal framework support for EXYNOS SOCs"
-       depends on EXYNOS_THERMAL
-       help
-         If you say yes here you get support for EXYNOS TMU
-         (Thermal Management Unit) common registration/unregistration
-         functions to the core thermal layer and also to use the generic
-         CPU cooling APIs.
index c09d83095dc2a754770a0bd4cd8c93935aa866e3..1e47d0d89ce06ed28c1202616345a058dd0f7386 100644 (file)
@@ -3,5 +3,3 @@
 #
 obj-$(CONFIG_EXYNOS_THERMAL)                   += exynos_thermal.o
 exynos_thermal-y                               := exynos_tmu.o
-exynos_thermal-y                               += exynos_tmu_data.o
-exynos_thermal-$(CONFIG_EXYNOS_THERMAL_CORE)   += exynos_thermal_common.o
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
deleted file mode 100644 (file)
index 6dc3815..0000000
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * exynos_thermal_common.c - Samsung EXYNOS common thermal file
- *
- *  Copyright (C) 2013 Samsung Electronics
- *  Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#include <linux/cpu_cooling.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
-
-#include "exynos_thermal_common.h"
-
-struct exynos_thermal_zone {
-       enum thermal_device_mode mode;
-       struct thermal_zone_device *therm_dev;
-       struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
-       unsigned int cool_dev_size;
-       struct platform_device *exynos4_dev;
-       struct thermal_sensor_conf *sensor_conf;
-       bool bind;
-};
-
-/* Get mode callback functions for thermal zone */
-static int exynos_get_mode(struct thermal_zone_device *thermal,
-                       enum thermal_device_mode *mode)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       if (th_zone)
-               *mode = th_zone->mode;
-       return 0;
-}
-
-/* Set mode callback functions for thermal zone */
-static int exynos_set_mode(struct thermal_zone_device *thermal,
-                       enum thermal_device_mode mode)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       if (!th_zone) {
-               dev_err(&thermal->device,
-                       "thermal zone not registered\n");
-               return 0;
-       }
-
-       mutex_lock(&thermal->lock);
-
-       if (mode == THERMAL_DEVICE_ENABLED &&
-               !th_zone->sensor_conf->trip_data.trigger_falling)
-               thermal->polling_delay = IDLE_INTERVAL;
-       else
-               thermal->polling_delay = 0;
-
-       mutex_unlock(&thermal->lock);
-
-       th_zone->mode = mode;
-       thermal_zone_device_update(thermal);
-       dev_dbg(th_zone->sensor_conf->dev,
-               "thermal polling set for duration=%d msec\n",
-               thermal->polling_delay);
-       return 0;
-}
-
-
-/* Get trip type callback functions for thermal zone */
-static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
-                                enum thermal_trip_type *type)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-       int trip_type;
-
-       if (trip < 0 || trip >= max_trip)
-               return -EINVAL;
-
-       trip_type = th_zone->sensor_conf->trip_data.trip_type[trip];
-
-       if (trip_type == SW_TRIP)
-               *type = THERMAL_TRIP_CRITICAL;
-       else if (trip_type == THROTTLE_ACTIVE)
-               *type = THERMAL_TRIP_ACTIVE;
-       else if (trip_type == THROTTLE_PASSIVE)
-               *type = THERMAL_TRIP_PASSIVE;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-/* Get trip temperature callback functions for thermal zone */
-static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
-                               unsigned long *temp)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-
-       if (trip < 0 || trip >= max_trip)
-               return -EINVAL;
-
-       *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
-       /* convert the temperature into millicelsius */
-       *temp = *temp * MCELSIUS;
-
-       return 0;
-}
-
-/* Get critical temperature callback functions for thermal zone */
-static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
-                               unsigned long *temp)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-       /* Get the temp of highest trip*/
-       return exynos_get_trip_temp(thermal, max_trip - 1, temp);
-}
-
-/* Bind callback functions for thermal zone */
-static int exynos_bind(struct thermal_zone_device *thermal,
-                       struct thermal_cooling_device *cdev)
-{
-       int ret = 0, i, tab_size, level;
-       struct freq_clip_table *tab_ptr, *clip_data;
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
-       tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
-       tab_size = data->cooling_data.freq_clip_count;
-
-       if (tab_ptr == NULL || tab_size == 0)
-               return 0;
-
-       /* find the cooling device registered*/
-       for (i = 0; i < th_zone->cool_dev_size; i++)
-               if (cdev == th_zone->cool_dev[i])
-                       break;
-
-       /* No matching cooling device */
-       if (i == th_zone->cool_dev_size)
-               return 0;
-
-       /* Bind the thermal zone to the cpufreq cooling device */
-       for (i = 0; i < tab_size; i++) {
-               clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
-               level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
-               if (level == THERMAL_CSTATE_INVALID)
-                       return 0;
-               switch (GET_ZONE(i)) {
-               case MONITOR_ZONE:
-               case WARN_ZONE:
-                       if (thermal_zone_bind_cooling_device(thermal, i, cdev,
-                                                               level, 0)) {
-                               dev_err(data->dev,
-                                       "error unbinding cdev inst=%d\n", i);
-                               ret = -EINVAL;
-                       }
-                       th_zone->bind = true;
-                       break;
-               default:
-                       ret = -EINVAL;
-               }
-       }
-
-       return ret;
-}
-
-/* Unbind callback functions for thermal zone */
-static int exynos_unbind(struct thermal_zone_device *thermal,
-                       struct thermal_cooling_device *cdev)
-{
-       int ret = 0, i, tab_size;
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
-       if (th_zone->bind == false)
-               return 0;
-
-       tab_size = data->cooling_data.freq_clip_count;
-
-       if (tab_size == 0)
-               return 0;
-
-       /* find the cooling device registered*/
-       for (i = 0; i < th_zone->cool_dev_size; i++)
-               if (cdev == th_zone->cool_dev[i])
-                       break;
-
-       /* No matching cooling device */
-       if (i == th_zone->cool_dev_size)
-               return 0;
-
-       /* Bind the thermal zone to the cpufreq cooling device */
-       for (i = 0; i < tab_size; i++) {
-               switch (GET_ZONE(i)) {
-               case MONITOR_ZONE:
-               case WARN_ZONE:
-                       if (thermal_zone_unbind_cooling_device(thermal, i,
-                                                               cdev)) {
-                               dev_err(data->dev,
-                                       "error unbinding cdev inst=%d\n", i);
-                               ret = -EINVAL;
-                       }
-                       th_zone->bind = false;
-                       break;
-               default:
-                       ret = -EINVAL;
-               }
-       }
-       return ret;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_get_temp(struct thermal_zone_device *thermal,
-                       unsigned long *temp)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       void *data;
-
-       if (!th_zone->sensor_conf) {
-               dev_err(&thermal->device,
-                       "Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-       data = th_zone->sensor_conf->driver_data;
-       *temp = th_zone->sensor_conf->read_temperature(data);
-       /* convert the temperature into millicelsius */
-       *temp = *temp * MCELSIUS;
-       return 0;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
-                                               unsigned long temp)
-{
-       void *data;
-       int ret = -EINVAL;
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-
-       if (!th_zone->sensor_conf) {
-               dev_err(&thermal->device,
-                       "Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-       data = th_zone->sensor_conf->driver_data;
-       if (th_zone->sensor_conf->write_emul_temp)
-               ret = th_zone->sensor_conf->write_emul_temp(data, temp);
-       return ret;
-}
-
-/* Get the temperature trend */
-static int exynos_get_trend(struct thermal_zone_device *thermal,
-                       int trip, enum thermal_trend *trend)
-{
-       int ret;
-       unsigned long trip_temp;
-
-       ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
-       if (ret < 0)
-               return ret;
-
-       if (thermal->temperature >= trip_temp)
-               *trend = THERMAL_TREND_RAISE_FULL;
-       else
-               *trend = THERMAL_TREND_DROP_FULL;
-
-       return 0;
-}
-/* Operation callback functions for thermal zone */
-static struct thermal_zone_device_ops exynos_dev_ops = {
-       .bind = exynos_bind,
-       .unbind = exynos_unbind,
-       .get_temp = exynos_get_temp,
-       .set_emul_temp = exynos_set_emul_temp,
-       .get_trend = exynos_get_trend,
-       .get_mode = exynos_get_mode,
-       .set_mode = exynos_set_mode,
-       .get_trip_type = exynos_get_trip_type,
-       .get_trip_temp = exynos_get_trip_temp,
-       .get_crit_temp = exynos_get_crit_temp,
-};
-
-/*
- * This function may be called from interrupt based temperature sensor
- * when threshold is changed.
- */
-void exynos_report_trigger(struct thermal_sensor_conf *conf)
-{
-       unsigned int i;
-       char data[10];
-       char *envp[] = { data, NULL };
-       struct exynos_thermal_zone *th_zone;
-
-       if (!conf || !conf->pzone_data) {
-               pr_err("Invalid temperature sensor configuration data\n");
-               return;
-       }
-
-       th_zone = conf->pzone_data;
-
-       if (th_zone->bind == false) {
-               for (i = 0; i < th_zone->cool_dev_size; i++) {
-                       if (!th_zone->cool_dev[i])
-                               continue;
-                       exynos_bind(th_zone->therm_dev,
-                                       th_zone->cool_dev[i]);
-               }
-       }
-
-       thermal_zone_device_update(th_zone->therm_dev);
-
-       mutex_lock(&th_zone->therm_dev->lock);
-       /* Find the level for which trip happened */
-       for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
-               if (th_zone->therm_dev->last_temperature <
-                       th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
-                       break;
-       }
-
-       if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
-               !th_zone->sensor_conf->trip_data.trigger_falling) {
-               if (i > 0)
-                       th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
-               else
-                       th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
-       }
-
-       snprintf(data, sizeof(data), "%u", i);
-       kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
-       mutex_unlock(&th_zone->therm_dev->lock);
-}
-
-/* Register with the in-kernel thermal management */
-int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
-{
-       int ret;
-       struct exynos_thermal_zone *th_zone;
-
-       if (!sensor_conf || !sensor_conf->read_temperature) {
-               pr_err("Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-
-       th_zone = devm_kzalloc(sensor_conf->dev,
-                               sizeof(struct exynos_thermal_zone), GFP_KERNEL);
-       if (!th_zone)
-               return -ENOMEM;
-
-       th_zone->sensor_conf = sensor_conf;
-       /*
-        * TODO: 1) Handle multiple cooling devices in a thermal zone
-        *       2) Add a flag/name in cooling info to map to specific
-        *       sensor
-        */
-       if (sensor_conf->cooling_data.freq_clip_count > 0) {
-               th_zone->cool_dev[th_zone->cool_dev_size] =
-                               cpufreq_cooling_register(cpu_present_mask);
-               if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
-                       ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
-                       if (ret != -EPROBE_DEFER)
-                               dev_err(sensor_conf->dev,
-                                       "Failed to register cpufreq cooling device: %d\n",
-                                       ret);
-                       goto err_unregister;
-               }
-               th_zone->cool_dev_size++;
-       }
-
-       th_zone->therm_dev = thermal_zone_device_register(
-                       sensor_conf->name, sensor_conf->trip_data.trip_count,
-                       0, th_zone, &exynos_dev_ops, NULL, 0,
-                       sensor_conf->trip_data.trigger_falling ? 0 :
-                       IDLE_INTERVAL);
-
-       if (IS_ERR(th_zone->therm_dev)) {
-               dev_err(sensor_conf->dev,
-                       "Failed to register thermal zone device\n");
-               ret = PTR_ERR(th_zone->therm_dev);
-               goto err_unregister;
-       }
-       th_zone->mode = THERMAL_DEVICE_ENABLED;
-       sensor_conf->pzone_data = th_zone;
-
-       dev_info(sensor_conf->dev,
-               "Exynos: Thermal zone(%s) registered\n", sensor_conf->name);
-
-       return 0;
-
-err_unregister:
-       exynos_unregister_thermal(sensor_conf);
-       return ret;
-}
-
-/* Un-Register with the in-kernel thermal management */
-void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
-{
-       int i;
-       struct exynos_thermal_zone *th_zone;
-
-       if (!sensor_conf || !sensor_conf->pzone_data) {
-               pr_err("Invalid temperature sensor configuration data\n");
-               return;
-       }
-
-       th_zone = sensor_conf->pzone_data;
-
-       thermal_zone_device_unregister(th_zone->therm_dev);
-
-       for (i = 0; i < th_zone->cool_dev_size; ++i)
-               cpufreq_cooling_unregister(th_zone->cool_dev[i]);
-
-       dev_info(sensor_conf->dev,
-               "Exynos: Kernel Thermal management unregistered\n");
-}
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
deleted file mode 100644 (file)
index cd44719..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * exynos_thermal_common.h - Samsung EXYNOS common header file
- *
- *  Copyright (C) 2013 Samsung Electronics
- *  Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#ifndef _EXYNOS_THERMAL_COMMON_H
-#define _EXYNOS_THERMAL_COMMON_H
-
-/* In-kernel thermal framework related macros & definations */
-#define SENSOR_NAME_LEN        16
-#define MAX_TRIP_COUNT 8
-#define MAX_COOLING_DEVICE 4
-
-#define ACTIVE_INTERVAL 500
-#define IDLE_INTERVAL 10000
-#define MCELSIUS       1000
-
-/* CPU Zone information */
-#define PANIC_ZONE      4
-#define WARN_ZONE       3
-#define MONITOR_ZONE    2
-#define SAFE_ZONE       1
-
-#define GET_ZONE(trip) (trip + 2)
-#define GET_TRIP(zone) (zone - 2)
-
-enum trigger_type {
-       THROTTLE_ACTIVE = 1,
-       THROTTLE_PASSIVE,
-       SW_TRIP,
-       HW_TRIP,
-};
-
-/**
- * struct freq_clip_table
- * @freq_clip_max: maximum frequency allowed for this cooling state.
- * @temp_level: Temperature level at which the temperature clipping will
- *     happen.
- * @mask_val: cpumask of the allowed cpu's where the clipping will take place.
- *
- * This structure is required to be filled and passed to the
- * cpufreq_cooling_unregister function.
- */
-struct freq_clip_table {
-       unsigned int freq_clip_max;
-       unsigned int temp_level;
-       const struct cpumask *mask_val;
-};
-
-struct thermal_trip_point_conf {
-       int trip_val[MAX_TRIP_COUNT];
-       int trip_type[MAX_TRIP_COUNT];
-       int trip_count;
-       unsigned char trigger_falling;
-};
-
-struct thermal_cooling_conf {
-       struct freq_clip_table freq_data[MAX_TRIP_COUNT];
-       int freq_clip_count;
-};
-
-struct thermal_sensor_conf {
-       char name[SENSOR_NAME_LEN];
-       int (*read_temperature)(void *data);
-       int (*write_emul_temp)(void *drv_data, unsigned long temp);
-       struct thermal_trip_point_conf trip_data;
-       struct thermal_cooling_conf cooling_data;
-       void *driver_data;
-       void *pzone_data;
-       struct device *dev;
-};
-
-/*Functions used exynos based thermal sensor driver*/
-#ifdef CONFIG_EXYNOS_THERMAL_CORE
-void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf);
-int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
-void exynos_report_trigger(struct thermal_sensor_conf *sensor_conf);
-#else
-static inline void
-exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) { return; }
-
-static inline int
-exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) { return 0; }
-
-static inline void
-exynos_report_trigger(struct thermal_sensor_conf *sensor_conf) { return; }
-
-#endif /* CONFIG_EXYNOS_THERMAL_CORE */
-#endif /* _EXYNOS_THERMAL_COMMON_H */
index d2f1e62a42328095a35efb25ca461875e9f87c9f..1fc54ab911d206f0c4b8e2f9aa01b62f8d1f4a22 100644 (file)
@@ -1,6 +1,10 @@
 /*
  * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
  *
+ *  Copyright (C) 2014 Samsung Electronics
+ *  Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+ *  Lukasz Majewski <l.majewski@samsung.com>
+ *
  *  Copyright (C) 2011 Samsung Electronics
  *  Donggeun Kim <dg77.kim@samsung.com>
  *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
@@ -31,8 +35,8 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
-#include "exynos_thermal_common.h"
 #include "exynos_tmu.h"
+#include "../thermal_core.h"
 
 /* Exynos generic registers */
 #define EXYNOS_TMU_REG_TRIMINFO                0x0
 #define EXYNOS5440_TMU_TH_RISE4_SHIFT          24
 #define EXYNOS5440_EFUSE_SWAP_OFFSET           8
 
+/* Exynos7 specific registers */
+#define EXYNOS7_THD_TEMP_RISE7_6               0x50
+#define EXYNOS7_THD_TEMP_FALL7_6               0x60
+#define EXYNOS7_TMU_REG_INTEN                  0x110
+#define EXYNOS7_TMU_REG_INTPEND                        0x118
+#define EXYNOS7_TMU_REG_EMUL_CON               0x160
+
+#define EXYNOS7_TMU_TEMP_MASK                  0x1ff
+#define EXYNOS7_PD_DET_EN_SHIFT                        23
+#define EXYNOS7_TMU_INTEN_RISE0_SHIFT          0
+#define EXYNOS7_TMU_INTEN_RISE1_SHIFT          1
+#define EXYNOS7_TMU_INTEN_RISE2_SHIFT          2
+#define EXYNOS7_TMU_INTEN_RISE3_SHIFT          3
+#define EXYNOS7_TMU_INTEN_RISE4_SHIFT          4
+#define EXYNOS7_TMU_INTEN_RISE5_SHIFT          5
+#define EXYNOS7_TMU_INTEN_RISE6_SHIFT          6
+#define EXYNOS7_TMU_INTEN_RISE7_SHIFT          7
+#define EXYNOS7_EMUL_DATA_SHIFT                        7
+#define EXYNOS7_EMUL_DATA_MASK                 0x1ff
+
+#define MCELSIUS       1000
 /**
  * struct exynos_tmu_data : A structure to hold the private data of the TMU
        driver
  * @lock: lock to implement synchronization.
  * @clk: pointer to the clock structure.
  * @clk_sec: pointer to the clock structure for accessing the base_second.
+ * @sclk: pointer to the clock structure for accessing the tmu special clk.
  * @temp_error1: fused value of the first point trim.
  * @temp_error2: fused value of the second point trim.
  * @regulator: pointer to the TMU regulator structure.
@@ -147,10 +173,11 @@ struct exynos_tmu_data {
        enum soc_type soc;
        struct work_struct irq_work;
        struct mutex lock;
-       struct clk *clk, *clk_sec;
-       u8 temp_error1, temp_error2;
+       struct clk *clk, *clk_sec, *sclk;
+       u16 temp_error1, temp_error2;
        struct regulator *regulator;
-       struct thermal_sensor_conf *reg_conf;
+       struct thermal_zone_device *tzd;
+
        int (*tmu_initialize)(struct platform_device *pdev);
        void (*tmu_control)(struct platform_device *pdev, bool on);
        int (*tmu_read)(struct exynos_tmu_data *data);
@@ -159,6 +186,33 @@ struct exynos_tmu_data {
        void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
 };
 
+static void exynos_report_trigger(struct exynos_tmu_data *p)
+{
+       char data[10], *envp[] = { data, NULL };
+       struct thermal_zone_device *tz = p->tzd;
+       unsigned long temp;
+       unsigned int i;
+
+       if (!tz) {
+               pr_err("No thermal zone device defined\n");
+               return;
+       }
+
+       thermal_zone_device_update(tz);
+
+       mutex_lock(&tz->lock);
+       /* Find the level for which trip happened */
+       for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
+               tz->ops->get_trip_temp(tz, i, &temp);
+               if (tz->last_temperature < temp)
+                       break;
+       }
+
+       snprintf(data, sizeof(data), "%u", i);
+       kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
+       mutex_unlock(&tz->lock);
+}
+
 /*
  * TMU treats temperature as a mapped temperature code.
  * The temperature is converted differently depending on the calibration type.
@@ -190,7 +244,7 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
  * Calculate a temperature value from a temperature code.
  * The unit of the temperature is degree Celsius.
  */
-static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
+static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
 {
        struct exynos_tmu_platform_data *pdata = data->pdata;
        int temp;
@@ -234,14 +288,25 @@ static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
 
 static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
 {
-       struct exynos_tmu_platform_data *pdata = data->pdata;
+       struct thermal_zone_device *tz = data->tzd;
+       const struct thermal_trip * const trips =
+               of_thermal_get_trip_points(tz);
+       unsigned long temp;
        int i;
 
-       for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
-               u8 temp = pdata->trigger_levels[i];
+       if (!trips) {
+               pr_err("%s: Cannot get trip points from of-thermal.c!\n",
+                      __func__);
+               return 0;
+       }
+
+       for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
+               if (trips[i].type == THERMAL_TRIP_CRITICAL)
+                       continue;
 
+               temp = trips[i].temperature / MCELSIUS;
                if (falling)
-                       temp -= pdata->threshold_falling;
+                       temp -= (trips[i].hysteresis / MCELSIUS);
                else
                        threshold &= ~(0xff << 8 * i);
 
@@ -305,9 +370,19 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
 static int exynos4210_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
-       unsigned int status;
+       struct thermal_zone_device *tz = data->tzd;
+       const struct thermal_trip * const trips =
+               of_thermal_get_trip_points(tz);
        int ret = 0, threshold_code, i;
+       unsigned long reference, temp;
+       unsigned int status;
+
+       if (!trips) {
+               pr_err("%s: Cannot get trip points from of-thermal.c!\n",
+                      __func__);
+               ret = -ENODEV;
+               goto out;
+       }
 
        status = readb(data->base + EXYNOS_TMU_REG_STATUS);
        if (!status) {
@@ -318,12 +393,19 @@ static int exynos4210_tmu_initialize(struct platform_device *pdev)
        sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
 
        /* Write temperature code for threshold */
-       threshold_code = temp_to_code(data, pdata->threshold);
+       reference = trips[0].temperature / MCELSIUS;
+       threshold_code = temp_to_code(data, reference);
+       if (threshold_code < 0) {
+               ret = threshold_code;
+               goto out;
+       }
        writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
 
-       for (i = 0; i < pdata->non_hw_trigger_levels; i++)
-               writeb(pdata->trigger_levels[i], data->base +
+       for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
+               temp = trips[i].temperature / MCELSIUS;
+               writeb(temp - reference, data->base +
                       EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
+       }
 
        data->tmu_clear_irqs(data);
 out:
@@ -333,9 +415,11 @@ out:
 static int exynos4412_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
+       const struct thermal_trip * const trips =
+               of_thermal_get_trip_points(data->tzd);
        unsigned int status, trim_info, con, ctrl, rising_threshold;
        int ret = 0, threshold_code, i;
+       unsigned long crit_temp = 0;
 
        status = readb(data->base + EXYNOS_TMU_REG_STATUS);
        if (!status) {
@@ -373,17 +457,29 @@ static int exynos4412_tmu_initialize(struct platform_device *pdev)
        data->tmu_clear_irqs(data);
 
        /* if last threshold limit is also present */
-       i = pdata->max_trigger_level - 1;
-       if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
-               threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
-               /* 1-4 level to be assigned in th0 reg */
-               rising_threshold &= ~(0xff << 8 * i);
-               rising_threshold |= threshold_code << 8 * i;
-               writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
-               con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
-               con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
-               writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+       for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
+               if (trips[i].type == THERMAL_TRIP_CRITICAL) {
+                       crit_temp = trips[i].temperature;
+                       break;
+               }
+       }
+
+       if (i == of_thermal_get_ntrips(data->tzd)) {
+               pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
+                      __func__);
+               ret = -EINVAL;
+               goto out;
        }
+
+       threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
+       /* 1-4 level to be assigned in th0 reg */
+       rising_threshold &= ~(0xff << 8 * i);
+       rising_threshold |= threshold_code << 8 * i;
+       writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
+       con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
+       con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
+       writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+
 out:
        return ret;
 }
@@ -391,9 +487,9 @@ out:
 static int exynos5440_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
        unsigned int trim_info = 0, con, rising_threshold;
-       int ret = 0, threshold_code, i;
+       int ret = 0, threshold_code;
+       unsigned long crit_temp = 0;
 
        /*
         * For exynos5440 soc triminfo value is swapped between TMU0 and
@@ -422,9 +518,8 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
        data->tmu_clear_irqs(data);
 
        /* if last threshold limit is also present */
-       i = pdata->max_trigger_level - 1;
-       if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
-               threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
+       if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
+               threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
                /* 5th level to be assigned in th2 reg */
                rising_threshold =
                        threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
@@ -439,10 +534,88 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
        return ret;
 }
 
-static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
+/*
+ * exynos7_tmu_initialize() - one-time TMU setup for Exynos7 SoCs.
+ *
+ * Reads the trimming-info e-fuse (falling back to the platform default
+ * when the fused value is zero or outside [min, max]_efuse_value),
+ * programs a 9-bit rising and falling threshold code for every trip
+ * point registered with of-thermal, then clears any stale interrupts.
+ *
+ * Returns 0 on success or -EBUSY when the TMU status register reports
+ * the sensor as not ready.
+ */
+static int exynos7_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tz = data->tzd;
        struct exynos_tmu_platform_data *pdata = data->pdata;
+       unsigned int status, trim_info;
+       unsigned int rising_threshold = 0, falling_threshold = 0;
+       int ret = 0, threshold_code, i;
+       unsigned long temp, temp_hist;
+       unsigned int reg_off, bit_off;
+
+       status = readb(data->base + EXYNOS_TMU_REG_STATUS);
+       if (!status) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
+
+       /* Use the fused calibration point only when it is plausible. */
+       data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
+       if (!data->temp_error1 ||
+           (pdata->min_efuse_value > data->temp_error1) ||
+           (data->temp_error1 > pdata->max_efuse_value))
+               data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
+
+       /* Write temperature code for rising and falling threshold */
+       for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
+               /*
+                * On exynos7 there are 4 rising and 4 falling threshold
+                * registers (0x50-0x5c and 0x60-0x6c respectively). Each
+                * register holds the value of two threshold levels (at bit
+                * offsets 0 and 16). Based on the fact that there are at most
+                * eight possible trigger levels, calculate the register and
+                * bit offsets where the threshold levels are to be written.
+                *
+                * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
+                * [24:16] - Threshold level 7
+                * [8:0] - Threshold level 6
+                * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
+                * [24:16] - Threshold level 5
+                * [8:0] - Threshold level 4
+                *
+                * and similarly for falling thresholds.
+                *
+                * Based on the above, calculate the register and bit offsets
+                * for rising/falling threshold levels and populate them.
+                */
+               reg_off = ((7 - i) / 2) * 4;
+               bit_off = ((8 - i) % 2);
+
+               /*
+                * NOTE(review): the return values of get_trip_temp() and
+                * get_trip_hyst() are ignored; temp/temp_hist would be used
+                * uninitialized on failure -- confirm of-thermal guarantees
+                * success for every index below of_thermal_get_ntrips().
+                */
+               tz->ops->get_trip_temp(tz, i, &temp);
+               temp /= MCELSIUS;
+
+               tz->ops->get_trip_hyst(tz, i, &temp_hist);
+               temp_hist = temp - (temp_hist / MCELSIUS);
+
+               /* Set 9-bit temperature code for rising threshold levels */
+               threshold_code = temp_to_code(data, temp);
+               rising_threshold = readl(data->base +
+                       EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
+               rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
+               rising_threshold |= threshold_code << (16 * bit_off);
+               writel(rising_threshold,
+                      data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
+
+               /* Set 9-bit temperature code for falling threshold levels */
+               /*
+                * NOTE(review): unlike the rising path, falling_threshold is
+                * never re-read from the register, so intermediate writes can
+                * briefly carry the other half-word accumulated from the
+                * previous iteration; the final register values are correct
+                * once the loop completes.
+                */
+               threshold_code = temp_to_code(data, temp_hist);
+               falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
+               falling_threshold |= threshold_code << (16 * bit_off);
+               writel(falling_threshold,
+                      data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
+       }
+
+       data->tmu_clear_irqs(data);
+out:
+       return ret;
+}
+
+static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
+{
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tz = data->tzd;
        unsigned int con, interrupt_en;
 
        con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
@@ -450,10 +623,15 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
        if (on) {
                con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
                interrupt_en =
-                       pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT |
-                       pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT |
-                       pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT |
-                       pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT;
+                       (of_thermal_is_trip_valid(tz, 3)
+                        << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 2)
+                        << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 1)
+                        << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 0)
+                        << EXYNOS_TMU_INTEN_RISE0_SHIFT);
+
                if (data->soc != SOC_ARCH_EXYNOS4210)
                        interrupt_en |=
                                interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
@@ -468,7 +646,7 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
 static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
+       struct thermal_zone_device *tz = data->tzd;
        unsigned int con, interrupt_en;
 
        con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));
@@ -476,11 +654,16 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
        if (on) {
                con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
                interrupt_en =
-                       pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT |
-                       pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT |
-                       pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT |
-                       pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT;
-               interrupt_en |= interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
+                       (of_thermal_is_trip_valid(tz, 3)
+                        << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 2)
+                        << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 1)
+                        << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 0)
+                        << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
+               interrupt_en |=
+                       interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
        } else {
                con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
                interrupt_en = 0; /* Disable all interrupts */
@@ -489,19 +672,62 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
        writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
 }
 
-static int exynos_tmu_read(struct exynos_tmu_data *data)
+/*
+ * exynos7_tmu_control() - enable/disable the Exynos7 TMU core and IRQs.
+ * @on: true enables the sensor core and builds a rising-interrupt mask
+ *      from the trips that of-thermal reports as valid, mirroring the
+ *      same bits into the falling-interrupt half of the register;
+ *      false disables the core and all interrupts.
+ *
+ * The PD_DET enable bit is set unconditionally before the control and
+ * interrupt-enable registers are written back.
+ */
+static void exynos7_tmu_control(struct platform_device *pdev, bool on)
 {
-       int ret;
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tz = data->tzd;
+       unsigned int con, interrupt_en;
+
+       con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
+
+       if (on) {
+               con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+               interrupt_en =
+                       (of_thermal_is_trip_valid(tz, 7)
+                       << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 6)
+                       << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 5)
+                       << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 4)
+                       << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 3)
+                       << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 2)
+                       << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 1)
+                       << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 0)
+                       << EXYNOS7_TMU_INTEN_RISE0_SHIFT);
+
+               interrupt_en |=
+                       interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
+       } else {
+               con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+               interrupt_en = 0; /* Disable all interrupts */
+       }
+       con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
+
+       writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
+       writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+}
+
+/*
+ * exynos_get_temp() - of-thermal .get_temp callback.
+ * @p: struct exynos_tmu_data sensor handle.
+ * @temp: output, temperature in millicelsius.
+ *
+ * NOTE(review): unlike the old exynos_tmu_read(), a negative error code
+ * from data->tmu_read() is no longer checked before code_to_temp() --
+ * confirm the per-SoC read hooks cannot fail here.
+ *
+ * Returns 0, or -EINVAL when the sensor handle is unusable.
+ */
+static int exynos_get_temp(void *p, long *temp)
+{
+       struct exynos_tmu_data *data = p;
+
+       if (!data || !data->tmu_read)
+               return -EINVAL;
 
        mutex_lock(&data->lock);
        clk_enable(data->clk);
-       ret = data->tmu_read(data);
-       if (ret >= 0)
-               ret = code_to_temp(data, ret);
+
+       /* Convert the raw sensor code to millicelsius under the lock. */
+       *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
+
        clk_disable(data->clk);
        mutex_unlock(&data->lock);
 
-       return ret;
+       return 0;
 }
 
 #ifdef CONFIG_THERMAL_EMULATION
@@ -515,9 +741,19 @@ static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
                        val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
                        val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
                }
-               val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT);
-               val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
-                       EXYNOS_EMUL_ENABLE;
+               if (data->soc == SOC_ARCH_EXYNOS7) {
+                       val &= ~(EXYNOS7_EMUL_DATA_MASK <<
+                               EXYNOS7_EMUL_DATA_SHIFT);
+                       val |= (temp_to_code(data, temp) <<
+                               EXYNOS7_EMUL_DATA_SHIFT) |
+                               EXYNOS_EMUL_ENABLE;
+               } else {
+                       val &= ~(EXYNOS_EMUL_DATA_MASK <<
+                               EXYNOS_EMUL_DATA_SHIFT);
+                       val |= (temp_to_code(data, temp) <<
+                               EXYNOS_EMUL_DATA_SHIFT) |
+                               EXYNOS_EMUL_ENABLE;
+               }
        } else {
                val &= ~EXYNOS_EMUL_ENABLE;
        }
@@ -533,6 +769,8 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
 
        if (data->soc == SOC_ARCH_EXYNOS5260)
                emul_con = EXYNOS5260_EMUL_CON;
+       else if (data->soc == SOC_ARCH_EXYNOS7)
+               emul_con = EXYNOS7_TMU_REG_EMUL_CON;
        else
                emul_con = EXYNOS_EMUL_CON;
 
@@ -576,7 +814,7 @@ out:
 #define exynos5440_tmu_set_emulation NULL
 static int exynos_tmu_set_emulation(void *drv_data,    unsigned long temp)
        { return -EINVAL; }
-#endif/*CONFIG_THERMAL_EMULATION*/
+#endif /* CONFIG_THERMAL_EMULATION */
 
 static int exynos4210_tmu_read(struct exynos_tmu_data *data)
 {
@@ -596,6 +834,12 @@ static int exynos5440_tmu_read(struct exynos_tmu_data *data)
        return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
 }
 
+/* Return the current 9-bit raw temperature code from the Exynos7 TMU. */
+static int exynos7_tmu_read(struct exynos_tmu_data *data)
+{
+       return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
+               EXYNOS7_TMU_TEMP_MASK;
+}
+
 static void exynos_tmu_work(struct work_struct *work)
 {
        struct exynos_tmu_data *data = container_of(work,
@@ -613,7 +857,7 @@ static void exynos_tmu_work(struct work_struct *work)
        if (!IS_ERR(data->clk_sec))
                clk_disable(data->clk_sec);
 
-       exynos_report_trigger(data->reg_conf);
+       exynos_report_trigger(data);
        mutex_lock(&data->lock);
        clk_enable(data->clk);
 
@@ -634,6 +878,9 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
        if (data->soc == SOC_ARCH_EXYNOS5260) {
                tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
                tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
+       } else if (data->soc == SOC_ARCH_EXYNOS7) {
+               tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
+               tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
        } else {
                tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
                tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
@@ -671,57 +918,78 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
 }
 
 static const struct of_device_id exynos_tmu_match[] = {
-       {
-               .compatible = "samsung,exynos3250-tmu",
-               .data = &exynos3250_default_tmu_data,
-       },
-       {
-               .compatible = "samsung,exynos4210-tmu",
-               .data = &exynos4210_default_tmu_data,
-       },
-       {
-               .compatible = "samsung,exynos4412-tmu",
-               .data = &exynos4412_default_tmu_data,
-       },
-       {
-               .compatible = "samsung,exynos5250-tmu",
-               .data = &exynos5250_default_tmu_data,
-       },
-       {
-               .compatible = "samsung,exynos5260-tmu",
-               .data = &exynos5260_default_tmu_data,
-       },
-       {
-               .compatible = "samsung,exynos5420-tmu",
-               .data = &exynos5420_default_tmu_data,
-       },
-       {
-               .compatible = "samsung,exynos5420-tmu-ext-triminfo",
-               .data = &exynos5420_default_tmu_data,
-       },
-       {
-               .compatible = "samsung,exynos5440-tmu",
-               .data = &exynos5440_default_tmu_data,
-       },
-       {},
+       /* Per-entry .data dropped; SoC type comes from exynos_of_get_soc_type() */
+       { .compatible = "samsung,exynos3250-tmu", },
+       { .compatible = "samsung,exynos4210-tmu", },
+       { .compatible = "samsung,exynos4412-tmu", },
+       { .compatible = "samsung,exynos5250-tmu", },
+       { .compatible = "samsung,exynos5260-tmu", },
+       { .compatible = "samsung,exynos5420-tmu", },
+       { .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
+       { .compatible = "samsung,exynos5440-tmu", },
+       { .compatible = "samsung,exynos7-tmu", },
+       { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
 
-static inline struct  exynos_tmu_platform_data *exynos_get_driver_data(
-                       struct platform_device *pdev, int id)
+/*
+ * exynos_of_get_soc_type() - map the TMU compatible string to a soc_type.
+ *
+ * Returns the matching SOC_ARCH_* constant, or -EINVAL when @np carries
+ * none of the known "samsung,exynos*-tmu" compatibles.
+ */
+static int exynos_of_get_soc_type(struct device_node *np)
+{
+       if (of_device_is_compatible(np, "samsung,exynos3250-tmu"))
+               return SOC_ARCH_EXYNOS3250;
+       else if (of_device_is_compatible(np, "samsung,exynos4210-tmu"))
+               return SOC_ARCH_EXYNOS4210;
+       else if (of_device_is_compatible(np, "samsung,exynos4412-tmu"))
+               return SOC_ARCH_EXYNOS4412;
+       else if (of_device_is_compatible(np, "samsung,exynos5250-tmu"))
+               return SOC_ARCH_EXYNOS5250;
+       else if (of_device_is_compatible(np, "samsung,exynos5260-tmu"))
+               return SOC_ARCH_EXYNOS5260;
+       else if (of_device_is_compatible(np, "samsung,exynos5420-tmu"))
+               return SOC_ARCH_EXYNOS5420;
+       else if (of_device_is_compatible(np,
+                                        "samsung,exynos5420-tmu-ext-triminfo"))
+               return SOC_ARCH_EXYNOS5420_TRIMINFO;
+       else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
+               return SOC_ARCH_EXYNOS5440;
+       else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
+               return SOC_ARCH_EXYNOS7;
+
+       return -EINVAL;
+}
+
+/*
+ * exynos_of_sensor_conf() - fill @pdata from "samsung,tmu_*" DT properties.
+ *
+ * NOTE(review): only the first of_property_read_u32() result is captured
+ * in @ret, and even that is never checked; a missing property silently
+ * leaves @value (and the derived pdata field) stale -- confirm all of
+ * these properties are mandatory in the binding.  Always returns 0.
+ */
+static int exynos_of_sensor_conf(struct device_node *np,
+                                struct exynos_tmu_platform_data *pdata)
 {
-       struct  exynos_tmu_init_data *data_table;
-       struct exynos_tmu_platform_data *tmu_data;
-       const struct of_device_id *match;
+       u32 value;
+       int ret;
 
-       match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
-       if (!match)
-               return NULL;
-       data_table = (struct exynos_tmu_init_data *) match->data;
-       if (!data_table || id >= data_table->tmu_count)
-               return NULL;
-       tmu_data = data_table->tmu_data;
-       return (struct exynos_tmu_platform_data *) (tmu_data + id);
+       of_node_get(np);
+
+       ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
+       pdata->gain = (u8)value;
+       of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
+       pdata->reference_voltage = (u8)value;
+       of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value);
+       pdata->noise_cancel_mode = (u8)value;
+
+       of_property_read_u32(np, "samsung,tmu_efuse_value",
+                            &pdata->efuse_value);
+       of_property_read_u32(np, "samsung,tmu_min_efuse_value",
+                            &pdata->min_efuse_value);
+       of_property_read_u32(np, "samsung,tmu_max_efuse_value",
+                            &pdata->max_efuse_value);
+
+       of_property_read_u32(np, "samsung,tmu_first_point_trim", &value);
+       pdata->first_point_trim = (u8)value;
+       of_property_read_u32(np, "samsung,tmu_second_point_trim", &value);
+       pdata->second_point_trim = (u8)value;
+       of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value);
+       pdata->default_temp_offset = (u8)value;
+
+       of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);
+       of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode);
+
+       of_node_put(np);
+       return 0;
 }
 
 static int exynos_map_dt_data(struct platform_device *pdev)
@@ -771,14 +1039,15 @@ static int exynos_map_dt_data(struct platform_device *pdev)
                return -EADDRNOTAVAIL;
        }
 
-       pdata = exynos_get_driver_data(pdev, data->id);
-       if (!pdata) {
-               dev_err(&pdev->dev, "No platform init data supplied.\n");
-               return -ENODEV;
-       }
+       pdata = devm_kzalloc(&pdev->dev,
+                            sizeof(struct exynos_tmu_platform_data),
+                            GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
 
+       exynos_of_sensor_conf(pdev->dev.of_node, pdata);
        data->pdata = pdata;
-       data->soc = pdata->type;
+       data->soc = exynos_of_get_soc_type(pdev->dev.of_node);
 
        switch (data->soc) {
        case SOC_ARCH_EXYNOS4210:
@@ -806,6 +1075,13 @@ static int exynos_map_dt_data(struct platform_device *pdev)
                data->tmu_set_emulation = exynos5440_tmu_set_emulation;
                data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
                break;
+       case SOC_ARCH_EXYNOS7:
+               data->tmu_initialize = exynos7_tmu_initialize;
+               data->tmu_control = exynos7_tmu_control;
+               data->tmu_read = exynos7_tmu_read;
+               data->tmu_set_emulation = exynos4412_tmu_set_emulation;
+               data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+               break;
        default:
                dev_err(&pdev->dev, "Platform not supported\n");
                return -EINVAL;
@@ -834,12 +1110,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
        return 0;
 }
 
+/* of-thermal sensor callbacks: raw temperature read plus emulation hook. */
+static struct thermal_zone_of_device_ops exynos_sensor_ops = {
+       .get_temp = exynos_get_temp,
+       .set_emul_temp = exynos_tmu_set_emulation,
+};
+
 static int exynos_tmu_probe(struct platform_device *pdev)
 {
-       struct exynos_tmu_data *data;
        struct exynos_tmu_platform_data *pdata;
-       struct thermal_sensor_conf *sensor_conf;
-       int ret, i;
+       struct exynos_tmu_data *data;
+       int ret;
 
        data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
                                        GFP_KERNEL);
@@ -849,9 +1129,15 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, data);
        mutex_init(&data->lock);
 
+       data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
+                                                   &exynos_sensor_ops);
+       if (IS_ERR(data->tzd)) {
+               pr_err("thermal: tz: %p ERROR\n", data->tzd);
+               return PTR_ERR(data->tzd);
+       }
        ret = exynos_map_dt_data(pdev);
        if (ret)
-               return ret;
+               goto err_sensor;
 
        pdata = data->pdata;
 
@@ -860,20 +1146,22 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
        if (IS_ERR(data->clk)) {
                dev_err(&pdev->dev, "Failed to get clock\n");
-               return  PTR_ERR(data->clk);
+               ret = PTR_ERR(data->clk);
+               goto err_sensor;
        }
 
        data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
        if (IS_ERR(data->clk_sec)) {
                if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
                        dev_err(&pdev->dev, "Failed to get triminfo clock\n");
-                       return PTR_ERR(data->clk_sec);
+                       ret = PTR_ERR(data->clk_sec);
+                       goto err_sensor;
                }
        } else {
                ret = clk_prepare(data->clk_sec);
                if (ret) {
                        dev_err(&pdev->dev, "Failed to get clock\n");
-                       return ret;
+                       goto err_sensor;
                }
        }
 
@@ -883,82 +1171,57 @@ static int exynos_tmu_probe(struct platform_device *pdev)
                goto err_clk_sec;
        }
 
-       ret = exynos_tmu_initialize(pdev);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to initialize TMU\n");
-               goto err_clk;
+       if (data->soc == SOC_ARCH_EXYNOS7) {
+               data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
+               if (IS_ERR(data->sclk)) {
+                       dev_err(&pdev->dev, "Failed to get sclk\n");
+                       goto err_clk;
+               } else {
+                       ret = clk_prepare_enable(data->sclk);
+                       if (ret) {
+                               dev_err(&pdev->dev, "Failed to enable sclk\n");
+                               goto err_clk;
+                       }
+               }
        }
 
-       exynos_tmu_control(pdev, true);
-
-       /* Allocate a structure to register with the exynos core thermal */
-       sensor_conf = devm_kzalloc(&pdev->dev,
-                               sizeof(struct thermal_sensor_conf), GFP_KERNEL);
-       if (!sensor_conf) {
-               ret = -ENOMEM;
-               goto err_clk;
-       }
-       sprintf(sensor_conf->name, "therm_zone%d", data->id);
-       sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
-       sensor_conf->write_emul_temp =
-               (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
-       sensor_conf->driver_data = data;
-       sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
-                       pdata->trigger_enable[1] + pdata->trigger_enable[2]+
-                       pdata->trigger_enable[3];
-
-       for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
-               sensor_conf->trip_data.trip_val[i] =
-                       pdata->threshold + pdata->trigger_levels[i];
-               sensor_conf->trip_data.trip_type[i] =
-                                       pdata->trigger_type[i];
-       }
-
-       sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
-
-       sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
-       for (i = 0; i < pdata->freq_tab_count; i++) {
-               sensor_conf->cooling_data.freq_data[i].freq_clip_max =
-                                       pdata->freq_tab[i].freq_clip_max;
-               sensor_conf->cooling_data.freq_data[i].temp_level =
-                                       pdata->freq_tab[i].temp_level;
-       }
-       sensor_conf->dev = &pdev->dev;
-       /* Register the sensor with thermal management interface */
-       ret = exynos_register_thermal(sensor_conf);
+       ret = exynos_tmu_initialize(pdev);
        if (ret) {
-               if (ret != -EPROBE_DEFER)
-                       dev_err(&pdev->dev,
-                               "Failed to register thermal interface: %d\n",
-                               ret);
-               goto err_clk;
+               dev_err(&pdev->dev, "Failed to initialize TMU\n");
+               goto err_sclk;
        }
-       data->reg_conf = sensor_conf;
 
        ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
                IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
-               goto err_clk;
+               goto err_sclk;
        }
 
+       exynos_tmu_control(pdev, true);
        return 0;
+err_sclk:
+       clk_disable_unprepare(data->sclk);
 err_clk:
        clk_unprepare(data->clk);
 err_clk_sec:
        if (!IS_ERR(data->clk_sec))
                clk_unprepare(data->clk_sec);
+err_sensor:
+       thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
+
        return ret;
 }
 
 static int exynos_tmu_remove(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tzd = data->tzd;
 
-       exynos_unregister_thermal(data->reg_conf);
-
+       thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
        exynos_tmu_control(pdev, false);
 
+       clk_disable_unprepare(data->sclk);
        clk_unprepare(data->clk);
        if (!IS_ERR(data->clk_sec))
                clk_unprepare(data->clk_sec);
index da3009bff6c439d013f21710b05792fc86b146b0..4d71ec6c9aa0ba10b86c3975d92d5ebcea00d543 100644 (file)
 #ifndef _EXYNOS_TMU_H
 #define _EXYNOS_TMU_H
 #include <linux/cpu_cooling.h>
-
-#include "exynos_thermal_common.h"
-
-enum calibration_type {
-       TYPE_ONE_POINT_TRIMMING,
-       TYPE_ONE_POINT_TRIMMING_25,
-       TYPE_ONE_POINT_TRIMMING_85,
-       TYPE_TWO_POINT_TRIMMING,
-       TYPE_NONE,
-};
+#include <dt-bindings/thermal/thermal_exynos.h>
 
 enum soc_type {
        SOC_ARCH_EXYNOS3250 = 1,
@@ -43,38 +34,11 @@ enum soc_type {
        SOC_ARCH_EXYNOS5420,
        SOC_ARCH_EXYNOS5420_TRIMINFO,
        SOC_ARCH_EXYNOS5440,
+       SOC_ARCH_EXYNOS7,
 };
 
 /**
  * struct exynos_tmu_platform_data
- * @threshold: basic temperature for generating interrupt
- *            25 <= threshold <= 125 [unit: degree Celsius]
- * @threshold_falling: differntial value for setting threshold
- *                    of temperature falling interrupt.
- * @trigger_levels: array for each interrupt levels
- *     [unit: degree Celsius]
- *     0: temperature for trigger_level0 interrupt
- *        condition for trigger_level0 interrupt:
- *             current temperature > threshold + trigger_levels[0]
- *     1: temperature for trigger_level1 interrupt
- *        condition for trigger_level1 interrupt:
- *             current temperature > threshold + trigger_levels[1]
- *     2: temperature for trigger_level2 interrupt
- *        condition for trigger_level2 interrupt:
- *             current temperature > threshold + trigger_levels[2]
- *     3: temperature for trigger_level3 interrupt
- *        condition for trigger_level3 interrupt:
- *             current temperature > threshold + trigger_levels[3]
- * @trigger_type: defines the type of trigger. Possible values are,
- *     THROTTLE_ACTIVE trigger type
- *     THROTTLE_PASSIVE trigger type
- *     SW_TRIP trigger type
- *     HW_TRIP
- * @trigger_enable[]: array to denote which trigger levels are enabled.
- *     1 = enable trigger_level[] interrupt,
- *     0 = disable trigger_level[] interrupt
- * @max_trigger_level: max trigger level supported by the TMU
- * @non_hw_trigger_levels: number of defined non-hardware trigger levels
  * @gain: gain of amplifier in the positive-TC generator block
  *     0 < gain <= 15
  * @reference_voltage: reference voltage of amplifier
@@ -86,24 +50,12 @@ enum soc_type {
  * @efuse_value: platform defined fuse value
  * @min_efuse_value: minimum valid trimming data
  * @max_efuse_value: maximum valid trimming data
- * @first_point_trim: temp value of the first point trimming
- * @second_point_trim: temp value of the second point trimming
  * @default_temp_offset: default temperature offset in case of no trimming
  * @cal_type: calibration type for temperature
- * @freq_clip_table: Table representing frequency reduction percentage.
- * @freq_tab_count: Count of the above table as frequency reduction may
- *     applicable to only some of the trigger levels.
  *
  * This structure is required for configuration of exynos_tmu driver.
  */
 struct exynos_tmu_platform_data {
-       u8 threshold;
-       u8 threshold_falling;
-       u8 trigger_levels[MAX_TRIP_COUNT];
-       enum trigger_type trigger_type[MAX_TRIP_COUNT];
-       bool trigger_enable[MAX_TRIP_COUNT];
-       u8 max_trigger_level;
-       u8 non_hw_trigger_levels;
        u8 gain;
        u8 reference_voltage;
        u8 noise_cancel_mode;
@@ -115,30 +67,9 @@ struct exynos_tmu_platform_data {
        u8 second_point_trim;
        u8 default_temp_offset;
 
-       enum calibration_type cal_type;
        enum soc_type type;
-       struct freq_clip_table freq_tab[4];
-       unsigned int freq_tab_count;
-};
-
-/**
- * struct exynos_tmu_init_data
- * @tmu_count: number of TMU instances.
- * @tmu_data: platform data of all TMU instances.
- * This structure is required to store data for multi-instance exynos tmu
- * driver.
- */
-struct exynos_tmu_init_data {
-       int tmu_count;
-       struct exynos_tmu_platform_data tmu_data[];
+       u32 cal_type;
+       u32 cal_mode;
 };
 
-extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
-
 #endif /* _EXYNOS_TMU_H */
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
deleted file mode 100644 (file)
index b239100..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * exynos_tmu_data.c - Samsung EXYNOS tmu data file
- *
- *  Copyright (C) 2013 Samsung Electronics
- *  Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#include "exynos_thermal_common.h"
-#include "exynos_tmu.h"
-
-struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
-       .tmu_data = {
-               {
-               .threshold = 80,
-               .trigger_levels[0] = 5,
-               .trigger_levels[1] = 20,
-               .trigger_levels[2] = 30,
-               .trigger_enable[0] = true,
-               .trigger_enable[1] = true,
-               .trigger_enable[2] = true,
-               .trigger_enable[3] = false,
-               .trigger_type[0] = THROTTLE_ACTIVE,
-               .trigger_type[1] = THROTTLE_ACTIVE,
-               .trigger_type[2] = SW_TRIP,
-               .max_trigger_level = 4,
-               .non_hw_trigger_levels = 3,
-               .gain = 15,
-               .reference_voltage = 7,
-               .cal_type = TYPE_ONE_POINT_TRIMMING,
-               .min_efuse_value = 40,
-               .max_efuse_value = 100,
-               .first_point_trim = 25,
-               .second_point_trim = 85,
-               .default_temp_offset = 50,
-               .freq_tab[0] = {
-                       .freq_clip_max = 800 * 1000,
-                       .temp_level = 85,
-                       },
-               .freq_tab[1] = {
-                       .freq_clip_max = 200 * 1000,
-                       .temp_level = 100,
-               },
-               .freq_tab_count = 2,
-               .type = SOC_ARCH_EXYNOS4210,
-               },
-       },
-       .tmu_count = 1,
-};
-
-#define EXYNOS3250_TMU_DATA \
-       .threshold_falling = 10, \
-       .trigger_levels[0] = 70, \
-       .trigger_levels[1] = 95, \
-       .trigger_levels[2] = 110, \
-       .trigger_levels[3] = 120, \
-       .trigger_enable[0] = true, \
-       .trigger_enable[1] = true, \
-       .trigger_enable[2] = true, \
-       .trigger_enable[3] = false, \
-       .trigger_type[0] = THROTTLE_ACTIVE, \
-       .trigger_type[1] = THROTTLE_ACTIVE, \
-       .trigger_type[2] = SW_TRIP, \
-       .trigger_type[3] = HW_TRIP, \
-       .max_trigger_level = 4, \
-       .non_hw_trigger_levels = 3, \
-       .gain = 8, \
-       .reference_voltage = 16, \
-       .noise_cancel_mode = 4, \
-       .cal_type = TYPE_TWO_POINT_TRIMMING, \
-       .efuse_value = 55, \
-       .min_efuse_value = 40, \
-       .max_efuse_value = 100, \
-       .first_point_trim = 25, \
-       .second_point_trim = 85, \
-       .default_temp_offset = 50, \
-       .freq_tab[0] = { \
-               .freq_clip_max = 800 * 1000, \
-               .temp_level = 70, \
-       }, \
-       .freq_tab[1] = { \
-               .freq_clip_max = 400 * 1000, \
-               .temp_level = 95, \
-       }, \
-       .freq_tab_count = 2
-
-struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
-       .tmu_data = {
-               {
-                       EXYNOS3250_TMU_DATA,
-                       .type = SOC_ARCH_EXYNOS3250,
-               },
-       },
-       .tmu_count = 1,
-};
-
-#define EXYNOS4412_TMU_DATA \
-       .threshold_falling = 10, \
-       .trigger_levels[0] = 70, \
-       .trigger_levels[1] = 95, \
-       .trigger_levels[2] = 110, \
-       .trigger_levels[3] = 120, \
-       .trigger_enable[0] = true, \
-       .trigger_enable[1] = true, \
-       .trigger_enable[2] = true, \
-       .trigger_enable[3] = false, \
-       .trigger_type[0] = THROTTLE_ACTIVE, \
-       .trigger_type[1] = THROTTLE_ACTIVE, \
-       .trigger_type[2] = SW_TRIP, \
-       .trigger_type[3] = HW_TRIP, \
-       .max_trigger_level = 4, \
-       .non_hw_trigger_levels = 3, \
-       .gain = 8, \
-       .reference_voltage = 16, \
-       .noise_cancel_mode = 4, \
-       .cal_type = TYPE_ONE_POINT_TRIMMING, \
-       .efuse_value = 55, \
-       .min_efuse_value = 40, \
-       .max_efuse_value = 100, \
-       .first_point_trim = 25, \
-       .second_point_trim = 85, \
-       .default_temp_offset = 50, \
-       .freq_tab[0] = { \
-               .freq_clip_max = 1400 * 1000, \
-               .temp_level = 70, \
-       }, \
-       .freq_tab[1] = { \
-               .freq_clip_max = 400 * 1000, \
-               .temp_level = 95, \
-       }, \
-       .freq_tab_count = 2
-
-struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
-       .tmu_data = {
-               {
-                       EXYNOS4412_TMU_DATA,
-                       .type = SOC_ARCH_EXYNOS4412,
-               },
-       },
-       .tmu_count = 1,
-};
-
-struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
-       .tmu_data = {
-               {
-                       EXYNOS4412_TMU_DATA,
-                       .type = SOC_ARCH_EXYNOS5250,
-               },
-       },
-       .tmu_count = 1,
-};
-
-#define __EXYNOS5260_TMU_DATA  \
-       .threshold_falling = 10, \
-       .trigger_levels[0] = 85, \
-       .trigger_levels[1] = 103, \
-       .trigger_levels[2] = 110, \
-       .trigger_levels[3] = 120, \
-       .trigger_enable[0] = true, \
-       .trigger_enable[1] = true, \
-       .trigger_enable[2] = true, \
-       .trigger_enable[3] = false, \
-       .trigger_type[0] = THROTTLE_ACTIVE, \
-       .trigger_type[1] = THROTTLE_ACTIVE, \
-       .trigger_type[2] = SW_TRIP, \
-       .trigger_type[3] = HW_TRIP, \
-       .max_trigger_level = 4, \
-       .non_hw_trigger_levels = 3, \
-       .gain = 8, \
-       .reference_voltage = 16, \
-       .noise_cancel_mode = 4, \
-       .cal_type = TYPE_ONE_POINT_TRIMMING, \
-       .efuse_value = 55, \
-       .min_efuse_value = 40, \
-       .max_efuse_value = 100, \
-       .first_point_trim = 25, \
-       .second_point_trim = 85, \
-       .default_temp_offset = 50, \
-       .freq_tab[0] = { \
-               .freq_clip_max = 800 * 1000, \
-               .temp_level = 85, \
-       }, \
-       .freq_tab[1] = { \
-               .freq_clip_max = 200 * 1000, \
-               .temp_level = 103, \
-       }, \
-       .freq_tab_count = 2, \
-
-#define EXYNOS5260_TMU_DATA \
-       __EXYNOS5260_TMU_DATA \
-       .type = SOC_ARCH_EXYNOS5260
-
-struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
-       .tmu_data = {
-               { EXYNOS5260_TMU_DATA },
-               { EXYNOS5260_TMU_DATA },
-               { EXYNOS5260_TMU_DATA },
-               { EXYNOS5260_TMU_DATA },
-               { EXYNOS5260_TMU_DATA },
-       },
-       .tmu_count = 5,
-};
-
-#define EXYNOS5420_TMU_DATA \
-       __EXYNOS5260_TMU_DATA \
-       .type = SOC_ARCH_EXYNOS5420
-
-#define EXYNOS5420_TMU_DATA_SHARED \
-       __EXYNOS5260_TMU_DATA \
-       .type = SOC_ARCH_EXYNOS5420_TRIMINFO
-
-struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
-       .tmu_data = {
-               { EXYNOS5420_TMU_DATA },
-               { EXYNOS5420_TMU_DATA },
-               { EXYNOS5420_TMU_DATA_SHARED },
-               { EXYNOS5420_TMU_DATA_SHARED },
-               { EXYNOS5420_TMU_DATA_SHARED },
-       },
-       .tmu_count = 5,
-};
-
-#define EXYNOS5440_TMU_DATA \
-       .trigger_levels[0] = 100, \
-       .trigger_levels[4] = 105, \
-       .trigger_enable[0] = 1, \
-       .trigger_type[0] = SW_TRIP, \
-       .trigger_type[4] = HW_TRIP, \
-       .max_trigger_level = 5, \
-       .non_hw_trigger_levels = 1, \
-       .gain = 5, \
-       .reference_voltage = 16, \
-       .noise_cancel_mode = 4, \
-       .cal_type = TYPE_ONE_POINT_TRIMMING, \
-       .efuse_value = 0x5b2d, \
-       .min_efuse_value = 16, \
-       .max_efuse_value = 76, \
-       .first_point_trim = 25, \
-       .second_point_trim = 70, \
-       .default_temp_offset = 25, \
-       .type = SOC_ARCH_EXYNOS5440
-
-struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
-       .tmu_data = {
-               { EXYNOS5440_TMU_DATA } ,
-               { EXYNOS5440_TMU_DATA } ,
-               { EXYNOS5440_TMU_DATA } ,
-       },
-       .tmu_count = 3,
-};
index fdd1f523a1eda4e94110e12dac531c218837dc08..5a0f12d08e8b81cc26b71b7e6c065091a098920f 100644 (file)
@@ -45,7 +45,7 @@
  *    c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
  *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
  *       if the cooling state already equals lower limit,
- *       deactive the thermal instance
+ *       deactivate the thermal instance
  */
 static unsigned long get_target_state(struct thermal_instance *instance,
                                enum thermal_trend trend, bool throttle)
@@ -169,7 +169,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 }
 
 /**
- * step_wise_throttle - throttles devices asscciated with the given zone
+ * step_wise_throttle - throttles devices associated with the given zone
  * @tz - thermal_zone_device
  * @trip - the trip point
  * @trip_type - type of the trip point
index 634b6ce0e63ace5757c06513b45ac563dcfab593..62a5d449c38805019db7d554e6c0f7f43d215341 100644 (file)
@@ -1402,7 +1402,7 @@ int ti_bandgap_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
 {
        int i;
index 3fb054a10f6a0fde450e29a98ee4cdf90e18f6c7..a38c1756442aa2611e0e207466aabba4e72b94cc 100644 (file)
@@ -429,7 +429,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
 
        data = ti_bandgap_get_sensor_data(bgp, id);
 
-       if (data && data->cool_dev)
+       if (data)
                cpufreq_cooling_unregister(data->cool_dev);
 
        return 0;
index 5d916c7a216b86829d4f10190787583258f3852e..d2501f01cd03483b3f83a384027275abb5b5c089 100644 (file)
@@ -489,7 +489,7 @@ config SERIAL_MFD_HSU
        select SERIAL_CORE
 
 config SERIAL_MFD_HSU_CONSOLE
-       boolean "Medfile HSU serial console support"
+       bool "Medfile HSU serial console support"
        depends on SERIAL_MFD_HSU=y
        select SERIAL_CORE_CONSOLE
 
index 96539038c03ace1de0181b1f1d772aa10a7e67b2..b454d05be5838e08ecdda4fd60514ad0f444edc9 100644 (file)
@@ -45,7 +45,7 @@ menuconfig USB_GADGET
 if USB_GADGET
 
 config USB_GADGET_DEBUG
-       boolean "Debugging messages (DEVELOPMENT)"
+       bool "Debugging messages (DEVELOPMENT)"
        depends on DEBUG_KERNEL
        help
           Many controller and gadget drivers will print some debugging
@@ -73,7 +73,7 @@ config USB_GADGET_VERBOSE
           production build.
 
 config USB_GADGET_DEBUG_FILES
-       boolean "Debugging information files (DEVELOPMENT)"
+       bool "Debugging information files (DEVELOPMENT)"
        depends on PROC_FS
        help
           Some of the drivers in the "gadget" framework can expose
@@ -84,7 +84,7 @@ config USB_GADGET_DEBUG_FILES
           here.  If in doubt, or to conserve kernel memory, say "N".
 
 config USB_GADGET_DEBUG_FS
-       boolean "Debugging information files in debugfs (DEVELOPMENT)"
+       bool "Debugging information files in debugfs (DEVELOPMENT)"
        depends on DEBUG_FS
        help
           Some of the drivers in the "gadget" framework can expose
@@ -230,7 +230,7 @@ config USB_CONFIGFS
          For more information see Documentation/usb/gadget_configfs.txt.
 
 config USB_CONFIGFS_SERIAL
-       boolean "Generic serial bulk in/out"
+       bool "Generic serial bulk in/out"
        depends on USB_CONFIGFS
        depends on TTY
        select USB_U_SERIAL
@@ -239,7 +239,7 @@ config USB_CONFIGFS_SERIAL
          The function talks to the Linux-USB generic serial driver.
 
 config USB_CONFIGFS_ACM
-       boolean "Abstract Control Model (CDC ACM)"
+       bool "Abstract Control Model (CDC ACM)"
        depends on USB_CONFIGFS
        depends on TTY
        select USB_U_SERIAL
@@ -249,7 +249,7 @@ config USB_CONFIGFS_ACM
          MS-Windows hosts or with the Linux-USB "cdc-acm" driver.
 
 config USB_CONFIGFS_OBEX
-       boolean "Object Exchange Model (CDC OBEX)"
+       bool "Object Exchange Model (CDC OBEX)"
        depends on USB_CONFIGFS
        depends on TTY
        select USB_U_SERIAL
@@ -259,7 +259,7 @@ config USB_CONFIGFS_OBEX
          since the kernel itself doesn't implement the OBEX protocol.
 
 config USB_CONFIGFS_NCM
-       boolean "Network Control Model (CDC NCM)"
+       bool "Network Control Model (CDC NCM)"
        depends on USB_CONFIGFS
        depends on NET
        select USB_U_ETHER
@@ -270,7 +270,7 @@ config USB_CONFIGFS_NCM
          different alignment possibilities.
 
 config USB_CONFIGFS_ECM
-       boolean "Ethernet Control Model (CDC ECM)"
+       bool "Ethernet Control Model (CDC ECM)"
        depends on USB_CONFIGFS
        depends on NET
        select USB_U_ETHER
@@ -282,7 +282,7 @@ config USB_CONFIGFS_ECM
          supported by firmware for smart network devices.
 
 config USB_CONFIGFS_ECM_SUBSET
-       boolean "Ethernet Control Model (CDC ECM) subset"
+       bool "Ethernet Control Model (CDC ECM) subset"
        depends on USB_CONFIGFS
        depends on NET
        select USB_U_ETHER
@@ -323,7 +323,7 @@ config USB_CONFIGFS_EEM
          the host is the same (a usbX device), so the differences are minimal.
 
 config USB_CONFIGFS_PHONET
-       boolean "Phonet protocol"
+       bool "Phonet protocol"
        depends on USB_CONFIGFS
        depends on NET
        depends on PHONET
@@ -333,7 +333,7 @@ config USB_CONFIGFS_PHONET
          The Phonet protocol implementation for USB device.
 
 config USB_CONFIGFS_MASS_STORAGE
-       boolean "Mass storage"
+       bool "Mass storage"
        depends on USB_CONFIGFS
        depends on BLOCK
        select USB_F_MASS_STORAGE
@@ -344,7 +344,7 @@ config USB_CONFIGFS_MASS_STORAGE
          specified as a module parameter or sysfs option.
 
 config USB_CONFIGFS_F_LB_SS
-       boolean "Loopback and sourcesink function (for testing)"
+       bool "Loopback and sourcesink function (for testing)"
        depends on USB_CONFIGFS
        select USB_F_SS_LB
        help
@@ -357,7 +357,7 @@ config USB_CONFIGFS_F_LB_SS
          and its driver through a basic set of functional tests.
 
 config USB_CONFIGFS_F_FS
-       boolean "Function filesystem (FunctionFS)"
+       bool "Function filesystem (FunctionFS)"
        depends on USB_CONFIGFS
        select USB_F_FS
        help
@@ -369,7 +369,7 @@ config USB_CONFIGFS_F_FS
          mass storage) and other are implemented in user space.
 
 config USB_CONFIGFS_F_UAC1
-       boolean "Audio Class 1.0"
+       bool "Audio Class 1.0"
        depends on USB_CONFIGFS
        depends on SND
        select USB_LIBCOMPOSITE
@@ -382,7 +382,7 @@ config USB_CONFIGFS_F_UAC1
          on the device.
 
 config USB_CONFIGFS_F_UAC2
-       boolean "Audio Class 2.0"
+       bool "Audio Class 2.0"
        depends on USB_CONFIGFS
        depends on SND
        select USB_LIBCOMPOSITE
@@ -400,7 +400,7 @@ config USB_CONFIGFS_F_UAC2
          wants as audio data to the USB Host.
 
 config USB_CONFIGFS_F_MIDI
-       boolean "MIDI function"
+       bool "MIDI function"
        depends on USB_CONFIGFS
        depends on SND
        select USB_LIBCOMPOSITE
@@ -414,7 +414,7 @@ config USB_CONFIGFS_F_MIDI
          ALSA's aconnect utility etc.
 
 config USB_CONFIGFS_F_HID
-       boolean "HID function"
+       bool "HID function"
        depends on USB_CONFIGFS
        select USB_F_HID
        help
index fd48ef3af4eb76f6ce34314cfced2ca305e395c5..113c87e22117d432708001bec07957678267ea46 100644 (file)
@@ -40,7 +40,7 @@ config USB_ZERO
          dynamically linked module called "g_zero".
 
 config USB_ZERO_HNPTEST
-       boolean "HNP Test Device"
+       bool "HNP Test Device"
        depends on USB_ZERO && USB_OTG
        help
          You can configure this device to enumerate using the device
index 366e551aeff0bd11084ac89e4b131b6f6953369d..9a3a6b00391aa41042836d78d1b88eed27b9ee73 100644 (file)
@@ -199,7 +199,7 @@ config USB_S3C2410
          S3C2440 processors.
 
 config USB_S3C2410_DEBUG
-       boolean "S3C2410 udc debug messages"
+       bool "S3C2410 udc debug messages"
        depends on USB_S3C2410
 
 config USB_S3C_HSUDC
@@ -288,7 +288,7 @@ config USB_NET2272
          gadget drivers to also be dynamically linked.
 
 config USB_NET2272_DMA
-       boolean "Support external DMA controller"
+       bool "Support external DMA controller"
        depends on USB_NET2272 && HAS_DMA
        help
          The NET2272 part can optionally support an external DMA
index c6d0c8e745b976232301e55493b45756d5635fb1..52d3d58252e1fd4a3702705a8d12f57d81942ca2 100644 (file)
@@ -119,7 +119,7 @@ config TAHVO_USB
 
 config TAHVO_USB_HOST_BY_DEFAULT
        depends on TAHVO_USB
-       boolean "Device in USB host mode by default"
+       bool "Device in USB host mode by default"
        help
          Say Y here, if you want the device to enter USB host mode
          by default on bootup.
index 7cc0122a18cecbb7ef45cf8e438112ec2fb4ff00..f8a186381ae8726887657c6c868a8b2ceeef2e54 100644 (file)
@@ -239,9 +239,12 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
 
                        return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
                }
-       } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX)
+       } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
                if (pci_is_pcie(vdev->pdev))
                        return 1;
+       } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
+               return 1;
+       }
 
        return 0;
 }
@@ -464,6 +467,7 @@ static long vfio_pci_ioctl(void *device_data,
 
                switch (info.index) {
                case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+               case VFIO_PCI_REQ_IRQ_INDEX:
                        break;
                case VFIO_PCI_ERR_IRQ_INDEX:
                        if (pci_is_pcie(vdev->pdev))
@@ -828,6 +832,20 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
                               req_len, vma->vm_page_prot);
 }
 
+static void vfio_pci_request(void *device_data, unsigned int count)
+{
+       struct vfio_pci_device *vdev = device_data;
+
+       mutex_lock(&vdev->igate);
+
+       if (vdev->req_trigger) {
+               dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+               eventfd_signal(vdev->req_trigger, 1);
+       }
+
+       mutex_unlock(&vdev->igate);
+}
+
 static const struct vfio_device_ops vfio_pci_ops = {
        .name           = "vfio-pci",
        .open           = vfio_pci_open,
@@ -836,6 +854,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
        .read           = vfio_pci_read,
        .write          = vfio_pci_write,
        .mmap           = vfio_pci_mmap,
+       .request        = vfio_pci_request,
 };
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
index e8d695b3f54e0fdbf74e567dc5ae109cb8ea5da2..f88bfdf5b6a036a6bf1aae3b8abe3ec6d944ffe0 100644 (file)
@@ -763,46 +763,70 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
        return 0;
 }
 
-static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
-                                   unsigned index, unsigned start,
-                                   unsigned count, uint32_t flags, void *data)
+static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
+                                          uint32_t flags, void *data)
 {
        int32_t fd = *(int32_t *)data;
 
-       if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
-           !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
+       if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
                return -EINVAL;
 
        /* DATA_NONE/DATA_BOOL enables loopback testing */
        if (flags & VFIO_IRQ_SET_DATA_NONE) {
-               if (vdev->err_trigger)
-                       eventfd_signal(vdev->err_trigger, 1);
+               if (*ctx)
+                       eventfd_signal(*ctx, 1);
                return 0;
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                uint8_t trigger = *(uint8_t *)data;
-               if (trigger && vdev->err_trigger)
-                       eventfd_signal(vdev->err_trigger, 1);
+               if (trigger && *ctx)
+                       eventfd_signal(*ctx, 1);
                return 0;
        }
 
        /* Handle SET_DATA_EVENTFD */
        if (fd == -1) {
-               if (vdev->err_trigger)
-                       eventfd_ctx_put(vdev->err_trigger);
-               vdev->err_trigger = NULL;
+               if (*ctx)
+                       eventfd_ctx_put(*ctx);
+               *ctx = NULL;
                return 0;
        } else if (fd >= 0) {
                struct eventfd_ctx *efdctx;
                efdctx = eventfd_ctx_fdget(fd);
                if (IS_ERR(efdctx))
                        return PTR_ERR(efdctx);
-               if (vdev->err_trigger)
-                       eventfd_ctx_put(vdev->err_trigger);
-               vdev->err_trigger = efdctx;
+               if (*ctx)
+                       eventfd_ctx_put(*ctx);
+               *ctx = efdctx;
                return 0;
        } else
                return -EINVAL;
 }
+
+static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
+                                   unsigned index, unsigned start,
+                                   unsigned count, uint32_t flags, void *data)
+{
+       if (index != VFIO_PCI_ERR_IRQ_INDEX)
+               return -EINVAL;
+
+       /*
+        * We should sanitize start & count, but that wasn't caught
+        * originally, so this IRQ index must forever ignore them :-(
+        */
+
+       return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+}
+
+static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
+                                   unsigned index, unsigned start,
+                                   unsigned count, uint32_t flags, void *data)
+{
+       if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+               return -EINVAL;
+
+       return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+}
+
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
                            unsigned index, unsigned start, unsigned count,
                            void *data)
@@ -844,6 +868,12 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
                                func = vfio_pci_set_err_trigger;
                        break;
                }
+       case VFIO_PCI_REQ_IRQ_INDEX:
+               switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+               case VFIO_IRQ_SET_ACTION_TRIGGER:
+                       func = vfio_pci_set_req_trigger;
+                       break;
+               }
        }
 
        if (!func)
index 671c17a6e6d029dfdffe5d7243150ed757e44cf4..c9f9b323f152733685f9dafc67793472921658ec 100644 (file)
@@ -58,6 +58,7 @@ struct vfio_pci_device {
        struct pci_saved_state  *pci_saved_state;
        int                     refcnt;
        struct eventfd_ctx      *err_trigger;
+       struct eventfd_ctx      *req_trigger;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
index f018d8d0f975360a091339699d12348c5c93a12b..4cde8550144406c02715448b8fbc4a88b538189b 100644 (file)
@@ -63,6 +63,11 @@ struct vfio_container {
        void                            *iommu_data;
 };
 
+struct vfio_unbound_dev {
+       struct device                   *dev;
+       struct list_head                unbound_next;
+};
+
 struct vfio_group {
        struct kref                     kref;
        int                             minor;
@@ -75,6 +80,8 @@ struct vfio_group {
        struct notifier_block           nb;
        struct list_head                vfio_next;
        struct list_head                container_next;
+       struct list_head                unbound_list;
+       struct mutex                    unbound_lock;
        atomic_t                        opened;
 };
 
@@ -204,6 +211,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
        kref_init(&group->kref);
        INIT_LIST_HEAD(&group->device_list);
        mutex_init(&group->device_lock);
+       INIT_LIST_HEAD(&group->unbound_list);
+       mutex_init(&group->unbound_lock);
        atomic_set(&group->container_users, 0);
        atomic_set(&group->opened, 0);
        group->iommu_group = iommu_group;
@@ -264,13 +273,22 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 static void vfio_group_release(struct kref *kref)
 {
        struct vfio_group *group = container_of(kref, struct vfio_group, kref);
+       struct vfio_unbound_dev *unbound, *tmp;
+       struct iommu_group *iommu_group = group->iommu_group;
 
        WARN_ON(!list_empty(&group->device_list));
 
+       list_for_each_entry_safe(unbound, tmp,
+                                &group->unbound_list, unbound_next) {
+               list_del(&unbound->unbound_next);
+               kfree(unbound);
+       }
+
        device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
        list_del(&group->vfio_next);
        vfio_free_group_minor(group->minor);
        vfio_group_unlock_and_free(group);
+       iommu_group_put(iommu_group);
 }
 
 static void vfio_group_put(struct vfio_group *group)
@@ -440,17 +458,36 @@ static bool vfio_whitelisted_driver(struct device_driver *drv)
 }
 
 /*
- * A vfio group is viable for use by userspace if all devices are either
- * driver-less or bound to a vfio or whitelisted driver.  We test the
- * latter by the existence of a struct vfio_device matching the dev.
+ * A vfio group is viable for use by userspace if all devices are in
+ * one of the following states:
+ *  - driver-less
+ *  - bound to a vfio driver
+ *  - bound to a whitelisted driver
+ *
+ * We use two methods to determine whether a device is bound to a vfio
+ * driver.  The first is to test whether the device exists in the vfio
+ * group.  The second is to test if the device exists on the group
+ * unbound_list, indicating it's in the middle of transitioning from
+ * a vfio driver to driver-less.
  */
 static int vfio_dev_viable(struct device *dev, void *data)
 {
        struct vfio_group *group = data;
        struct vfio_device *device;
        struct device_driver *drv = ACCESS_ONCE(dev->driver);
+       struct vfio_unbound_dev *unbound;
+       int ret = -EINVAL;
 
-       if (!drv || vfio_whitelisted_driver(drv))
+       mutex_lock(&group->unbound_lock);
+       list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
+               if (dev == unbound->dev) {
+                       ret = 0;
+                       break;
+               }
+       }
+       mutex_unlock(&group->unbound_lock);
+
+       if (!ret || !drv || vfio_whitelisted_driver(drv))
                return 0;
 
        device = vfio_group_get_device(group, dev);
@@ -459,7 +496,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
                return 0;
        }
 
-       return -EINVAL;
+       return ret;
 }
 
 /**
@@ -501,6 +538,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 {
        struct vfio_group *group = container_of(nb, struct vfio_group, nb);
        struct device *dev = data;
+       struct vfio_unbound_dev *unbound;
 
        /*
         * Need to go through a group_lock lookup to get a reference or we
@@ -550,6 +588,17 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
                 * stop the system to maintain isolation.  At a minimum, we'd
                 * want a toggle to disable driver auto probe for this device.
                 */
+
+               mutex_lock(&group->unbound_lock);
+               list_for_each_entry(unbound,
+                                   &group->unbound_list, unbound_next) {
+                       if (dev == unbound->dev) {
+                               list_del(&unbound->unbound_next);
+                               kfree(unbound);
+                               break;
+                       }
+               }
+               mutex_unlock(&group->unbound_lock);
                break;
        }
 
@@ -578,6 +627,12 @@ int vfio_add_group_dev(struct device *dev,
                        iommu_group_put(iommu_group);
                        return PTR_ERR(group);
                }
+       } else {
+               /*
+                * A found vfio_group already holds a reference to the
+                * iommu_group.  A created vfio_group keeps the reference.
+                */
+               iommu_group_put(iommu_group);
        }
 
        device = vfio_group_get_device(group, dev);
@@ -586,21 +641,19 @@ int vfio_add_group_dev(struct device *dev,
                     dev_name(dev), iommu_group_id(iommu_group));
                vfio_device_put(device);
                vfio_group_put(group);
-               iommu_group_put(iommu_group);
                return -EBUSY;
        }
 
        device = vfio_group_create_device(group, dev, ops, device_data);
        if (IS_ERR(device)) {
                vfio_group_put(group);
-               iommu_group_put(iommu_group);
                return PTR_ERR(device);
        }
 
        /*
-        * Added device holds reference to iommu_group and vfio_device
-        * (which in turn holds reference to vfio_group).  Drop extra
-        * group reference used while acquiring device.
+        * Drop all but the vfio_device reference.  The vfio_device holds
+        * a reference to the vfio_group, which holds a reference to the
+        * iommu_group.
         */
        vfio_group_put(group);
 
@@ -655,8 +708,9 @@ void *vfio_del_group_dev(struct device *dev)
 {
        struct vfio_device *device = dev_get_drvdata(dev);
        struct vfio_group *group = device->group;
-       struct iommu_group *iommu_group = group->iommu_group;
        void *device_data = device->device_data;
+       struct vfio_unbound_dev *unbound;
+       unsigned int i = 0;
 
        /*
         * The group exists so long as we have a device reference.  Get
@@ -664,14 +718,49 @@ void *vfio_del_group_dev(struct device *dev)
         */
        vfio_group_get(group);
 
+       /*
+        * When the device is removed from the group, the group suddenly
+        * becomes non-viable; the device has a driver (until the unbind
+        * completes), but it's not present in the group.  This is bad news
+        * for any external users that need to re-acquire a group reference
+        * in order to match and release their existing reference.  To
+        * solve this, we track such devices on the unbound_list to bridge
+        * the gap until they're fully unbound.
+        */
+       unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
+       if (unbound) {
+               unbound->dev = dev;
+               mutex_lock(&group->unbound_lock);
+               list_add(&unbound->unbound_next, &group->unbound_list);
+               mutex_unlock(&group->unbound_lock);
+       }
+       WARN_ON(!unbound);
+
        vfio_device_put(device);
 
-       /* TODO send a signal to encourage this to be released */
-       wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+       /*
+        * If the device is still present in the group after the above
+        * 'put', then it is in use and we need to request it from the
+        * bus driver.  The driver may in turn need to request the
+        * device from the user.  We send the request on an arbitrary
+        * interval with counter to allow the driver to take escalating
+        * measures to release the device if it has the ability to do so.
+        */
+       do {
+               device = vfio_group_get_device(group, dev);
+               if (!device)
+                       break;
 
-       vfio_group_put(group);
+               if (device->ops->request)
+                       device->ops->request(device_data, i++);
 
-       iommu_group_put(iommu_group);
+               vfio_device_put(device);
+
+       } while (wait_event_interruptible_timeout(vfio.release_q,
+                                                 !vfio_dev_present(group, dev),
+                                                 HZ * 10) <= 0);
+
+       vfio_group_put(group);
 
        return device_data;
 }
index 4a9d666f1e9186ed07071c0d909ba167e814998a..57d8c37a002b0e0b8d49891536b8ddfab64afc6b 100644 (file)
@@ -66,6 +66,7 @@ struct vfio_domain {
        struct list_head        next;
        struct list_head        group_list;
        int                     prot;           /* IOMMU_CACHE */
+       bool                    fgsp;           /* Fine-grained super pages */
 };
 
 struct vfio_dma {
@@ -264,6 +265,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        bool lock_cap = capable(CAP_IPC_LOCK);
        long ret, i;
+       bool rsvd;
 
        if (!current->mm)
                return -ENODEV;
@@ -272,10 +274,9 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        if (ret)
                return ret;
 
-       if (is_invalid_reserved_pfn(*pfn_base))
-               return 1;
+       rsvd = is_invalid_reserved_pfn(*pfn_base);
 
-       if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+       if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
                put_pfn(*pfn_base, prot);
                pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
                        limit << PAGE_SHIFT);
@@ -283,7 +284,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        }
 
        if (unlikely(disable_hugepages)) {
-               vfio_lock_acct(1);
+               if (!rsvd)
+                       vfio_lock_acct(1);
                return 1;
        }
 
@@ -295,12 +297,14 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
                if (ret)
                        break;
 
-               if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+               if (pfn != *pfn_base + i ||
+                   rsvd != is_invalid_reserved_pfn(pfn)) {
                        put_pfn(pfn, prot);
                        break;
                }
 
-               if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
+               if (!rsvd && !lock_cap &&
+                   current->mm->locked_vm + i + 1 > limit) {
                        put_pfn(pfn, prot);
                        pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
                                __func__, limit << PAGE_SHIFT);
@@ -308,7 +312,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
                }
        }
 
-       vfio_lock_acct(i);
+       if (!rsvd)
+               vfio_lock_acct(i);
 
        return i;
 }
@@ -346,12 +351,14 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
        domain = d = list_first_entry(&iommu->domain_list,
                                      struct vfio_domain, next);
 
-       list_for_each_entry_continue(d, &iommu->domain_list, next)
+       list_for_each_entry_continue(d, &iommu->domain_list, next) {
                iommu_unmap(d->domain, dma->iova, dma->size);
+               cond_resched();
+       }
 
        while (iova < end) {
-               size_t unmapped;
-               phys_addr_t phys;
+               size_t unmapped, len;
+               phys_addr_t phys, next;
 
                phys = iommu_iova_to_phys(domain->domain, iova);
                if (WARN_ON(!phys)) {
@@ -359,7 +366,19 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
                        continue;
                }
 
-               unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE);
+               /*
+                * To optimize for fewer iommu_unmap() calls, each of which
+                * may require hardware cache flushing, try to find the
+                * largest contiguous physical memory chunk to unmap.
+                */
+               for (len = PAGE_SIZE;
+                    !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
+                       next = iommu_iova_to_phys(domain->domain, iova + len);
+                       if (next != phys + len)
+                               break;
+               }
+
+               unmapped = iommu_unmap(domain->domain, iova, len);
                if (WARN_ON(!unmapped))
                        break;
 
@@ -367,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
                                             unmapped >> PAGE_SHIFT,
                                             dma->prot, false);
                iova += unmapped;
+
+               cond_resched();
        }
 
        vfio_lock_acct(-unlocked);
@@ -511,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
                            map_try_harder(d, iova, pfn, npage, prot))
                                goto unwind;
                }
+
+               cond_resched();
        }
 
        return 0;
@@ -665,6 +688,39 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
        return 0;
 }
 
+/*
+ * We change our unmap behavior slightly depending on whether the IOMMU
+ * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
+ * for practically any contiguous power-of-two mapping we give it.  This means
+ * we don't need to look for contiguous chunks ourselves to make unmapping
+ * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
+ * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
+ * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
+ * hugetlbfs is in use.
+ */
+static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+{
+       struct page *pages;
+       int ret, order = get_order(PAGE_SIZE * 2);
+
+       pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+       if (!pages)
+               return;
+
+       ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
+                       IOMMU_READ | IOMMU_WRITE | domain->prot);
+       if (!ret) {
+               size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+
+               if (unmapped == PAGE_SIZE)
+                       iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
+               else
+                       domain->fgsp = true;
+       }
+
+       __free_pages(pages, order);
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
                                         struct iommu_group *iommu_group)
 {
@@ -758,6 +814,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
                }
        }
 
+       vfio_test_domain_fgsp(domain);
+
        /* replay mappings on new domains */
        ret = vfio_iommu_replay(iommu, domain);
        if (ret)
index 633012cc9a57d19bdcd74bb04c66c2d75191197b..18f05bff8826672a17a645d8fddc2a7df489e302 100644 (file)
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
                         * TODO: support TSO.
                         */
                        iov_iter_advance(&msg.msg_iter, vhost_hlen);
-               } else {
-                       /* It'll come from socket; we'll need to patch
-                        * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
-                        */
-                       iov_iter_advance(&fixup, sizeof(hdr));
                }
                err = sock->ops->recvmsg(sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
                        continue;
                }
                /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
-               if (unlikely(vhost_hlen) &&
-                   copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
-                       vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
-                              vq->iov->iov_base);
-                       break;
+               if (unlikely(vhost_hlen)) {
+                       if (copy_to_iter(&hdr, sizeof(hdr),
+                                        &fixup) != sizeof(hdr)) {
+                               vq_err(vq, "Unable to write vnet_hdr "
+                                      "at addr %p\n", vq->iov->iov_base);
+                               break;
+                       }
+               } else {
+                       /* Header came from socket; we'll need to patch
+                        * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+                        */
+                       iov_iter_advance(&fixup, sizeof(hdr));
                }
                /* TODO: Should check and handle checksum. */
 
                num_buffers = cpu_to_vhost16(vq, headcount);
                if (likely(mergeable) &&
-                   copy_to_iter(&num_buffers, 2, &fixup) != 2) {
+                   copy_to_iter(&num_buffers, sizeof num_buffers,
+                                &fixup) != sizeof num_buffers) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
index dc78d87e0fc2c5e14832adf674899823ac223c5f..8d4f3f1ff799fb1d7b4bb5a2184144b515676a2c 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
-#include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
 
 #include "vhost.h"
 
-#define TCM_VHOST_VERSION  "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
-#define TCM_VHOST_DEFAULT_TAGS 256
-#define TCM_VHOST_PREALLOC_SGLS 2048
-#define TCM_VHOST_PREALLOC_UPAGES 2048
-#define TCM_VHOST_PREALLOC_PROT_SGLS 512
+#define VHOST_SCSI_VERSION  "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_DEFAULT_TAGS 256
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
 
 struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
@@ -67,11 +66,13 @@ struct vhost_scsi_inflight {
        struct kref kref;
 };
 
-struct tcm_vhost_cmd {
+struct vhost_scsi_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
+       /* virtio-scsi response incoming iovecs */
+       int tvc_in_iovs;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
@@ -81,26 +82,26 @@ struct tcm_vhost_cmd {
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
-       /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+       /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
-       /* Pointer to response */
-       struct virtio_scsi_cmd_resp __user *tvc_resp;
+       /* Pointer to response header iovec */
+       struct iovec *tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
-       struct tcm_vhost_nexus *tvc_nexus;
+       struct vhost_scsi_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
-       /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+       /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
-       unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+       unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
@@ -109,53 +110,53 @@ struct tcm_vhost_cmd {
        struct vhost_scsi_inflight *inflight;
 };
 
-struct tcm_vhost_nexus {
+struct vhost_scsi_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
 };
 
-struct tcm_vhost_nacl {
+struct vhost_scsi_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for Sas Initiator port */
-       char iport_name[TCM_VHOST_NAMELEN];
-       /* Returned by tcm_vhost_make_nodeacl() */
+       char iport_name[VHOST_SCSI_NAMELEN];
+       /* Returned by vhost_scsi_make_nodeacl() */
        struct se_node_acl se_node_acl;
 };
 
-struct tcm_vhost_tpg {
+struct vhost_scsi_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
-       /* list for tcm_vhost_list */
+       /* list for vhost_scsi_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
-       struct tcm_vhost_nexus *tpg_nexus;
-       /* Pointer back to tcm_vhost_tport */
-       struct tcm_vhost_tport *tport;
-       /* Returned by tcm_vhost_make_tpg() */
+       struct vhost_scsi_nexus *tpg_nexus;
+       /* Pointer back to vhost_scsi_tport */
+       struct vhost_scsi_tport *tport;
+       /* Returned by vhost_scsi_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
 };
 
-struct tcm_vhost_tport {
+struct vhost_scsi_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
-       char tport_name[TCM_VHOST_NAMELEN];
-       /* Returned by tcm_vhost_make_tport() */
+       char tport_name[VHOST_SCSI_NAMELEN];
+       /* Returned by vhost_scsi_make_tport() */
        struct se_wwn tport_wwn;
 };
 
-struct tcm_vhost_evt {
+struct vhost_scsi_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
@@ -171,7 +172,9 @@ enum {
 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
-                                              (1ULL << VIRTIO_SCSI_F_T10_PI)
+                                              (1ULL << VIRTIO_SCSI_F_T10_PI) |
+                                              (1ULL << VIRTIO_F_ANY_LAYOUT) |
+                                              (1ULL << VIRTIO_F_VERSION_1)
 };
 
 #define VHOST_SCSI_MAX_TARGET  256
@@ -195,7 +198,7 @@ struct vhost_scsi_virtqueue {
 
 struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 
        struct vhost_dev dev;
@@ -212,21 +215,21 @@ struct vhost_scsi {
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
 
-static struct workqueue_struct *tcm_vhost_workqueue;
+static struct workqueue_struct *vhost_scsi_workqueue;
 
-/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
-static DEFINE_MUTEX(tcm_vhost_mutex);
-static LIST_HEAD(tcm_vhost_list);
+/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(vhost_scsi_mutex);
+static LIST_HEAD(vhost_scsi_list);
 
-static int iov_num_pages(struct iovec *iov)
+static int iov_num_pages(void __user *iov_base, size_t iov_len)
 {
-       return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-              ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+       return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
+              ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
-static void tcm_vhost_done_inflight(struct kref *kref)
+static void vhost_scsi_done_inflight(struct kref *kref)
 {
        struct vhost_scsi_inflight *inflight;
 
@@ -234,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref)
        complete(&inflight->comp);
 }
 
-static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
+static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
 {
        struct vhost_scsi_inflight *new_inflight;
@@ -262,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
 }
 
 static struct vhost_scsi_inflight *
-tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
+vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 {
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;
@@ -274,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
        return inflight;
 }
 
-static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
+static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 {
-       kref_put(&inflight->kref, tcm_vhost_done_inflight);
+       kref_put(&inflight->kref, vhost_scsi_done_inflight);
 }
 
-static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
-static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 {
        return 0;
 }
 
-static char *tcm_vhost_get_fabric_name(void)
+static char *vhost_scsi_get_fabric_name(void)
 {
        return "vhost";
 }
 
-static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -316,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
        return sas_get_fabric_proto_ident(se_tpg);
 }
 
-static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        return &tport->tport_name[0];
 }
 
-static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
        return tpg->tport_tpgt;
 }
 
-static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
 static u32
-tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
                              struct se_node_acl *se_nacl,
                              struct t10_pr_registration *pr_reg,
                              int *format_code,
                              unsigned char *buf)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -369,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
 }
 
 static u32
-tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                                  struct se_node_acl *se_nacl,
                                  struct t10_pr_registration *pr_reg,
                                  int *format_code)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -399,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
 }
 
 static char *
-tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                                    const char *buf,
                                    u32 *out_tid_len,
                                    char **port_nexus_ptr)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -429,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
 }
 
 static struct se_node_acl *
-tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
+vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_nacl *nacl;
+       struct vhost_scsi_nacl *nacl;
 
-       nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+       nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
        if (!nacl) {
-               pr_err("Unable to allocate struct tcm_vhost_nacl\n");
+               pr_err("Unable to allocate struct vhost_scsi_nacl\n");
                return NULL;
        }
 
@@ -443,24 +446,24 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
 }
 
 static void
-tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
+vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
                             struct se_node_acl *se_nacl)
 {
-       struct tcm_vhost_nacl *nacl = container_of(se_nacl,
-                       struct tcm_vhost_nacl, se_node_acl);
+       struct vhost_scsi_nacl *nacl = container_of(se_nacl,
+                       struct vhost_scsi_nacl, se_node_acl);
        kfree(nacl);
 }
 
-static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
-static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
-       struct se_session *se_sess = se_cmd->se_sess;
+       struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
+       struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
        int i;
 
        if (tv_cmd->tvc_sgl_count) {
@@ -472,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }
 
-       tcm_vhost_put_inflight(tv_cmd->inflight);
+       vhost_scsi_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+static int vhost_scsi_shutdown_session(struct se_session *se_sess)
 {
        return 0;
 }
 
-static void tcm_vhost_close_session(struct se_session *se_sess)
+static void vhost_scsi_close_session(struct se_session *se_sess)
 {
        return;
 }
 
-static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 {
        return 0;
 }
 
-static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 {
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
 }
 
-static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 {
        return;
 }
 
-static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 {
        struct vhost_scsi *vs = cmd->tvc_vhost;
 
@@ -527,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
 
-static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
+       struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
 }
 
-static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
+       struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
 }
 
-static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 {
        return;
 }
 
-static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 {
        return;
 }
 
-static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
        vs->vs_events_nr--;
        kfree(evt);
 }
 
-static struct tcm_vhost_evt *
-tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+static struct vhost_scsi_evt *
+vhost_scsi_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
 {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
 
        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
@@ -573,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 
        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
-               vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+               vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }
@@ -585,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
        return evt;
 }
 
-static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 {
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 
@@ -600,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 }
 
 static void
-tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
@@ -646,24 +649,24 @@ again:
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
-               vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+               vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 }
 
-static void tcm_vhost_evt_work(struct vhost_work *work)
+static void vhost_scsi_evt_work(struct vhost_work *work)
 {
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
        struct llist_node *llnode;
 
        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
-               evt = llist_entry(llnode, struct tcm_vhost_evt, list);
+               evt = llist_entry(llnode, struct vhost_scsi_evt, list);
                llnode = llist_next(llnode);
-               tcm_vhost_do_evt_work(vs, evt);
-               tcm_vhost_free_evt(vs, evt);
+               vhost_scsi_do_evt_work(vs, evt);
+               vhost_scsi_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
 }
@@ -679,15 +682,16 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
-       struct tcm_vhost_cmd *cmd;
+       struct vhost_scsi_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
+       struct iov_iter iov_iter;
        int ret, vq;
 
        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
-               cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+               cmd = llist_entry(llnode, struct vhost_scsi_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;
@@ -703,8 +707,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                                                 se_cmd->scsi_sense_length);
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);
-               ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
-               if (likely(ret == 0)) {
+
+               iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+                             cmd->tvc_in_iovs, sizeof(v_rsp));
+               ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+               if (likely(ret == sizeof(v_rsp))) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
@@ -722,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 }
 
-static struct tcm_vhost_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
+static struct vhost_scsi_cmd *
+vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
 {
-       struct tcm_vhost_cmd *cmd;
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_cmd *cmd;
+       struct vhost_scsi_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
@@ -736,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
 
        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
-               pr_err("Unable to locate active struct tcm_vhost_nexus\n");
+               pr_err("Unable to locate active struct vhost_scsi_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;
 
        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
-               pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
+               pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
+       cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
-       memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
+       memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
 
        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
@@ -763,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
-       cmd->inflight = tcm_vhost_get_inflight(vq);
+       cmd->inflight = vhost_scsi_get_inflight(vq);
 
-       memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
+       memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
 
        return cmd;
 }
@@ -776,29 +783,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
  * Returns the number of scatterlist entries used or -errno on error.
  */
 static int
-vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
+                     void __user *ptr,
+                     size_t len,
                      struct scatterlist *sgl,
-                     unsigned int sgl_count,
-                     struct iovec *iov,
-                     struct page **pages,
                      bool write)
 {
-       unsigned int npages = 0, pages_nr, offset, nbytes;
+       unsigned int npages = 0, offset, nbytes;
+       unsigned int pages_nr = iov_num_pages(ptr, len);
        struct scatterlist *sg = sgl;
-       void __user *ptr = iov->iov_base;
-       size_t len = iov->iov_len;
+       struct page **pages = cmd->tvc_upages;
        int ret, i;
 
-       pages_nr = iov_num_pages(iov);
-       if (pages_nr > sgl_count) {
-               pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-                      " sgl_count: %u\n", pages_nr, sgl_count);
-               return -ENOBUFS;
-       }
-       if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
+       if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-                      " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
-                       pages_nr, TCM_VHOST_PREALLOC_UPAGES);
+                      " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
+                       pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
                return -ENOBUFS;
        }
 
@@ -829,84 +829,94 @@ out:
 }
 
 static int
-vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
-                         struct iovec *iov,
-                         int niov,
-                         bool write)
+vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 {
-       struct scatterlist *sg = cmd->tvc_sgl;
-       unsigned int sgl_count = 0;
-       int ret, i;
+       int sgl_count = 0;
 
-       for (i = 0; i < niov; i++)
-               sgl_count += iov_num_pages(&iov[i]);
+       if (!iter || !iter->iov) {
+               pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
+                      " present\n", __func__, bytes);
+               return -EINVAL;
+       }
 
-       if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
-               pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
-                       " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
-                       sgl_count, TCM_VHOST_PREALLOC_SGLS);
-               return -ENOBUFS;
+       sgl_count = iov_iter_npages(iter, 0xffff);
+       if (sgl_count > max_sgls) {
+               pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
+                      " max_sgls: %d\n", __func__, sgl_count, max_sgls);
+               return -EINVAL;
        }
+       return sgl_count;
+}
 
-       pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
-       sg_init_table(sg, sgl_count);
-       cmd->tvc_sgl_count = sgl_count;
+static int
+vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
+                     struct iov_iter *iter,
+                     struct scatterlist *sg, int sg_count)
+{
+       size_t off = iter->iov_offset;
+       int i, ret;
 
-       pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
+       for (i = 0; i < iter->nr_segs; i++) {
+               void __user *base = iter->iov[i].iov_base + off;
+               size_t len = iter->iov[i].iov_len - off;
 
-       for (i = 0; i < niov; i++) {
-               ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
-                                           cmd->tvc_upages, write);
+               ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
                if (ret < 0) {
-                       for (i = 0; i < cmd->tvc_sgl_count; i++)
-                               put_page(sg_page(&cmd->tvc_sgl[i]));
-
-                       cmd->tvc_sgl_count = 0;
+                       for (i = 0; i < sg_count; i++) {
+                               struct page *page = sg_page(&sg[i]);
+                               if (page)
+                                       put_page(page);
+                       }
                        return ret;
                }
                sg += ret;
-               sgl_count -= ret;
+               off = 0;
        }
        return 0;
 }
 
 static int
-vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
-                          struct iovec *iov,
-                          int niov,
-                          bool write)
-{
-       struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
-       unsigned int prot_sgl_count = 0;
-       int ret, i;
-
-       for (i = 0; i < niov; i++)
-               prot_sgl_count += iov_num_pages(&iov[i]);
-
-       if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
-               pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
-                       " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
-                       prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
-               return -ENOBUFS;
-       }
-
-       pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
-                prot_sg, prot_sgl_count);
-       sg_init_table(prot_sg, prot_sgl_count);
-       cmd->tvc_prot_sgl_count = prot_sgl_count;
-
-       for (i = 0; i < niov; i++) {
-               ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
-                                           cmd->tvc_upages, write);
+vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
+                size_t prot_bytes, struct iov_iter *prot_iter,
+                size_t data_bytes, struct iov_iter *data_iter)
+{
+       int sgl_count, ret;
+       bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
+
+       if (prot_bytes) {
+               sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
+                                                VHOST_SCSI_PREALLOC_PROT_SGLS);
+               if (sgl_count < 0)
+                       return sgl_count;
+
+               sg_init_table(cmd->tvc_prot_sgl, sgl_count);
+               cmd->tvc_prot_sgl_count = sgl_count;
+               pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+                        cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
+
+               ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
+                                           cmd->tvc_prot_sgl,
+                                           cmd->tvc_prot_sgl_count);
                if (ret < 0) {
-                       for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
-                               put_page(sg_page(&cmd->tvc_prot_sgl[i]));
-
                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
-               prot_sg += ret;
-               prot_sgl_count -= ret;
+       }
+       sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
+                                        VHOST_SCSI_PREALLOC_SGLS);
+       if (sgl_count < 0)
+               return sgl_count;
+
+       sg_init_table(cmd->tvc_sgl, sgl_count);
+       cmd->tvc_sgl_count = sgl_count;
+       pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
+                 cmd->tvc_sgl, cmd->tvc_sgl_count);
+
+       ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
+                                   cmd->tvc_sgl, cmd->tvc_sgl_count);
+       if (ret < 0) {
+               cmd->tvc_sgl_count = 0;
+               return ret;
        }
        return 0;
 }
@@ -928,11 +938,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
        return TCM_SIMPLE_TAG;
 }
 
-static void tcm_vhost_submission_work(struct work_struct *work)
+static void vhost_scsi_submission_work(struct work_struct *work)
 {
-       struct tcm_vhost_cmd *cmd =
-               container_of(work, struct tcm_vhost_cmd, work);
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_cmd *cmd =
+               container_of(work, struct vhost_scsi_cmd, work);
+       struct vhost_scsi_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;
@@ -986,19 +996,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tpg **vs_tpg, *tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
-       struct tcm_vhost_tpg *tpg;
-       struct tcm_vhost_cmd *cmd;
+       struct vhost_scsi_cmd *cmd;
+       struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
-       u32 exp_data_len, data_first, data_num, data_direction, prot_first;
-       unsigned out, in, i;
-       int head, ret, data_niov, prot_niov, prot_bytes;
-       size_t req_size;
+       u32 exp_data_len, data_direction;
+       unsigned out, in;
+       int head, ret, prot_bytes;
+       size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+       size_t out_size, in_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
-       bool hdr_pi;
+       bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
        void *req, *cdb;
 
        mutex_lock(&vq->mutex);
@@ -1014,10 +1025,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
-                                       ARRAY_SIZE(vq->iov), &out, &in,
-                                       NULL, NULL);
+                                        ARRAY_SIZE(vq->iov), &out, &in,
+                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-                                       head, out, in);
+                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
@@ -1029,113 +1040,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        }
                        break;
                }
-
-               /* FIXME: BIDI operation */
-               if (out == 1 && in == 1) {
-                       data_direction = DMA_NONE;
-                       data_first = 0;
-                       data_num = 0;
-               } else if (out == 1 && in > 1) {
-                       data_direction = DMA_FROM_DEVICE;
-                       data_first = out + 1;
-                       data_num = in - 1;
-               } else if (out > 1 && in == 1) {
-                       data_direction = DMA_TO_DEVICE;
-                       data_first = 1;
-                       data_num = out - 1;
-               } else {
-                       vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
-                                       out, in);
-                       break;
-               }
-
                /*
-                * Check for a sane resp buffer so we can report errors to
-                * the guest.
+                * Check for a sane response buffer so we can report early
+                * errors back to the guest.
                 */
-               if (unlikely(vq->iov[out].iov_len !=
-                                       sizeof(struct virtio_scsi_cmd_resp))) {
-                       vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-                               " bytes\n", vq->iov[out].iov_len);
+               if (unlikely(vq->iov[out].iov_len < rsp_size)) {
+                       vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
+                               " size, got %zu bytes\n", vq->iov[out].iov_len);
                        break;
                }
-
-               if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
+               /*
+                * Setup pointers and values based upon different virtio-scsi
+                * request header if T10_PI is enabled in KVM guest.
+                */
+               if (t10_pi) {
                        req = &v_req_pi;
+                       req_size = sizeof(v_req_pi);
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
-                       req_size = sizeof(v_req_pi);
-                       hdr_pi = true;
                } else {
                        req = &v_req;
+                       req_size = sizeof(v_req);
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
-                       req_size = sizeof(v_req);
-                       hdr_pi = false;
                }
+               /*
+                * FIXME: Not correct for BIDI operation
+                */
+               out_size = iov_length(vq->iov, out);
+               in_size = iov_length(&vq->iov[out], in);
 
-               if (unlikely(vq->iov[0].iov_len < req_size)) {
-                       pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
-                              req_size, vq->iov[0].iov_len);
-                       break;
-               }
-               ret = copy_from_user(req, vq->iov[0].iov_base, req_size);
-               if (unlikely(ret)) {
-                       vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
-                       break;
-               }
+               /*
+                * Copy over the virtio-scsi request header, which for a
+                * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+                * single iovec may contain both the header + outgoing
+                * WRITE payloads.
+                *
+                * copy_from_iter() will advance out_iter, so that it will
+                * point at the start of the outgoing WRITE payload, if
+                * DMA_TO_DEVICE is set.
+                */
+               iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 
+               ret = copy_from_iter(req, req_size, &out_iter);
+               if (unlikely(ret != req_size)) {
+                       vq_err(vq, "Faulted on copy_from_iter\n");
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
+               }
                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
+                       vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
 
                tpg = ACCESS_ONCE(vs_tpg[*target]);
-
-               /* Target does not exist, fail the request */
                if (unlikely(!tpg)) {
+                       /* Target does not exist, fail the request */
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
-
-               data_niov = data_num;
-               prot_niov = prot_first = prot_bytes = 0;
                /*
-                * Determine if any protection information iovecs are preceeding
-                * the actual data payload, and adjust data_first + data_niov
-                * values accordingly for vhost_scsi_map_iov_to_sgl() below.
+                * Determine data_direction by calculating the total outgoing
+                * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
+                * response headers respectively.
                 *
-                * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
+                * For DMA_TO_DEVICE this is out_iter, which is already pointing
+                * to the right place.
+                *
+                * For DMA_FROM_DEVICE, the iovec will be just past the end
+                * of the virtio-scsi response header in either the same
+                * or immediately following iovec.
+                *
+                * Any associated T10_PI bytes for the outgoing / incoming
+                * payloads are included in calculation of exp_data_len here.
                 */
-               if (hdr_pi) {
+               prot_bytes = 0;
+
+               if (out_size > req_size) {
+                       data_direction = DMA_TO_DEVICE;
+                       exp_data_len = out_size - req_size;
+                       data_iter = out_iter;
+               } else if (in_size > rsp_size) {
+                       data_direction = DMA_FROM_DEVICE;
+                       exp_data_len = in_size - rsp_size;
+
+                       iov_iter_init(&in_iter, READ, &vq->iov[out], in,
+                                     rsp_size + exp_data_len);
+                       iov_iter_advance(&in_iter, rsp_size);
+                       data_iter = in_iter;
+               } else {
+                       data_direction = DMA_NONE;
+                       exp_data_len = 0;
+               }
+               /*
+                * If T10_PI header + payload is present, setup prot_iter values
+                * and recalculate data_iter for vhost_scsi_mapal() mapping to
+                * host scatterlists via get_user_pages_fast().
+                */
+               if (t10_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
-                                       vq_err(vq, "Received non zero do_pi_niov"
-                                               ", but wrong data_direction\n");
-                                       goto err_cmd;
+                                       vq_err(vq, "Received non zero pi_bytesout,"
+                                               " but wrong data_direction\n");
+                                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                                       continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
-                                       vq_err(vq, "Received non zero di_pi_niov"
-                                               ", but wrong data_direction\n");
-                                       goto err_cmd;
+                                       vq_err(vq, "Received non zero pi_bytesin,"
+                                               " but wrong data_direction\n");
+                                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                                       continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
+                       /*
+                        * Set prot_iter to data_iter, and advance past any
+                        * preceding prot_bytes that may be present.
+                        *
+                        * Also fix up the exp_data_len to reflect only the
+                        * actual data payload length.
+                        */
                        if (prot_bytes) {
-                               int tmp = 0;
-
-                               for (i = 0; i < data_num; i++) {
-                                       tmp += vq->iov[data_first + i].iov_len;
-                                       prot_niov++;
-                                       if (tmp >= prot_bytes)
-                                               break;
-                               }
-                               prot_first = data_first;
-                               data_first += prot_niov;
-                               data_niov = data_num - prot_niov;
+                               exp_data_len -= prot_bytes;
+                               prot_iter = data_iter;
+                               iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
@@ -1147,83 +1179,65 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
-               exp_data_len = 0;
-               for (i = 0; i < data_niov; i++)
-                       exp_data_len += vq->iov[data_first + i].iov_len;
                /*
-                * Check that the recieved CDB size does not exceeded our
-                * hardcoded max for vhost-scsi
+                * Check that the received CDB size does not exceed our
+                * hardcoded max for vhost-scsi, then get a pre-allocated
+                * cmd descriptor for the new virtio-scsi tag.
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
-               if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
+               if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
-                               scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
-                       goto err_cmd;
+                               scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
                }
-
                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
-                                       PTR_ERR(cmd));
-                       goto err_cmd;
+                              PTR_ERR(cmd));
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
                }
-
-               pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
-                       ": %d\n", cmd, exp_data_len, data_direction);
-
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
-               cmd->tvc_resp = vq->iov[out].iov_base;
+               cmd->tvc_resp_iov = &vq->iov[out];
+               cmd->tvc_in_iovs = in;
 
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
-                       cmd->tvc_cdb[0], cmd->tvc_lun);
+                        cmd->tvc_cdb[0], cmd->tvc_lun);
+               pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
+                        " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
-               if (prot_niov) {
-                       ret = vhost_scsi_map_iov_to_prot(cmd,
-                                       &vq->iov[prot_first], prot_niov,
-                                       data_direction == DMA_FROM_DEVICE);
-                       if (unlikely(ret)) {
-                               vq_err(vq, "Failed to map iov to"
-                                       " prot_sgl\n");
-                               goto err_free;
-                       }
-               }
                if (data_direction != DMA_NONE) {
-                       ret = vhost_scsi_map_iov_to_sgl(cmd,
-                                       &vq->iov[data_first], data_niov,
-                                       data_direction == DMA_FROM_DEVICE);
+                       ret = vhost_scsi_mapal(cmd,
+                                              prot_bytes, &prot_iter,
+                                              exp_data_len, &data_iter);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
-                               goto err_free;
+                               vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
+                               vhost_scsi_send_bad_target(vs, vq, head, out);
+                               continue;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
-                * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+                * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
-                * Dispatch tv_cmd descriptor for cmwq execution in process
-                * context provided by tcm_vhost_workqueue.  This also ensures
-                * tv_cmd is executed on the same kworker CPU as this vhost
-                * thread to gain positive L2 cache locality effects..
+                * Dispatch cmd descriptor for cmwq execution in process
+                * context provided by vhost_scsi_workqueue.  This also ensures
+                * cmd is executed on the same kworker CPU as this vhost
+                * thread to gain positive L2 cache locality effects.
                 */
-               INIT_WORK(&cmd->work, tcm_vhost_submission_work);
-               queue_work(tcm_vhost_workqueue, &cmd->work);
+               INIT_WORK(&cmd->work, vhost_scsi_submission_work);
+               queue_work(vhost_scsi_workqueue, &cmd->work);
        }
-
-       mutex_unlock(&vq->mutex);
-       return;
-
-err_free:
-       vhost_scsi_free_cmd(cmd);
-err_cmd:
-       vhost_scsi_send_bad_target(vs, vq, head, out);
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -1234,15 +1248,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 }
 
 static void
-tcm_vhost_send_evt(struct vhost_scsi *vs,
-                  struct tcm_vhost_tpg *tpg,
+vhost_scsi_send_evt(struct vhost_scsi *vs,
+                  struct vhost_scsi_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
 {
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
 
-       evt = tcm_vhost_allocate_evt(vs, event, reason);
+       evt = vhost_scsi_allocate_evt(vs, event, reason);
        if (!evt)
                return;
 
@@ -1253,7 +1267,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
-               evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+               evt->event.lun[1] = tpg->tport_tpgt;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
@@ -1274,7 +1288,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
                goto out;
 
        if (vs->vs_events_missed)
-               tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+               vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -1300,7 +1314,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
        int i;
 
        /* Init new inflight and remember the old inflight */
-       tcm_vhost_init_inflight(vs, old_inflight);
+       vhost_scsi_init_inflight(vs, old_inflight);
 
        /*
         * The inflight->kref was initialized to 1. We decrement it here to
@@ -1308,7 +1322,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-               kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
+               kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
 
        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1323,24 +1337,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
- * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ * vhost_scsi_tpg with an active struct vhost_scsi_nexus
  *
  *  The lock nesting rule is:
- *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
+ *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int
 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
 {
        struct se_portal_group *se_tpg;
-       struct tcm_vhost_tport *tv_tport;
-       struct tcm_vhost_tpg *tpg;
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tport *tv_tport;
+       struct vhost_scsi_tpg *tpg;
+       struct vhost_scsi_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
 
        /* Verify that ring has been setup correctly. */
@@ -1361,7 +1375,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);
 
-       list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
+       list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
                mutex_lock(&tpg->tv_tpg_mutex);
                if (!tpg->tpg_nexus) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1429,7 +1443,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 
 out:
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return ret;
 }
 
@@ -1438,14 +1452,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                          struct vhost_scsi_target *t)
 {
        struct se_portal_group *se_tpg;
-       struct tcm_vhost_tport *tv_tport;
-       struct tcm_vhost_tpg *tpg;
+       struct vhost_scsi_tport *tv_tport;
+       struct vhost_scsi_tpg *tpg;
        struct vhost_virtqueue *vq;
        bool match = false;
        int index, ret, i;
        u8 target;
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1511,14 +1525,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
        vs->vs_tpg = NULL;
        WARN_ON(vs->vs_events_nr);
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return 0;
 
 err_tpg:
        mutex_unlock(&tpg->tv_tpg_mutex);
 err_dev:
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return ret;
 }
 
@@ -1565,7 +1579,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
                goto err_vqs;
 
        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
-       vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
+       vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
 
        vs->vs_events_nr = 0;
        vs->vs_events_missed = false;
@@ -1580,7 +1594,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
        }
        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
 
-       tcm_vhost_init_inflight(vs, NULL);
+       vhost_scsi_init_inflight(vs, NULL);
 
        f->private_data = vs;
        return 0;
@@ -1712,7 +1726,7 @@ static int vhost_scsi_deregister(void)
        return misc_deregister(&vhost_scsi_misc);
 }
 
-static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
 {
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -1729,7 +1743,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
 }
 
 static void
-tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
                  struct se_lun *lun, bool plug)
 {
 
@@ -1750,71 +1764,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        mutex_lock(&vq->mutex);
        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
-               tcm_vhost_send_evt(vs, tpg, lun,
+               vhost_scsi_send_evt(vs, tpg, lun,
                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
        mutex_unlock(&vq->mutex);
        mutex_unlock(&vs->dev.mutex);
 }
 
-static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
 {
-       tcm_vhost_do_plug(tpg, lun, true);
+       vhost_scsi_do_plug(tpg, lun, true);
 }
 
-static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
 {
-       tcm_vhost_do_plug(tpg, lun, false);
+       vhost_scsi_do_plug(tpg, lun, false);
 }
 
-static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
+static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
                               struct se_lun *lun)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count++;
        mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tcm_vhost_hotplug(tpg, lun);
+       vhost_scsi_hotplug(tpg, lun);
 
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
 
        return 0;
 }
 
-static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
+static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
                                  struct se_lun *lun)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count--;
        mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tcm_vhost_hotunplug(tpg, lun);
+       vhost_scsi_hotunplug(tpg, lun);
 
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
 }
 
 static struct se_node_acl *
-tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
+vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
                       struct config_group *group,
                       const char *name)
 {
        struct se_node_acl *se_nacl, *se_nacl_new;
-       struct tcm_vhost_nacl *nacl;
+       struct vhost_scsi_nacl *nacl;
        u64 wwpn = 0;
        u32 nexus_depth;
 
-       /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+       /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
                return ERR_PTR(-EINVAL); */
-       se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
+       se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
        if (!se_nacl_new)
                return ERR_PTR(-ENOMEM);
 
@@ -1826,37 +1840,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
        se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
                                name, nexus_depth);
        if (IS_ERR(se_nacl)) {
-               tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
+               vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
                return se_nacl;
        }
        /*
-        * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
+        * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
         */
-       nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
+       nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
        nacl->iport_wwpn = wwpn;
 
        return se_nacl;
 }
 
-static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
 {
-       struct tcm_vhost_nacl *nacl = container_of(se_acl,
-                               struct tcm_vhost_nacl, se_node_acl);
+       struct vhost_scsi_nacl *nacl = container_of(se_acl,
+                               struct vhost_scsi_nacl, se_node_acl);
        core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
        kfree(nacl);
 }
 
-static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
+static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
                                       struct se_session *se_sess)
 {
-       struct tcm_vhost_cmd *tv_cmd;
+       struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;
 
        if (!se_sess->sess_cmd_map)
                return;
 
-       for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
-               tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+       for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+               tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
                kfree(tv_cmd->tvc_sgl);
                kfree(tv_cmd->tvc_prot_sgl);
@@ -1864,13 +1878,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
        }
 }
 
-static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
                                const char *name)
 {
        struct se_portal_group *se_tpg;
        struct se_session *se_sess;
-       struct tcm_vhost_nexus *tv_nexus;
-       struct tcm_vhost_cmd *tv_cmd;
+       struct vhost_scsi_nexus *tv_nexus;
+       struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;
 
        mutex_lock(&tpg->tv_tpg_mutex);
@@ -1881,19 +1895,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        }
        se_tpg = &tpg->se_tpg;
 
-       tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+       tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
        if (!tv_nexus) {
                mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+               pr_err("Unable to allocate struct vhost_scsi_nexus\n");
                return -ENOMEM;
        }
        /*
         *  Initialize the struct se_session pointer and setup tagpool
-        *  for struct tcm_vhost_cmd descriptors
+        *  for struct vhost_scsi_cmd descriptors
         */
        tv_nexus->tvn_se_sess = transport_init_session_tags(
-                                       TCM_VHOST_DEFAULT_TAGS,
-                                       sizeof(struct tcm_vhost_cmd),
+                                       VHOST_SCSI_DEFAULT_TAGS,
+                                       sizeof(struct vhost_scsi_cmd),
                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
        if (IS_ERR(tv_nexus->tvn_se_sess)) {
                mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1901,11 +1915,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                return -ENOMEM;
        }
        se_sess = tv_nexus->tvn_se_sess;
-       for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
-               tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+       for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+               tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
                tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
-                                       TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
                if (!tv_cmd->tvc_sgl) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
@@ -1913,7 +1927,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                }
 
                tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
-                                       TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
                if (!tv_cmd->tvc_upages) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
@@ -1921,7 +1935,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                }
 
                tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
-                                       TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
                if (!tv_cmd->tvc_prot_sgl) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
@@ -1930,7 +1944,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        }
        /*
         * Since we are running in 'demo mode' this call with generate a
-        * struct se_node_acl for the tcm_vhost struct se_portal_group with
+        * struct se_node_acl for the vhost_scsi struct se_portal_group with
         * the SCSI Initiator port name of the passed configfs group 'name'.
         */
        tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
@@ -1953,16 +1967,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        return 0;
 
 out:
-       tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+       vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
        transport_free_session(se_sess);
        kfree(tv_nexus);
        return -ENOMEM;
 }
 
-static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
+static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
 {
        struct se_session *se_sess;
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_nexus *tv_nexus;
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tv_nexus = tpg->tpg_nexus;
@@ -1994,10 +2008,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
        }
 
        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
-               " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+               " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 
-       tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+       vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
        /*
         * Release the SCSI I_T Nexus to the emulated vhost Target Port
         */
@@ -2009,12 +2023,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
        return 0;
 }
 
-static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
+static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
                                        char *page)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_nexus *tv_nexus;
        ssize_t ret;
 
        mutex_lock(&tpg->tv_tpg_mutex);
@@ -2030,40 +2044,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
        return ret;
 }
 
-static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
+static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
                                         const char *page,
                                         size_t count)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport_wwn = tpg->tport;
-       unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport_wwn = tpg->tport;
+       unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
        int ret;
        /*
         * Shutdown the active I_T nexus if 'NULL' is passed..
         */
        if (!strncmp(page, "NULL", 4)) {
-               ret = tcm_vhost_drop_nexus(tpg);
+               ret = vhost_scsi_drop_nexus(tpg);
                return (!ret) ? count : ret;
        }
        /*
         * Otherwise make sure the passed virtual Initiator port WWN matches
-        * the fabric protocol_id set in tcm_vhost_make_tport(), and call
-        * tcm_vhost_make_nexus().
+        * the fabric protocol_id set in vhost_scsi_make_tport(), and call
+        * vhost_scsi_make_nexus().
         */
-       if (strlen(page) >= TCM_VHOST_NAMELEN) {
+       if (strlen(page) >= VHOST_SCSI_NAMELEN) {
                pr_err("Emulated NAA Sas Address: %s, exceeds"
-                               " max: %d\n", page, TCM_VHOST_NAMELEN);
+                               " max: %d\n", page, VHOST_SCSI_NAMELEN);
                return -EINVAL;
        }
-       snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+       snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
 
        ptr = strstr(i_port, "naa.");
        if (ptr) {
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
                        pr_err("Passed SAS Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
@@ -2074,7 +2088,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
                        pr_err("Passed FCP Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -2085,7 +2099,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
                        pr_err("Passed iSCSI Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
@@ -2101,40 +2115,40 @@ check_newline:
        if (i_port[strlen(i_port)-1] == '\n')
                i_port[strlen(i_port)-1] = '\0';
 
-       ret = tcm_vhost_make_nexus(tpg, port_ptr);
+       ret = vhost_scsi_make_nexus(tpg, port_ptr);
        if (ret < 0)
                return ret;
 
        return count;
 }
 
-TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
 
-static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
-       &tcm_vhost_tpg_nexus.attr,
+static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
+       &vhost_scsi_tpg_nexus.attr,
        NULL,
 };
 
 static struct se_portal_group *
-tcm_vhost_make_tpg(struct se_wwn *wwn,
+vhost_scsi_make_tpg(struct se_wwn *wwn,
                   struct config_group *group,
                   const char *name)
 {
-       struct tcm_vhost_tport *tport = container_of(wwn,
-                       struct tcm_vhost_tport, tport_wwn);
+       struct vhost_scsi_tport *tport = container_of(wwn,
+                       struct vhost_scsi_tport, tport_wwn);
 
-       struct tcm_vhost_tpg *tpg;
-       unsigned long tpgt;
+       struct vhost_scsi_tpg *tpg;
+       u16 tpgt;
        int ret;
 
        if (strstr(name, "tpgt_") != name)
                return ERR_PTR(-EINVAL);
-       if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+       if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
                return ERR_PTR(-EINVAL);
 
-       tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+       tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
        if (!tpg) {
-               pr_err("Unable to allocate struct tcm_vhost_tpg");
+               pr_err("Unable to allocate struct vhost_scsi_tpg");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&tpg->tv_tpg_mutex);
@@ -2142,31 +2156,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
        tpg->tport = tport;
        tpg->tport_tpgt = tpgt;
 
-       ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
+       ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
                                &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
        if (ret < 0) {
                kfree(tpg);
                return NULL;
        }
-       mutex_lock(&tcm_vhost_mutex);
-       list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
+       list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
+       mutex_unlock(&vhost_scsi_mutex);
 
        return &tpg->se_tpg;
 }
 
-static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        list_del(&tpg->tv_tpg_list);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        /*
         * Release the virtual I_T Nexus for this vhost TPG
         */
-       tcm_vhost_drop_nexus(tpg);
+       vhost_scsi_drop_nexus(tpg);
        /*
         * Deregister the se_tpg from TCM..
         */
@@ -2175,21 +2189,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
 }
 
 static struct se_wwn *
-tcm_vhost_make_tport(struct target_fabric_configfs *tf,
+vhost_scsi_make_tport(struct target_fabric_configfs *tf,
                     struct config_group *group,
                     const char *name)
 {
-       struct tcm_vhost_tport *tport;
+       struct vhost_scsi_tport *tport;
        char *ptr;
        u64 wwpn = 0;
        int off = 0;
 
-       /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+       /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
                return ERR_PTR(-EINVAL); */
 
-       tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+       tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
        if (!tport) {
-               pr_err("Unable to allocate struct tcm_vhost_tport");
+               pr_err("Unable to allocate struct vhost_scsi_tport");
                return ERR_PTR(-ENOMEM);
        }
        tport->tport_wwpn = wwpn;
@@ -2220,102 +2234,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf,
        return ERR_PTR(-EINVAL);
 
 check_len:
-       if (strlen(name) >= TCM_VHOST_NAMELEN) {
+       if (strlen(name) >= VHOST_SCSI_NAMELEN) {
                pr_err("Emulated %s Address: %s, exceeds"
-                       " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
-                       TCM_VHOST_NAMELEN);
+                       " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
+                       VHOST_SCSI_NAMELEN);
                kfree(tport);
                return ERR_PTR(-EINVAL);
        }
-       snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+       snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
 
        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
-               " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+               " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
 
        return &tport->tport_wwn;
 }
 
-static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+static void vhost_scsi_drop_tport(struct se_wwn *wwn)
 {
-       struct tcm_vhost_tport *tport = container_of(wwn,
-                               struct tcm_vhost_tport, tport_wwn);
+       struct vhost_scsi_tport *tport = container_of(wwn,
+                               struct vhost_scsi_tport, tport_wwn);
 
        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
-               " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+               " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
                tport->tport_name);
 
        kfree(tport);
 }
 
 static ssize_t
-tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
+vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
                                char *page)
 {
        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
-               "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+               "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
                utsname()->machine);
 }
 
-TF_WWN_ATTR_RO(tcm_vhost, version);
+TF_WWN_ATTR_RO(vhost_scsi, version);
 
-static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
-       &tcm_vhost_wwn_version.attr,
+static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
+       &vhost_scsi_wwn_version.attr,
        NULL,
 };
 
-static struct target_core_fabric_ops tcm_vhost_ops = {
-       .get_fabric_name                = tcm_vhost_get_fabric_name,
-       .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
-       .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
-       .tpg_get_tag                    = tcm_vhost_get_tag,
-       .tpg_get_default_depth          = tcm_vhost_get_default_depth,
-       .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
-       .tpg_check_demo_mode            = tcm_vhost_check_true,
-       .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
-       .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
-       .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
-       .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
-       .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
-       .release_cmd                    = tcm_vhost_release_cmd,
+static struct target_core_fabric_ops vhost_scsi_ops = {
+       .get_fabric_name                = vhost_scsi_get_fabric_name,
+       .get_fabric_proto_ident         = vhost_scsi_get_fabric_proto_ident,
+       .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
+       .tpg_get_tag                    = vhost_scsi_get_tpgt,
+       .tpg_get_default_depth          = vhost_scsi_get_default_depth,
+       .tpg_get_pr_transport_id        = vhost_scsi_get_pr_transport_id,
+       .tpg_get_pr_transport_id_len    = vhost_scsi_get_pr_transport_id_len,
+       .tpg_parse_pr_out_transport_id  = vhost_scsi_parse_pr_out_transport_id,
+       .tpg_check_demo_mode            = vhost_scsi_check_true,
+       .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
+       .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
+       .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
+       .tpg_alloc_fabric_acl           = vhost_scsi_alloc_fabric_acl,
+       .tpg_release_fabric_acl         = vhost_scsi_release_fabric_acl,
+       .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
+       .release_cmd                    = vhost_scsi_release_cmd,
        .check_stop_free                = vhost_scsi_check_stop_free,
-       .shutdown_session               = tcm_vhost_shutdown_session,
-       .close_session                  = tcm_vhost_close_session,
-       .sess_get_index                 = tcm_vhost_sess_get_index,
+       .shutdown_session               = vhost_scsi_shutdown_session,
+       .close_session                  = vhost_scsi_close_session,
+       .sess_get_index                 = vhost_scsi_sess_get_index,
        .sess_get_initiator_sid         = NULL,
-       .write_pending                  = tcm_vhost_write_pending,
-       .write_pending_status           = tcm_vhost_write_pending_status,
-       .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
-       .get_task_tag                   = tcm_vhost_get_task_tag,
-       .get_cmd_state                  = tcm_vhost_get_cmd_state,
-       .queue_data_in                  = tcm_vhost_queue_data_in,
-       .queue_status                   = tcm_vhost_queue_status,
-       .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
-       .aborted_task                   = tcm_vhost_aborted_task,
+       .write_pending                  = vhost_scsi_write_pending,
+       .write_pending_status           = vhost_scsi_write_pending_status,
+       .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
+       .get_task_tag                   = vhost_scsi_get_task_tag,
+       .get_cmd_state                  = vhost_scsi_get_cmd_state,
+       .queue_data_in                  = vhost_scsi_queue_data_in,
+       .queue_status                   = vhost_scsi_queue_status,
+       .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
+       .aborted_task                   = vhost_scsi_aborted_task,
        /*
         * Setup callers for generic logic in target_core_fabric_configfs.c
         */
-       .fabric_make_wwn                = tcm_vhost_make_tport,
-       .fabric_drop_wwn                = tcm_vhost_drop_tport,
-       .fabric_make_tpg                = tcm_vhost_make_tpg,
-       .fabric_drop_tpg                = tcm_vhost_drop_tpg,
-       .fabric_post_link               = tcm_vhost_port_link,
-       .fabric_pre_unlink              = tcm_vhost_port_unlink,
+       .fabric_make_wwn                = vhost_scsi_make_tport,
+       .fabric_drop_wwn                = vhost_scsi_drop_tport,
+       .fabric_make_tpg                = vhost_scsi_make_tpg,
+       .fabric_drop_tpg                = vhost_scsi_drop_tpg,
+       .fabric_post_link               = vhost_scsi_port_link,
+       .fabric_pre_unlink              = vhost_scsi_port_unlink,
        .fabric_make_np                 = NULL,
        .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
-       .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
+       .fabric_make_nodeacl            = vhost_scsi_make_nodeacl,
+       .fabric_drop_nodeacl            = vhost_scsi_drop_nodeacl,
 };
 
-static int tcm_vhost_register_configfs(void)
+static int vhost_scsi_register_configfs(void)
 {
        struct target_fabric_configfs *fabric;
        int ret;
 
-       pr_debug("TCM_VHOST fabric module %s on %s/%s"
-               " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+       pr_debug("vhost-scsi fabric module %s on %s/%s"
+               " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
                utsname()->machine);
        /*
         * Register the top level struct config_item_type with TCM core
@@ -2326,14 +2340,14 @@ static int tcm_vhost_register_configfs(void)
                return PTR_ERR(fabric);
        }
        /*
-        * Setup fabric->tf_ops from our local tcm_vhost_ops
+        * Setup fabric->tf_ops from our local vhost_scsi_ops
         */
-       fabric->tf_ops = tcm_vhost_ops;
+       fabric->tf_ops = vhost_scsi_ops;
        /*
         * Setup default attribute lists for various fabric->tf_cit_tmpl
         */
-       fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
-       fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+       fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
+       fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
        fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
        fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
        fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
@@ -2353,37 +2367,37 @@ static int tcm_vhost_register_configfs(void)
        /*
         * Setup our local pointer to *fabric
         */
-       tcm_vhost_fabric_configfs = fabric;
-       pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
+       vhost_scsi_fabric_configfs = fabric;
+       pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
        return 0;
 };
 
-static void tcm_vhost_deregister_configfs(void)
+static void vhost_scsi_deregister_configfs(void)
 {
-       if (!tcm_vhost_fabric_configfs)
+       if (!vhost_scsi_fabric_configfs)
                return;
 
-       target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
-       tcm_vhost_fabric_configfs = NULL;
-       pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
+       target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
+       vhost_scsi_fabric_configfs = NULL;
+       pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
 };
 
-static int __init tcm_vhost_init(void)
+static int __init vhost_scsi_init(void)
 {
        int ret = -ENOMEM;
        /*
         * Use our own dedicated workqueue for submitting I/O into
         * target core to avoid contention within system_wq.
         */
-       tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
-       if (!tcm_vhost_workqueue)
+       vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
+       if (!vhost_scsi_workqueue)
                goto out;
 
        ret = vhost_scsi_register();
        if (ret < 0)
                goto out_destroy_workqueue;
 
-       ret = tcm_vhost_register_configfs();
+       ret = vhost_scsi_register_configfs();
        if (ret < 0)
                goto out_vhost_scsi_deregister;
 
@@ -2392,20 +2406,20 @@ static int __init tcm_vhost_init(void)
 out_vhost_scsi_deregister:
        vhost_scsi_deregister();
 out_destroy_workqueue:
-       destroy_workqueue(tcm_vhost_workqueue);
+       destroy_workqueue(vhost_scsi_workqueue);
 out:
        return ret;
 };
 
-static void tcm_vhost_exit(void)
+static void vhost_scsi_exit(void)
 {
-       tcm_vhost_deregister_configfs();
+       vhost_scsi_deregister_configfs();
        vhost_scsi_deregister();
-       destroy_workqueue(tcm_vhost_workqueue);
+       destroy_workqueue(vhost_scsi_workqueue);
 };
 
 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
 MODULE_ALIAS("tcm_vhost");
 MODULE_LICENSE("GPL");
-module_init(tcm_vhost_init);
-module_exit(tcm_vhost_exit);
+module_init(vhost_scsi_init);
+module_exit(vhost_scsi_exit);
index 00b2286382743203027b834c300a62745caa4a3f..b546da5d8ea32312cf1c93a4164ffb37c34e7272 100644 (file)
@@ -12,16 +12,32 @@ config VIRTIO_PCI
        depends on PCI
        select VIRTIO
        ---help---
-         This drivers provides support for virtio based paravirtual device
+         This driver provides support for virtio based paravirtual device
          drivers over PCI.  This requires that your VMM has appropriate PCI
          virtio backends.  Most QEMU based VMMs should support these devices
          (like KVM or Xen).
 
-         Currently, the ABI is not considered stable so there is no guarantee
-         that this version of the driver will work with your VMM.
-
          If unsure, say M.
 
+config VIRTIO_PCI_LEGACY
+       bool "Support for legacy virtio draft 0.9.X and older devices"
+       default y
+       depends on VIRTIO_PCI
+       ---help---
+         Virtio PCI Card 0.9.X Draft (circa 2014) and older device support.
+
+         This option enables building a transitional driver, supporting
+         both devices conforming to Virtio 1 specification, and legacy devices.
+         If disabled, you get a slightly smaller, non-transitional driver,
+         with no legacy compatibility.
+
+         So look out into your driveway.  Do you have a flying car?  If
+         so, you can happily disable this option and virtio will not
+         break.  Otherwise, leave it set.  Unless you're testing what
+         life will be like in The Future.
+
+         If unsure, say Y.
+
 config VIRTIO_BALLOON
        tristate "Virtio balloon driver"
        depends on VIRTIO
index bf5104b56894732995b8b0567f62425b9c104779..d85565b8ea46dd3f0ccfb6a0ed8c33418483a9f3 100644 (file)
@@ -1,5 +1,6 @@
 obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
 obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
-virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o
+virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
+virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
index b9f70dfc4751858da43f4f437ef10ac675d6340f..5ce2aa48fc6e0943031f28c2a51d639e71b52f36 100644 (file)
@@ -236,7 +236,10 @@ static int virtio_dev_probe(struct device *_d)
        if (err)
                goto err;
 
-       add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+       /* If probe didn't do it, mark device DRIVER_OK ourselves. */
+       if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
+               virtio_device_ready(dev);
+
        if (drv->scan)
                drv->scan(dev);
 
index 50c5f42d7a9f3010c5f12275b7fd540161900392..0413157f3b49c230aaa7a775564086144977803f 100644 (file)
@@ -44,8 +44,7 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
 module_param(oom_pages, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
 
-struct virtio_balloon
-{
+struct virtio_balloon {
        struct virtio_device *vdev;
        struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
 
@@ -466,6 +465,12 @@ static int virtballoon_probe(struct virtio_device *vdev)
        struct virtio_balloon *vb;
        int err;
 
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
        vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
        if (!vb) {
                err = -ENOMEM;
index 00d115b22bd8b322be6b5d7b090eaa4fa42eab11..cad569890908de40ba4d72edd6562fb87b6e14b5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Virtio memory mapped device driver
  *
- * Copyright 2011, ARM Ltd.
+ * Copyright 2011-2014, ARM Ltd.
  *
  * This module allows virtio devices to be used over a virtual, memory mapped
  * platform device.
  *
  *
  *
- * Registers layout (all 32-bit wide):
- *
- * offset d. name             description
- * ------ -- ---------------- -----------------
- *
- * 0x000  R  MagicValue       Magic value "virt"
- * 0x004  R  Version          Device version (current max. 1)
- * 0x008  R  DeviceID         Virtio device ID
- * 0x00c  R  VendorID         Virtio vendor ID
- *
- * 0x010  R  HostFeatures     Features supported by the host
- * 0x014  W  HostFeaturesSel  Set of host features to access via HostFeatures
- *
- * 0x020  W  GuestFeatures    Features activated by the guest
- * 0x024  W  GuestFeaturesSel Set of activated features to set via GuestFeatures
- * 0x028  W  GuestPageSize    Size of guest's memory page in bytes
- *
- * 0x030  W  QueueSel         Queue selector
- * 0x034  R  QueueNumMax      Maximum size of the currently selected queue
- * 0x038  W  QueueNum         Queue size for the currently selected queue
- * 0x03c  W  QueueAlign       Used Ring alignment for the current queue
- * 0x040  RW QueuePFN         PFN for the currently selected queue
- *
- * 0x050  W  QueueNotify      Queue notifier
- * 0x060  R  InterruptStatus  Interrupt status register
- * 0x064  W  InterruptACK     Interrupt acknowledge register
- * 0x070  RW Status           Device status register
- *
- * 0x100+ RW                  Device-specific configuration space
- *
  * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
  *
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
@@ -145,11 +115,16 @@ struct virtio_mmio_vq_info {
 static u64 vm_get_features(struct virtio_device *vdev)
 {
        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+       u64 features;
+
+       writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+       features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
+       features <<= 32;
 
-       /* TODO: Features > 32 bits */
-       writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL);
+       writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+       features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
 
-       return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES);
+       return features;
 }
 
 static int vm_finalize_features(struct virtio_device *vdev)
@@ -159,11 +134,20 @@ static int vm_finalize_features(struct virtio_device *vdev)
        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);
 
-       /* Make sure we don't have any features > 32 bits! */
-       BUG_ON((u32)vdev->features != vdev->features);
+       /* Make sure there are no mixed devices */
+       if (vm_dev->version == 2 &&
+                       !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+               dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
+               return -EINVAL;
+       }
+
+       writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
+       writel((u32)(vdev->features >> 32),
+                       vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
 
-       writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
-       writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
+       writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
+       writel((u32)vdev->features,
+                       vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
 
        return 0;
 }
@@ -275,7 +259,12 @@ static void vm_del_vq(struct virtqueue *vq)
 
        /* Select and deactivate the queue */
        writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
-       writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+       if (vm_dev->version == 1) {
+               writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+       } else {
+               writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
+               WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
+       }
 
        size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
        free_pages_exact(info->queue, size);
@@ -312,7 +301,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
        writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
 
        /* Queue shouldn't already be set up. */
-       if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
+       if (readl(vm_dev->base + (vm_dev->version == 1 ?
+                       VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
                err = -ENOENT;
                goto error_available;
        }
@@ -356,13 +346,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
                info->num /= 2;
        }
 
-       /* Activate the queue */
-       writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
-       writel(VIRTIO_MMIO_VRING_ALIGN,
-                       vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
-       writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
-                       vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
-
        /* Create the vring */
        vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
                                 true, info->queue, vm_notify, callback, name);
@@ -371,6 +354,33 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
                goto error_new_virtqueue;
        }
 
+       /* Activate the queue */
+       writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
+       if (vm_dev->version == 1) {
+               writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
+               writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
+                               vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+       } else {
+               u64 addr;
+
+               addr = virt_to_phys(info->queue);
+               writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
+               writel((u32)(addr >> 32),
+                               vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
+
+               addr = virt_to_phys(virtqueue_get_avail(vq));
+               writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
+               writel((u32)(addr >> 32),
+                               vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
+
+               addr = virt_to_phys(virtqueue_get_used(vq));
+               writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
+               writel((u32)(addr >> 32),
+                               vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
+
+               writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
+       }
+
        vq->priv = info;
        info->vq = vq;
 
@@ -381,7 +391,12 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
        return vq;
 
 error_new_virtqueue:
-       writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+       if (vm_dev->version == 1) {
+               writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+       } else {
+               writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
+               WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
+       }
        free_pages_exact(info->queue, size);
 error_alloc_pages:
        kfree(info);
@@ -476,16 +491,32 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 
        /* Check device version */
        vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
-       if (vm_dev->version != 1) {
+       if (vm_dev->version < 1 || vm_dev->version > 2) {
                dev_err(&pdev->dev, "Version %ld not supported!\n",
                                vm_dev->version);
                return -ENXIO;
        }
 
        vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
+       if (vm_dev->vdev.id.device == 0) {
+               /*
+                * virtio-mmio device with an ID 0 is a (dummy) placeholder
+                * with no function. End probing now with no error reported.
+                */
+               return -ENODEV;
+       }
        vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-       writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+       /* Reject legacy-only IDs for version 2 devices */
+       if (vm_dev->version == 2 &&
+                       virtio_device_is_legacy_only(vm_dev->vdev.id)) {
+               dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
+                               vm_dev->vdev.id.device);
+               return -ENODEV;
+       }
+
+       if (vm_dev->version == 1)
+               writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
        platform_set_drvdata(pdev, vm_dev);
 
index 9756f21b809e080d1d1975b0734cb82cdea6e9e3..e894eb278d8336d018d3e6e8c29556dc9b5f3cb5 100644 (file)
 
 #include "virtio_pci_common.h"
 
+static bool force_legacy = false;
+
+#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
+module_param(force_legacy, bool, 0444);
+MODULE_PARM_DESC(force_legacy,
+                "Force legacy mode for transitional virtio 1 devices");
+#endif
+
 /* wait for pending irq handlers */
 void vp_synchronize_vectors(struct virtio_device *vdev)
 {
@@ -464,15 +472,97 @@ static const struct pci_device_id virtio_pci_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
 
+static void virtio_pci_release_dev(struct device *_d)
+{
+       struct virtio_device *vdev = dev_to_virtio(_d);
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       /* As struct device is a kobject, it's not safe to
+        * free the memory (including the reference counter itself)
+        * until its release callback. */
+       kfree(vp_dev);
+}
+
 static int virtio_pci_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
 {
-       return virtio_pci_legacy_probe(pci_dev, id);
+       struct virtio_pci_device *vp_dev;
+       int rc;
+
+       /* allocate our structure and fill it out */
+       vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
+       if (!vp_dev)
+               return -ENOMEM;
+
+       pci_set_drvdata(pci_dev, vp_dev);
+       vp_dev->vdev.dev.parent = &pci_dev->dev;
+       vp_dev->vdev.dev.release = virtio_pci_release_dev;
+       vp_dev->pci_dev = pci_dev;
+       INIT_LIST_HEAD(&vp_dev->virtqueues);
+       spin_lock_init(&vp_dev->lock);
+
+       /* Disable MSI/MSIX to bring device to a known good state. */
+       pci_msi_off(pci_dev);
+
+       /* enable the device */
+       rc = pci_enable_device(pci_dev);
+       if (rc)
+               goto err_enable_device;
+
+       rc = pci_request_regions(pci_dev, "virtio-pci");
+       if (rc)
+               goto err_request_regions;
+
+       if (force_legacy) {
+               rc = virtio_pci_legacy_probe(vp_dev);
+               /* Also try modern mode if we can't map BAR0 (no IO space). */
+               if (rc == -ENODEV || rc == -ENOMEM)
+                       rc = virtio_pci_modern_probe(vp_dev);
+               if (rc)
+                       goto err_probe;
+       } else {
+               rc = virtio_pci_modern_probe(vp_dev);
+               if (rc == -ENODEV)
+                       rc = virtio_pci_legacy_probe(vp_dev);
+               if (rc)
+                       goto err_probe;
+       }
+
+       pci_set_master(pci_dev);
+
+       rc = register_virtio_device(&vp_dev->vdev);
+       if (rc)
+               goto err_register;
+
+       return 0;
+
+err_register:
+       if (vp_dev->ioaddr)
+            virtio_pci_legacy_remove(vp_dev);
+       else
+            virtio_pci_modern_remove(vp_dev);
+err_probe:
+       pci_release_regions(pci_dev);
+err_request_regions:
+       pci_disable_device(pci_dev);
+err_enable_device:
+       kfree(vp_dev);
+       return rc;
 }
 
 static void virtio_pci_remove(struct pci_dev *pci_dev)
 {
-     virtio_pci_legacy_remove(pci_dev);
+       struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+
+       unregister_virtio_device(&vp_dev->vdev);
+
+       if (vp_dev->ioaddr)
+               virtio_pci_legacy_remove(vp_dev);
+       else
+               virtio_pci_modern_remove(vp_dev);
+
+       pci_release_regions(pci_dev);
+       pci_disable_device(pci_dev);
 }
 
 static struct pci_driver virtio_pci_driver = {
index 5a497289b7e9c336d1478db41ca5c0f60fbbafb9..28ee4e56badf1575d8d1c590850902fb7a54d30b 100644 (file)
@@ -53,12 +53,32 @@ struct virtio_pci_device {
        struct virtio_device vdev;
        struct pci_dev *pci_dev;
 
+       /* In legacy mode, these two point to within ->legacy. */
+       /* Where to read and clear interrupt */
+       u8 __iomem *isr;
+
+       /* Modern only fields */
+       /* The IO mapping for the PCI config space (non-legacy mode) */
+       struct virtio_pci_common_cfg __iomem *common;
+       /* Device-specific data (non-legacy mode)  */
+       void __iomem *device;
+       /* Base of vq notifications (non-legacy mode). */
+       void __iomem *notify_base;
+
+       /* So we can sanity-check accesses. */
+       size_t notify_len;
+       size_t device_len;
+
+       /* Capability for when we need to map notifications per-vq. */
+       int notify_map_cap;
+
+       /* Multiply queue_notify_off by this value. (non-legacy mode). */
+       u32 notify_offset_multiplier;
+
+       /* Legacy only field */
        /* the IO mapping for the PCI config space */
        void __iomem *ioaddr;
 
-       /* the IO mapping for ISR operation */
-       void __iomem *isr;
-
        /* a list of queues so we can dispatch IRQs */
        spinlock_t lock;
        struct list_head virtqueues;
@@ -127,8 +147,19 @@ const char *vp_bus_name(struct virtio_device *vdev);
  */
 int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
 
-int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
-                           const struct pci_device_id *id);
-void virtio_pci_legacy_remove(struct pci_dev *pci_dev);
+#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
+int virtio_pci_legacy_probe(struct virtio_pci_device *);
+void virtio_pci_legacy_remove(struct virtio_pci_device *);
+#else
+static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
+{
+       return -ENODEV;
+}
+static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
+{
+}
+#endif
+int virtio_pci_modern_probe(struct virtio_pci_device *);
+void virtio_pci_modern_remove(struct virtio_pci_device *);
 
 #endif
index a5486e65e04bd55d5c64a33d3dbeeadb27dd4857..256a5278a515deb1b8060386eb74bcf344a8fd66 100644 (file)
@@ -211,23 +211,10 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
        .set_vq_affinity = vp_set_vq_affinity,
 };
 
-static void virtio_pci_release_dev(struct device *_d)
-{
-       struct virtio_device *vdev = dev_to_virtio(_d);
-       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
-       /* As struct device is a kobject, it's not safe to
-        * free the memory (including the reference counter itself)
-        * until it's release callback. */
-       kfree(vp_dev);
-}
-
 /* the PCI probing function */
-int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
-                           const struct pci_device_id *id)
+int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
 {
-       struct virtio_pci_device *vp_dev;
-       int err;
+       struct pci_dev *pci_dev = vp_dev->pci_dev;
 
        /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
        if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
@@ -239,41 +226,12 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
                return -ENODEV;
        }
 
-       /* allocate our structure and fill it out */
-       vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
-       if (vp_dev == NULL)
-               return -ENOMEM;
-
-       vp_dev->vdev.dev.parent = &pci_dev->dev;
-       vp_dev->vdev.dev.release = virtio_pci_release_dev;
-       vp_dev->vdev.config = &virtio_pci_config_ops;
-       vp_dev->pci_dev = pci_dev;
-       INIT_LIST_HEAD(&vp_dev->virtqueues);
-       spin_lock_init(&vp_dev->lock);
-
-       /* Disable MSI/MSIX to bring device to a known good state. */
-       pci_msi_off(pci_dev);
-
-       /* enable the device */
-       err = pci_enable_device(pci_dev);
-       if (err)
-               goto out;
-
-       err = pci_request_regions(pci_dev, "virtio-pci");
-       if (err)
-               goto out_enable_device;
-
        vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
-       if (vp_dev->ioaddr == NULL) {
-               err = -ENOMEM;
-               goto out_req_regions;
-       }
+       if (!vp_dev->ioaddr)
+               return -ENOMEM;
 
        vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
 
-       pci_set_drvdata(pci_dev, vp_dev);
-       pci_set_master(pci_dev);
-
        /* we use the subsystem vendor/device id as the virtio vendor/device
         * id.  this allows us to use the same PCI vendor/device id for all
         * virtio devices and to identify the particular virtio driver by
@@ -281,36 +239,18 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
        vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
        vp_dev->vdev.id.device = pci_dev->subsystem_device;
 
+       vp_dev->vdev.config = &virtio_pci_config_ops;
+
        vp_dev->config_vector = vp_config_vector;
        vp_dev->setup_vq = setup_vq;
        vp_dev->del_vq = del_vq;
 
-       /* finally register the virtio device */
-       err = register_virtio_device(&vp_dev->vdev);
-       if (err)
-               goto out_set_drvdata;
-
        return 0;
-
-out_set_drvdata:
-       pci_iounmap(pci_dev, vp_dev->ioaddr);
-out_req_regions:
-       pci_release_regions(pci_dev);
-out_enable_device:
-       pci_disable_device(pci_dev);
-out:
-       kfree(vp_dev);
-       return err;
 }
 
-void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
+void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
 {
-       struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
-
-       unregister_virtio_device(&vp_dev->vdev);
+       struct pci_dev *pci_dev = vp_dev->pci_dev;
 
-       vp_del_vqs(&vp_dev->vdev);
        pci_iounmap(pci_dev, vp_dev->ioaddr);
-       pci_release_regions(pci_dev);
-       pci_disable_device(pci_dev);
 }
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
new file mode 100644 (file)
index 0000000..2aa38e5
--- /dev/null
@@ -0,0 +1,695 @@
+/*
+ * Virtio PCI driver - modern (virtio 1.0) device support
+ *
+ * This module allows virtio devices to be used over a virtual PCI device.
+ * This can be used with QEMU based VMMs like KVM or Xen.
+ *
+ * Copyright IBM Corp. 2007
+ * Copyright Red Hat, Inc. 2014
+ *
+ * Authors:
+ *  Anthony Liguori  <aliguori@us.ibm.com>
+ *  Rusty Russell <rusty@rustcorp.com.au>
+ *  Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#define VIRTIO_PCI_NO_LEGACY
+#include "virtio_pci_common.h"
+
+static void __iomem *map_capability(struct pci_dev *dev, int off,
+                                   size_t minlen,
+                                   u32 align,
+                                   u32 start, u32 size,
+                                   size_t *len)
+{
+       u8 bar;
+       u32 offset, length;
+       void __iomem *p;
+
+       pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
+                                                bar),
+                            &bar);
+       pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
+                            &offset);
+       pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
+                             &length);
+
+       if (length <= start) {
+               dev_err(&dev->dev,
+                       "virtio_pci: bad capability len %u (>%u expected)\n",
+                       length, start);
+               return NULL;
+       }
+
+       if (length - start < minlen) {
+               dev_err(&dev->dev,
+                       "virtio_pci: bad capability len %u (>=%zu expected)\n",
+                       length, minlen);
+               return NULL;
+       }
+
+       length -= start;
+
+       if (start + offset < offset) {
+               dev_err(&dev->dev,
+                       "virtio_pci: map wrap-around %u+%u\n",
+                       start, offset);
+               return NULL;
+       }
+
+       offset += start;
+
+       if (offset & (align - 1)) {
+               dev_err(&dev->dev,
+                       "virtio_pci: offset %u not aligned to %u\n",
+                       offset, align);
+               return NULL;
+       }
+
+       if (length > size)
+               length = size;
+
+       if (len)
+               *len = length;
+
+       if (minlen + offset < minlen ||
+           minlen + offset > pci_resource_len(dev, bar)) {
+               dev_err(&dev->dev,
+                       "virtio_pci: map virtio %zu@%u "
+                       "out of range on bar %i length %lu\n",
+                       minlen, offset,
+                       bar, (unsigned long)pci_resource_len(dev, bar));
+               return NULL;
+       }
+
+       p = pci_iomap_range(dev, bar, offset, length);
+       if (!p)
+               dev_err(&dev->dev,
+                       "virtio_pci: unable to map virtio %u@%u on bar %i\n",
+                       length, offset, bar);
+       return p;
+}
+
+static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
+{
+       iowrite32((u32)val, lo);
+       iowrite32(val >> 32, hi);
+}
+
+/* virtio config->get_features() implementation */
+static u64 vp_get_features(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       u64 features;
+
+       iowrite32(0, &vp_dev->common->device_feature_select);
+       features = ioread32(&vp_dev->common->device_feature);
+       iowrite32(1, &vp_dev->common->device_feature_select);
+       features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);
+
+       return features;
+}
+
+/* virtio config->finalize_features() implementation */
+static int vp_finalize_features(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       /* Give virtio_ring a chance to accept features. */
+       vring_transport_features(vdev);
+
+       if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+               dev_err(&vdev->dev, "virtio: device uses modern interface "
+                       "but does not have VIRTIO_F_VERSION_1\n");
+               return -EINVAL;
+       }
+
+       iowrite32(0, &vp_dev->common->guest_feature_select);
+       iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
+       iowrite32(1, &vp_dev->common->guest_feature_select);
+       iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
+
+       return 0;
+}
+
+/* virtio config->get() implementation */
+static void vp_get(struct virtio_device *vdev, unsigned offset,
+                  void *buf, unsigned len)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       u8 b;
+       __le16 w;
+       __le32 l;
+
+       BUG_ON(offset + len > vp_dev->device_len);
+
+       switch (len) {
+       case 1:
+               b = ioread8(vp_dev->device + offset);
+               memcpy(buf, &b, sizeof b);
+               break;
+       case 2:
+               w = cpu_to_le16(ioread16(vp_dev->device + offset));
+               memcpy(buf, &w, sizeof w);
+               break;
+       case 4:
+               l = cpu_to_le32(ioread32(vp_dev->device + offset));
+               memcpy(buf, &l, sizeof l);
+               break;
+       case 8:
+               l = cpu_to_le32(ioread32(vp_dev->device + offset));
+               memcpy(buf, &l, sizeof l);
+               l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
+               memcpy(buf + sizeof l, &l, sizeof l);
+               break;
+       default:
+               BUG();
+       }
+}
+
+/* the config->set() implementation.  it's symmetric to the config->get()
+ * implementation */
+static void vp_set(struct virtio_device *vdev, unsigned offset,
+                  const void *buf, unsigned len)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       u8 b;
+       __le16 w;
+       __le32 l;
+
+       BUG_ON(offset + len > vp_dev->device_len);
+
+       switch (len) {
+       case 1:
+               memcpy(&b, buf, sizeof b);
+               iowrite8(b, vp_dev->device + offset);
+               break;
+       case 2:
+               memcpy(&w, buf, sizeof w);
+               iowrite16(le16_to_cpu(w), vp_dev->device + offset);
+               break;
+       case 4:
+               memcpy(&l, buf, sizeof l);
+               iowrite32(le32_to_cpu(l), vp_dev->device + offset);
+               break;
+       case 8:
+               memcpy(&l, buf, sizeof l);
+               iowrite32(le32_to_cpu(l), vp_dev->device + offset);
+               memcpy(&l, buf + sizeof l, sizeof l);
+               iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
+               break;
+       default:
+               BUG();
+       }
+}
+
+static u32 vp_generation(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       return ioread8(&vp_dev->common->config_generation);
+}
+
+/* config->{get,set}_status() implementations */
+static u8 vp_get_status(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       return ioread8(&vp_dev->common->device_status);
+}
+
+static void vp_set_status(struct virtio_device *vdev, u8 status)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       /* We should never be setting status to 0. */
+       BUG_ON(status == 0);
+       iowrite8(status, &vp_dev->common->device_status);
+}
+
+static void vp_reset(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       /* 0 status means a reset. */
+       iowrite8(0, &vp_dev->common->device_status);
+       /* Flush out the status write, and flush in device writes,
+        * including MSI-X interrupts, if any. */
+       ioread8(&vp_dev->common->device_status);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
+}
+
+static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
+{
+       /* Setup the vector used for configuration events */
+       iowrite16(vector, &vp_dev->common->msix_config);
+       /* Verify we had enough resources to assign the vector */
+       /* Will also flush the write out to device */
+       return ioread16(&vp_dev->common->msix_config);
+}
+
+static size_t vring_pci_size(u16 num)
+{
+       /* We only need a cacheline separation. */
+       return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES));
+}
+
+static void *alloc_virtqueue_pages(int *num)
+{
+       void *pages;
+
+       /* TODO: allocate each queue chunk individually */
+       for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) {
+               pages = alloc_pages_exact(vring_pci_size(*num),
+                                         GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
+               if (pages)
+                       return pages;
+       }
+
+       if (!*num)
+               return NULL;
+
+       /* Try to get a single page. You are my only hope! */
+       return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO);
+}
+
+static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+                                 struct virtio_pci_vq_info *info,
+                                 unsigned index,
+                                 void (*callback)(struct virtqueue *vq),
+                                 const char *name,
+                                 u16 msix_vec)
+{
+       struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+       struct virtqueue *vq;
+       u16 num, off;
+       int err;
+
+       if (index >= ioread16(&cfg->num_queues))
+               return ERR_PTR(-ENOENT);
+
+       /* Select the queue we're interested in */
+       iowrite16(index, &cfg->queue_select);
+
+       /* Check if queue is either not available or already active. */
+       num = ioread16(&cfg->queue_size);
+       if (!num || ioread16(&cfg->queue_enable))
+               return ERR_PTR(-ENOENT);
+
+       if (num & (num - 1)) {
+               dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* get offset of notification word for this vq */
+       off = ioread16(&cfg->queue_notify_off);
+
+       info->num = num;
+       info->msix_vector = msix_vec;
+
+       info->queue = alloc_virtqueue_pages(&info->num);
+       if (info->queue == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       /* create the vring */
+       vq = vring_new_virtqueue(index, info->num,
+                                SMP_CACHE_BYTES, &vp_dev->vdev,
+                                true, info->queue, vp_notify, callback, name);
+       if (!vq) {
+               err = -ENOMEM;
+               goto err_new_queue;
+       }
+
+       /* activate the queue */
+       iowrite16(num, &cfg->queue_size);
+       iowrite64_twopart(virt_to_phys(info->queue),
+                         &cfg->queue_desc_lo, &cfg->queue_desc_hi);
+       iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
+                         &cfg->queue_avail_lo, &cfg->queue_avail_hi);
+       iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
+                         &cfg->queue_used_lo, &cfg->queue_used_hi);
+
+       if (vp_dev->notify_base) {
+               /* offset should not wrap */
+               if ((u64)off * vp_dev->notify_offset_multiplier + 2
+                   > vp_dev->notify_len) {
+                       dev_warn(&vp_dev->pci_dev->dev,
+                                "bad notification offset %u (x %u) "
+                                "for queue %u > %zd",
+                                off, vp_dev->notify_offset_multiplier,
+                                index, vp_dev->notify_len);
+                       err = -EINVAL;
+                       goto err_map_notify;
+               }
+               vq->priv = (void __force *)vp_dev->notify_base +
+                       off * vp_dev->notify_offset_multiplier;
+       } else {
+               vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
+                                         vp_dev->notify_map_cap, 2, 2,
+                                         off * vp_dev->notify_offset_multiplier, 2,
+                                         NULL);
+       }
+
+       if (!vq->priv) {
+               err = -ENOMEM;
+               goto err_map_notify;
+       }
+
+       if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+               iowrite16(msix_vec, &cfg->queue_msix_vector);
+               msix_vec = ioread16(&cfg->queue_msix_vector);
+               if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
+                       err = -EBUSY;
+                       goto err_assign_vector;
+               }
+       }
+
+       return vq;
+
+err_assign_vector:
+       if (!vp_dev->notify_base)
+               pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
+err_map_notify:
+       vring_del_virtqueue(vq);
+err_new_queue:
+       free_pages_exact(info->queue, vring_pci_size(info->num));
+       return ERR_PTR(err);
+}
+
+static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+                             struct virtqueue *vqs[],
+                             vq_callback_t *callbacks[],
+                             const char *names[])
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtqueue *vq;
+       int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names);
+
+       if (rc)
+               return rc;
+
+       /* Select and activate all queues. Has to be done last: once we do
+        * this, there's no way to go back except reset.
+        */
+       list_for_each_entry(vq, &vdev->vqs, list) {
+               iowrite16(vq->index, &vp_dev->common->queue_select);
+               iowrite16(1, &vp_dev->common->queue_enable);
+       }
+
+       return 0;
+}
+
+static void del_vq(struct virtio_pci_vq_info *info)
+{
+       struct virtqueue *vq = info->vq;
+       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+
+       iowrite16(vq->index, &vp_dev->common->queue_select);
+
+       if (vp_dev->msix_enabled) {
+               iowrite16(VIRTIO_MSI_NO_VECTOR,
+                         &vp_dev->common->queue_msix_vector);
+               /* Flush the write out to device */
+               ioread16(&vp_dev->common->queue_msix_vector);
+       }
+
+       if (!vp_dev->notify_base)
+               pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);
+
+       vring_del_virtqueue(vq);
+
+       free_pages_exact(info->queue, vring_pci_size(info->num));
+}
+
+static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
+       .get            = NULL,
+       .set            = NULL,
+       .generation     = vp_generation,
+       .get_status     = vp_get_status,
+       .set_status     = vp_set_status,
+       .reset          = vp_reset,
+       .find_vqs       = vp_modern_find_vqs,
+       .del_vqs        = vp_del_vqs,
+       .get_features   = vp_get_features,
+       .finalize_features = vp_finalize_features,
+       .bus_name       = vp_bus_name,
+       .set_vq_affinity = vp_set_vq_affinity,
+};
+
+static const struct virtio_config_ops virtio_pci_config_ops = {
+       .get            = vp_get,
+       .set            = vp_set,
+       .generation     = vp_generation,
+       .get_status     = vp_get_status,
+       .set_status     = vp_set_status,
+       .reset          = vp_reset,
+       .find_vqs       = vp_modern_find_vqs,
+       .del_vqs        = vp_del_vqs,
+       .get_features   = vp_get_features,
+       .finalize_features = vp_finalize_features,
+       .bus_name       = vp_bus_name,
+       .set_vq_affinity = vp_set_vq_affinity,
+};
+
+/**
+ * virtio_pci_find_capability - walk capabilities to find device info.
+ * @dev: the pci device
+ * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
+ * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
+ *
+ * Returns offset of the capability, or 0.
+ */
+static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
+                                            u32 ioresource_types)
+{
+       int pos;
+
+       for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+            pos > 0;
+            pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
+               u8 type, bar;
+               pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+                                                        cfg_type),
+                                    &type);
+               pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+                                                        bar),
+                                    &bar);
+
+               /* Ignore structures with reserved BAR values */
+               if (bar > 0x5)
+                       continue;
+
+               if (type == cfg_type) {
+                       if (pci_resource_len(dev, bar) &&
+                           pci_resource_flags(dev, bar) & ioresource_types)
+                               return pos;
+               }
+       }
+       return 0;
+}
+
+/* This is part of the ABI.  Don't screw with it. */
+static inline void check_offsets(void)
+{
+       /* Note: disk space was harmed in compilation of this function. */
+       BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
+                    offsetof(struct virtio_pci_cap, cap_vndr));
+       BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
+                    offsetof(struct virtio_pci_cap, cap_next));
+       BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
+                    offsetof(struct virtio_pci_cap, cap_len));
+       BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
+                    offsetof(struct virtio_pci_cap, cfg_type));
+       BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
+                    offsetof(struct virtio_pci_cap, bar));
+       BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
+                    offsetof(struct virtio_pci_cap, offset));
+       BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
+                    offsetof(struct virtio_pci_cap, length));
+       BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
+                    offsetof(struct virtio_pci_notify_cap,
+                             notify_off_multiplier));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
+                    offsetof(struct virtio_pci_common_cfg,
+                             device_feature_select));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
+                    offsetof(struct virtio_pci_common_cfg, device_feature));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
+                    offsetof(struct virtio_pci_common_cfg,
+                             guest_feature_select));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
+                    offsetof(struct virtio_pci_common_cfg, guest_feature));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
+                    offsetof(struct virtio_pci_common_cfg, msix_config));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
+                    offsetof(struct virtio_pci_common_cfg, num_queues));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
+                    offsetof(struct virtio_pci_common_cfg, device_status));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
+                    offsetof(struct virtio_pci_common_cfg, config_generation));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
+                    offsetof(struct virtio_pci_common_cfg, queue_select));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
+                    offsetof(struct virtio_pci_common_cfg, queue_size));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
+                    offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
+                    offsetof(struct virtio_pci_common_cfg, queue_enable));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
+                    offsetof(struct virtio_pci_common_cfg, queue_notify_off));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
+                    offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
+                    offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
+                    offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
+                    offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
+                    offsetof(struct virtio_pci_common_cfg, queue_used_lo));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
+                    offsetof(struct virtio_pci_common_cfg, queue_used_hi));
+}
+
+/* the PCI probing function */
+int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
+{
+       struct pci_dev *pci_dev = vp_dev->pci_dev;
+       int err, common, isr, notify, device;
+       u32 notify_length;
+       u32 notify_offset;
+
+       check_offsets();
+
+       /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
+       if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
+               return -ENODEV;
+
+       if (pci_dev->device < 0x1040) {
+               /* Transitional devices: use the PCI subsystem device id as
+                * virtio device id, same as legacy driver always did.
+                */
+               vp_dev->vdev.id.device = pci_dev->subsystem_device;
+       } else {
+               /* Modern devices: simply use PCI device id, but start from 0x1040. */
+               vp_dev->vdev.id.device = pci_dev->device - 0x1040;
+       }
+       vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
+
+       if (virtio_device_is_legacy_only(vp_dev->vdev.id))
+               return -ENODEV;
+
+       /* check for a common config: if not, use legacy mode (bar 0). */
+       common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
+                                           IORESOURCE_IO | IORESOURCE_MEM);
+       if (!common) {
+               dev_info(&pci_dev->dev,
+                        "virtio_pci: leaving for legacy driver\n");
+               return -ENODEV;
+       }
+
+       /* If common is there, these should be too... */
+       isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
+                                        IORESOURCE_IO | IORESOURCE_MEM);
+       notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
+                                           IORESOURCE_IO | IORESOURCE_MEM);
+       if (!isr || !notify) {
+               dev_err(&pci_dev->dev,
+                       "virtio_pci: missing capabilities %i/%i/%i\n",
+                       common, isr, notify);
+               return -EINVAL;
+       }
+
+       /* Device capability is only mandatory for devices that have
+        * device-specific configuration.
+        */
+       device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
+                                           IORESOURCE_IO | IORESOURCE_MEM);
+
+       err = -EINVAL;
+       vp_dev->common = map_capability(pci_dev, common,
+                                       sizeof(struct virtio_pci_common_cfg), 4,
+                                       0, sizeof(struct virtio_pci_common_cfg),
+                                       NULL);
+       if (!vp_dev->common)
+               goto err_map_common;
+       vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
+                                    0, 1,
+                                    NULL);
+       if (!vp_dev->isr)
+               goto err_map_isr;
+
+       /* Read notify_off_multiplier from config space. */
+       pci_read_config_dword(pci_dev,
+                             notify + offsetof(struct virtio_pci_notify_cap,
+                                               notify_off_multiplier),
+                             &vp_dev->notify_offset_multiplier);
+       /* Read notify length and offset from config space. */
+       pci_read_config_dword(pci_dev,
+                             notify + offsetof(struct virtio_pci_notify_cap,
+                                               cap.length),
+                             &notify_length);
+
+       pci_read_config_dword(pci_dev,
+                             notify + offsetof(struct virtio_pci_notify_cap,
+                                               cap.offset),
+                             &notify_offset);
+
+       /* We don't know how many VQs we'll map, ahead of the time.
+        * If notify length is small, map it all now.
+        * Otherwise, map each VQ individually later.
+        */
+       if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
+               vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
+                                                    0, notify_length,
+                                                    &vp_dev->notify_len);
+               if (!vp_dev->notify_base)
+                       goto err_map_notify;
+       } else {
+               vp_dev->notify_map_cap = notify;
+       }
+
+       /* Again, we don't know how much we should map, but PAGE_SIZE
+        * is more than enough for all existing devices.
+        */
+       if (device) {
+               vp_dev->device = map_capability(pci_dev, device, 0, 4,
+                                               0, PAGE_SIZE,
+                                               &vp_dev->device_len);
+               if (!vp_dev->device)
+                       goto err_map_device;
+
+               vp_dev->vdev.config = &virtio_pci_config_ops;
+       } else {
+               vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
+       }
+
+       vp_dev->config_vector = vp_config_vector;
+       vp_dev->setup_vq = setup_vq;
+       vp_dev->del_vq = del_vq;
+
+       return 0;
+
+err_map_device:
+       if (vp_dev->notify_base)
+               pci_iounmap(pci_dev, vp_dev->notify_base);
+err_map_notify:
+       pci_iounmap(pci_dev, vp_dev->isr);
+err_map_isr:
+       pci_iounmap(pci_dev, vp_dev->common);
+err_map_common:
+       return err;
+}
+
+void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
+{
+       struct pci_dev *pci_dev = vp_dev->pci_dev;
+
+       if (vp_dev->device)
+               pci_iounmap(pci_dev, vp_dev->device);
+       if (vp_dev->notify_base)
+               pci_iounmap(pci_dev, vp_dev->notify_base);
+       pci_iounmap(pci_dev, vp_dev->isr);
+       pci_iounmap(pci_dev, vp_dev->common);
+}
index 00ec6b3f96b2bf17abc94f8d31288a01b0c9af14..096b857e7b75abad526f487c84392d863c3c6b06 100644 (file)
@@ -54,8 +54,7 @@
 #define END_USE(vq)
 #endif
 
-struct vring_virtqueue
-{
+struct vring_virtqueue {
        struct virtqueue vq;
 
        /* Actual memory layout for this queue */
@@ -245,14 +244,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
        vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
        vq->num_added++;
 
+       pr_debug("Added buffer head %i to %p\n", head, vq);
+       END_USE(vq);
+
        /* This is very unlikely, but theoretically possible.  Kick
         * just in case. */
        if (unlikely(vq->num_added == (1 << 16) - 1))
                virtqueue_kick(_vq);
 
-       pr_debug("Added buffer head %i to %p\n", head, vq);
-       END_USE(vq);
-
        return 0;
 }
 
index 08f41add146134db9c6975f277b998dee77c70f3..16f20235099768d91f10931841e25176af59ef7e 100644 (file)
@@ -505,6 +505,16 @@ config MESON_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called meson_wdt.
 
+config MEDIATEK_WATCHDOG
+       tristate "Mediatek SoCs watchdog support"
+       depends on ARCH_MEDIATEK
+       select WATCHDOG_CORE
+       help
+         Say Y here to include support for the watchdog timer
+         in Mediatek SoCs.
+         To compile this driver as a module, choose M here: the
+         module will be called mtk_wdt.
+
 # AVR32 Architecture
 
 config AT32AP700X_WDT
@@ -1005,6 +1015,8 @@ config W83627HF_WDT
                NCT6775
                NCT6776
                NCT6779
+               NCT6791
+               NCT6792
 
          This watchdog simply watches your kernel to make sure it doesn't
          freeze, and if it does, it reboots your computer after a certain
@@ -1101,7 +1113,7 @@ config ATH79_WDT
 
 config BCM47XX_WDT
        tristate "Broadcom BCM47xx Watchdog Timer"
-       depends on BCM47XX
+       depends on BCM47XX || ARCH_BCM_5301X
        select WATCHDOG_CORE
        help
          Hardware driver for the Broadcom BCM47xx Watchdog Timer.
@@ -1235,6 +1247,17 @@ config BCM_KONA_WDT_DEBUG
 
          If in doubt, say 'N'.
 
+config IMGPDC_WDT
+       tristate "Imagination Technologies PDC Watchdog Timer"
+       depends on HAS_IOMEM
+       depends on METAG || MIPS || COMPILE_TEST
+       help
+         Driver for Imagination Technologies PowerDown Controller
+         Watchdog Timer.
+
+         To compile this driver as a loadable module, choose M here.
+         The module will be called imgpdc_wdt.
+
 config LANTIQ_WDT
        tristate "Lantiq SoC watchdog"
        depends on LANTIQ
index c569ec8f8a76b9d78ad4a8ef8d61313cb1ae7ce9..5c19294d1c3015acaf14e4a52734f0c82ae798db 100644 (file)
@@ -63,6 +63,7 @@ obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o
 obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
 obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
 obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
+obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -142,6 +143,7 @@ obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
 octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
 obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
 obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o
+obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
 
 # PARISC Architecture
 
index 9816485f68252254907b60e744058bc32d1f3065..b28a072abf78fbe030f647ef491e8fc24bc157d4 100644 (file)
@@ -169,6 +169,17 @@ static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
+static int bcm47xx_wdt_restart(struct notifier_block *this, unsigned long mode,
+                              void *cmd)
+{
+       struct bcm47xx_wdt *wdt;
+
+       wdt = container_of(this, struct bcm47xx_wdt, restart_handler);
+       wdt->timer_set(wdt, 1);
+
+       return NOTIFY_DONE;
+}
+
 static struct watchdog_ops bcm47xx_wdt_soft_ops = {
        .owner          = THIS_MODULE,
        .start          = bcm47xx_wdt_soft_start,
@@ -209,15 +220,23 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
        if (ret)
                goto err_timer;
 
-       ret = watchdog_register_device(&wdt->wdd);
+       wdt->restart_handler.notifier_call = &bcm47xx_wdt_restart;
+       wdt->restart_handler.priority = 64;
+       ret = register_restart_handler(&wdt->restart_handler);
        if (ret)
                goto err_notifier;
 
+       ret = watchdog_register_device(&wdt->wdd);
+       if (ret)
+               goto err_handler;
+
        dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n",
                timeout, nowayout ? ", nowayout" : "",
                soft ? ", Software Timer" : "");
        return 0;
 
+err_handler:
+       unregister_restart_handler(&wdt->restart_handler);
 err_notifier:
        unregister_reboot_notifier(&wdt->notifier);
 err_timer:
index 2cd6b2c2dd2a6980cbbefdb45a561fa2715f3982..e2fe2ebdebd4d6bb12bf41adb1f11c9654306963 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/mfd/da9063/registers.h>
 #include <linux/mfd/da9063/core.h>
+#include <linux/reboot.h>
 #include <linux/regmap.h>
 
 /*
@@ -38,6 +39,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
 struct da9063_watchdog {
        struct da9063 *da9063;
        struct watchdog_device wdtdev;
+       struct notifier_block restart_handler;
 };
 
 static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs)
@@ -119,6 +121,23 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd,
        return ret;
 }
 
+static int da9063_wdt_restart_handler(struct notifier_block *this,
+                                     unsigned long mode, void *cmd)
+{
+       struct da9063_watchdog *wdt = container_of(this,
+                                                  struct da9063_watchdog,
+                                                  restart_handler);
+       int ret;
+
+       ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F,
+                          DA9063_SHUTDOWN);
+       if (ret)
+               dev_alert(wdt->da9063->dev, "Failed to shutdown (err = %d)\n",
+                         ret);
+
+       return NOTIFY_DONE;
+}
+
 static const struct watchdog_info da9063_watchdog_info = {
        .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
        .identity = "DA9063 Watchdog",
@@ -163,14 +182,25 @@ static int da9063_wdt_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, wdt);
 
        ret = watchdog_register_device(&wdt->wdtdev);
+       if (ret)
+               return ret;
 
-       return ret;
+       wdt->restart_handler.notifier_call = da9063_wdt_restart_handler;
+       wdt->restart_handler.priority = 128;
+       ret = register_restart_handler(&wdt->restart_handler);
+       if (ret)
+               dev_err(wdt->da9063->dev,
+                       "Failed to register restart handler (err = %d)\n", ret);
+
+       return 0;
 }
 
 static int da9063_wdt_remove(struct platform_device *pdev)
 {
        struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev);
 
+       unregister_restart_handler(&wdt->restart_handler);
+
        watchdog_unregister_device(&wdt->wdtdev);
 
        return 0;
index b34a2e4e4e43c38d441a70018f088e7efd9b8608..d0bb9499d12caaa2d901b3454a0c65cf825a9ea4 100644 (file)
@@ -51,6 +51,8 @@
 /* The maximum TOP (timeout period) value that can be set in the watchdog. */
 #define DW_WDT_MAX_TOP         15
 
+#define DW_WDT_DEFAULT_SECONDS 30
+
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
@@ -96,6 +98,12 @@ static inline void dw_wdt_set_next_heartbeat(void)
        dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ;
 }
 
+static void dw_wdt_keepalive(void)
+{
+       writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
+              WDOG_COUNTER_RESTART_REG_OFFSET);
+}
+
 static int dw_wdt_set_top(unsigned top_s)
 {
        int i, top_val = DW_WDT_MAX_TOP;
@@ -110,21 +118,27 @@ static int dw_wdt_set_top(unsigned top_s)
                        break;
                }
 
-       /* Set the new value in the watchdog. */
+       /*
+        * Set the new value in the watchdog.  Some versions of dw_wdt
+        * have TOPINIT in the TIMEOUT_RANGE register (as per
+        * CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1).  On those we
+        * effectively get a pat of the watchdog right here.
+        */
        writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
                dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
 
+       /*
+        * Add an explicit pat to handle versions of the watchdog that
+        * don't have TOPINIT.  This won't hurt on versions that have
+        * it.
+        */
+       dw_wdt_keepalive();
+
        dw_wdt_set_next_heartbeat();
 
        return dw_wdt_top_in_seconds(top_val);
 }
 
-static void dw_wdt_keepalive(void)
-{
-       writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
-              WDOG_COUNTER_RESTART_REG_OFFSET);
-}
-
 static int dw_wdt_restart_handle(struct notifier_block *this,
                                unsigned long mode, void *cmd)
 {
@@ -167,9 +181,9 @@ static int dw_wdt_open(struct inode *inode, struct file *filp)
        if (!dw_wdt_is_enabled()) {
                /*
                 * The watchdog is not currently enabled. Set the timeout to
-                * the maximum and then start it.
+                * something reasonable and then start it.
                 */
-               dw_wdt_set_top(DW_WDT_MAX_TOP);
+               dw_wdt_set_top(DW_WDT_DEFAULT_SECONDS);
                writel(WDOG_CONTROL_REG_WDT_EN_MASK,
                       dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
        }
index bbdb19b4533268cea43775f7507d6ec354d4e2eb..cbc313d37c59f1f67b0bd99fc5835d2d35a14b85 100644 (file)
@@ -31,6 +31,8 @@ struct gpio_wdt_priv {
        int                     gpio;
        bool                    active_low;
        bool                    state;
+       bool                    always_running;
+       bool                    armed;
        unsigned int            hw_algo;
        unsigned int            hw_margin;
        unsigned long           last_jiffies;
@@ -48,14 +50,20 @@ static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
                gpio_direction_input(priv->gpio);
 }
 
-static int gpio_wdt_start(struct watchdog_device *wdd)
+static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv)
 {
-       struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
-
        priv->state = priv->active_low;
        gpio_direction_output(priv->gpio, priv->state);
        priv->last_jiffies = jiffies;
        mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
+}
+
+static int gpio_wdt_start(struct watchdog_device *wdd)
+{
+       struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+       gpio_wdt_start_impl(priv);
+       priv->armed = true;
 
        return 0;
 }
@@ -64,8 +72,11 @@ static int gpio_wdt_stop(struct watchdog_device *wdd)
 {
        struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
 
-       mod_timer(&priv->timer, 0);
-       gpio_wdt_disable(priv);
+       priv->armed = false;
+       if (!priv->always_running) {
+               mod_timer(&priv->timer, 0);
+               gpio_wdt_disable(priv);
+       }
 
        return 0;
 }
@@ -91,8 +102,8 @@ static void gpio_wdt_hwping(unsigned long data)
        struct watchdog_device *wdd = (struct watchdog_device *)data;
        struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
 
-       if (time_after(jiffies, priv->last_jiffies +
-                      msecs_to_jiffies(wdd->timeout * 1000))) {
+       if (priv->armed && time_after(jiffies, priv->last_jiffies +
+                                     msecs_to_jiffies(wdd->timeout * 1000))) {
                dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
                return;
        }
@@ -197,6 +208,9 @@ static int gpio_wdt_probe(struct platform_device *pdev)
        /* Use safe value (1/2 of real timeout) */
        priv->hw_margin = msecs_to_jiffies(hw_margin / 2);
 
+       priv->always_running = of_property_read_bool(pdev->dev.of_node,
+                                                    "always-running");
+
        watchdog_set_drvdata(&priv->wdd, priv);
 
        priv->wdd.info          = &gpio_wdt_ident;
@@ -216,8 +230,15 @@ static int gpio_wdt_probe(struct platform_device *pdev)
        priv->notifier.notifier_call = gpio_wdt_notify_sys;
        ret = register_reboot_notifier(&priv->notifier);
        if (ret)
-               watchdog_unregister_device(&priv->wdd);
+               goto error_unregister;
 
+       if (priv->always_running)
+               gpio_wdt_start_impl(priv);
+
+       return 0;
+
+error_unregister:
+       watchdog_unregister_device(&priv->wdd);
        return ret;
 }
 
index 75d2243b94f51ccc2c8b250b46b30d0866c6b3b6..ada3e44f99328424d11994ec03ebdd4886f4a919 100644 (file)
@@ -745,7 +745,7 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
 
        dev_info(&dev->dev,
                        "HP Watchdog Timer Driver: NMI decoding initialized"
-                       ", allow kernel dump: %s (default = 0/OFF)\n",
+                       ", allow kernel dump: %s (default = 1/ON)\n",
                        (allow_kdump == 0) ? "OFF" : "ON");
        return 0;
 
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
new file mode 100644 (file)
index 0000000..c8def68
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Imagination Technologies PowerDown Controller Watchdog Timer.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Based on drivers/watchdog/sunxi_wdt.c Copyright (c) 2013 Carlo Caione
+ *                                                     2012 Henrik Nordstrom
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+/* registers */
+#define PDC_WDT_SOFT_RESET             0x00
+#define PDC_WDT_CONFIG                 0x04
+  #define PDC_WDT_CONFIG_ENABLE                BIT(31)
+  #define PDC_WDT_CONFIG_DELAY_MASK    0x1f
+
+#define PDC_WDT_TICKLE1                        0x08
+#define PDC_WDT_TICKLE1_MAGIC          0xabcd1234
+#define PDC_WDT_TICKLE2                        0x0c
+#define PDC_WDT_TICKLE2_MAGIC          0x4321dcba
+
+#define PDC_WDT_TICKLE_STATUS_MASK     0x7
+#define PDC_WDT_TICKLE_STATUS_SHIFT    0
+#define PDC_WDT_TICKLE_STATUS_HRESET   0x0  /* Hard reset */
+#define PDC_WDT_TICKLE_STATUS_TIMEOUT  0x1  /* Timeout */
+#define PDC_WDT_TICKLE_STATUS_TICKLE   0x2  /* Tickled incorrectly */
+#define PDC_WDT_TICKLE_STATUS_SRESET   0x3  /* Soft reset */
+#define PDC_WDT_TICKLE_STATUS_USER     0x4  /* User reset */
+
+/* Timeout values are in seconds */
+#define PDC_WDT_MIN_TIMEOUT            1
+#define PDC_WDT_DEF_TIMEOUT            64
+
+static int heartbeat;
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
+       "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+       "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct pdc_wdt_dev {
+       struct watchdog_device wdt_dev;
+       struct clk *wdt_clk;
+       struct clk *sys_clk;
+       void __iomem *base;
+};
+
+static int pdc_wdt_keepalive(struct watchdog_device *wdt_dev)
+{
+       struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
+
+       writel(PDC_WDT_TICKLE1_MAGIC, wdt->base + PDC_WDT_TICKLE1);
+       writel(PDC_WDT_TICKLE2_MAGIC, wdt->base + PDC_WDT_TICKLE2);
+
+       return 0;
+}
+
+static int pdc_wdt_stop(struct watchdog_device *wdt_dev)
+{
+       unsigned int val;
+       struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
+
+       val = readl(wdt->base + PDC_WDT_CONFIG);
+       val &= ~PDC_WDT_CONFIG_ENABLE;
+       writel(val, wdt->base + PDC_WDT_CONFIG);
+
+       /* Must tickle to finish the stop */
+       pdc_wdt_keepalive(wdt_dev);
+
+       return 0;
+}
+
+static int pdc_wdt_set_timeout(struct watchdog_device *wdt_dev,
+                              unsigned int new_timeout)
+{
+       unsigned int val;
+       struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
+       unsigned long clk_rate = clk_get_rate(wdt->wdt_clk);
+
+       wdt->wdt_dev.timeout = new_timeout;
+
+       val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK;
+       val |= order_base_2(new_timeout * clk_rate) - 1;
+       writel(val, wdt->base + PDC_WDT_CONFIG);
+
+       return 0;
+}
+
+/* Start the watchdog timer (delay should already be set) */
+static int pdc_wdt_start(struct watchdog_device *wdt_dev)
+{
+       unsigned int val;
+       struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
+
+       val = readl(wdt->base + PDC_WDT_CONFIG);
+       val |= PDC_WDT_CONFIG_ENABLE;
+       writel(val, wdt->base + PDC_WDT_CONFIG);
+
+       return 0;
+}
+
+static struct watchdog_info pdc_wdt_info = {
+       .identity       = "IMG PDC Watchdog",
+       .options        = WDIOF_SETTIMEOUT |
+                         WDIOF_KEEPALIVEPING |
+                         WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops pdc_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = pdc_wdt_start,
+       .stop           = pdc_wdt_stop,
+       .ping           = pdc_wdt_keepalive,
+       .set_timeout    = pdc_wdt_set_timeout,
+};
+
+static int pdc_wdt_probe(struct platform_device *pdev)
+{
+       int ret, val;
+       unsigned long clk_rate;
+       struct resource *res;
+       struct pdc_wdt_dev *pdc_wdt;
+
+       pdc_wdt = devm_kzalloc(&pdev->dev, sizeof(*pdc_wdt), GFP_KERNEL);
+       if (!pdc_wdt)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pdc_wdt->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(pdc_wdt->base))
+               return PTR_ERR(pdc_wdt->base);
+
+       pdc_wdt->sys_clk = devm_clk_get(&pdev->dev, "sys");
+       if (IS_ERR(pdc_wdt->sys_clk)) {
+               dev_err(&pdev->dev, "failed to get the sys clock\n");
+               return PTR_ERR(pdc_wdt->sys_clk);
+       }
+
+       pdc_wdt->wdt_clk = devm_clk_get(&pdev->dev, "wdt");
+       if (IS_ERR(pdc_wdt->wdt_clk)) {
+               dev_err(&pdev->dev, "failed to get the wdt clock\n");
+               return PTR_ERR(pdc_wdt->wdt_clk);
+       }
+
+       ret = clk_prepare_enable(pdc_wdt->sys_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
+               return ret;
+       }
+
+       ret = clk_prepare_enable(pdc_wdt->wdt_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "could not prepare or enable wdt clock\n");
+               goto disable_sys_clk;
+       }
+
+       /* We use the clock rate to calculate the max timeout */
+       clk_rate = clk_get_rate(pdc_wdt->wdt_clk);
+       if (clk_rate == 0) {
+               dev_err(&pdev->dev, "failed to get clock rate\n");
+               ret = -EINVAL;
+               goto disable_wdt_clk;
+       }
+
+       if (order_base_2(clk_rate) > PDC_WDT_CONFIG_DELAY_MASK + 1) {
+               dev_err(&pdev->dev, "invalid clock rate\n");
+               ret = -EINVAL;
+               goto disable_wdt_clk;
+       }
+
+       if (order_base_2(clk_rate) == 0)
+               pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT + 1;
+       else
+               pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT;
+
+       pdc_wdt->wdt_dev.info = &pdc_wdt_info;
+       pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
+       pdc_wdt->wdt_dev.max_timeout = 1U << PDC_WDT_CONFIG_DELAY_MASK; /* 1 << 31 would overflow signed int */
+       pdc_wdt->wdt_dev.parent = &pdev->dev;
+
+       ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
+       if (ret < 0) {
+               pdc_wdt->wdt_dev.timeout = pdc_wdt->wdt_dev.max_timeout;
+               dev_warn(&pdev->dev,
+                        "Initial timeout out of range! setting max timeout\n");
+       }
+
+       pdc_wdt_stop(&pdc_wdt->wdt_dev);
+
+       /* Find what caused the last reset */
+       val = readl(pdc_wdt->base + PDC_WDT_TICKLE1);
+       val = (val & PDC_WDT_TICKLE_STATUS_MASK) >> PDC_WDT_TICKLE_STATUS_SHIFT;
+       switch (val) {
+       case PDC_WDT_TICKLE_STATUS_TICKLE:
+       case PDC_WDT_TICKLE_STATUS_TIMEOUT:
+               pdc_wdt->wdt_dev.bootstatus |= WDIOF_CARDRESET;
+               dev_info(&pdev->dev,
+                        "watchdog module last reset due to timeout\n");
+               break;
+       case PDC_WDT_TICKLE_STATUS_HRESET:
+               dev_info(&pdev->dev,
+                        "watchdog module last reset due to hard reset\n");
+               break;
+       case PDC_WDT_TICKLE_STATUS_SRESET:
+               dev_info(&pdev->dev,
+                        "watchdog module last reset due to soft reset\n");
+               break;
+       case PDC_WDT_TICKLE_STATUS_USER:
+               dev_info(&pdev->dev,
+                        "watchdog module last reset due to user reset\n");
+               break;
+       default:
+               dev_info(&pdev->dev,
+                        "contains an illegal status code (%08x)\n", val);
+               break;
+       }
+
+       watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
+
+       platform_set_drvdata(pdev, pdc_wdt);
+       watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
+
+       ret = watchdog_register_device(&pdc_wdt->wdt_dev);
+       if (ret)
+               goto disable_wdt_clk;
+
+       return 0;
+
+disable_wdt_clk:
+       clk_disable_unprepare(pdc_wdt->wdt_clk);
+disable_sys_clk:
+       clk_disable_unprepare(pdc_wdt->sys_clk);
+       return ret;
+}
+
+static void pdc_wdt_shutdown(struct platform_device *pdev)
+{
+       struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
+
+       pdc_wdt_stop(&pdc_wdt->wdt_dev);
+}
+
+static int pdc_wdt_remove(struct platform_device *pdev)
+{
+       struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
+
+       pdc_wdt_stop(&pdc_wdt->wdt_dev);
+       watchdog_unregister_device(&pdc_wdt->wdt_dev);
+       clk_disable_unprepare(pdc_wdt->wdt_clk);
+       clk_disable_unprepare(pdc_wdt->sys_clk);
+
+       return 0;
+}
+
+static const struct of_device_id pdc_wdt_match[] = {
+       { .compatible = "img,pdc-wdt" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, pdc_wdt_match);
+
+static struct platform_driver pdc_wdt_driver = {
+       .driver = {
+               .name = "imgpdc-wdt",
+               .of_match_table = pdc_wdt_match,
+       },
+       .probe = pdc_wdt_probe,
+       .remove = pdc_wdt_remove,
+       .shutdown = pdc_wdt_shutdown,
+};
+module_platform_driver(pdc_wdt_driver);
+
+MODULE_AUTHOR("Jude Abraham <Jude.Abraham@imgtec.com>");
+MODULE_AUTHOR("Naidu Tellapati <Naidu.Tellapati@imgtec.com>");
+MODULE_DESCRIPTION("Imagination Technologies PDC Watchdog Timer Driver");
+MODULE_LICENSE("GPL v2");
index 5142bbabe0279f0b36c92c854f7daa407e2e32a5..5e6d808d358a5c342ae11b9fd71ce5e09416b809 100644 (file)
@@ -205,7 +205,7 @@ static inline void imx2_wdt_ping_if_active(struct watchdog_device *wdog)
        }
 }
 
-static struct watchdog_ops imx2_wdt_ops = {
+static const struct watchdog_ops imx2_wdt_ops = {
        .owner = THIS_MODULE,
        .start = imx2_wdt_start,
        .stop = imx2_wdt_stop,
@@ -213,7 +213,7 @@ static struct watchdog_ops imx2_wdt_ops = {
        .set_timeout = imx2_wdt_set_timeout,
 };
 
-static struct regmap_config imx2_wdt_regmap_config = {
+static const struct regmap_config imx2_wdt_regmap_config = {
        .reg_bits = 16,
        .reg_stride = 2,
        .val_bits = 16,
index 0b93739c0106b17c885a4ef8292681d449851491..e54839b12650a52f22dffe2292ca2268fbef91d7 100644 (file)
@@ -12,8 +12,8 @@
  *                 http://www.ite.com.tw/
  *
  *     Support of the watchdog timers, which are available on
- *     IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726
- *     and IT8728.
+ *     IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726,
+ *     IT8728 and IT8783.
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License
@@ -87,6 +87,7 @@
 #define IT8721_ID      0x8721
 #define IT8726_ID      0x8726  /* the data sheet suggest wrongly 0x8716 */
 #define IT8728_ID      0x8728
+#define IT8783_ID      0x8783
 
 /* GPIO Configuration Registers LDN=0x07 */
 #define WDTCTRL                0x71
@@ -633,6 +634,7 @@ static int __init it87_wdt_init(void)
        case IT8720_ID:
        case IT8721_ID:
        case IT8728_ID:
+       case IT8783_ID:
                max_units = 65535;
                try_gameport = 0;
                break;
index 18e41afa4da38c53e4f0f6bdb55dcbec6ba151d6..4c2cc09c0c5780ec859c8643c65e887bfd3b0b54 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/of.h>
 
 #include <asm/mach-jz4740/timer.h>
 
@@ -142,6 +143,14 @@ static const struct watchdog_ops jz4740_wdt_ops = {
        .set_timeout = jz4740_wdt_set_timeout,
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id jz4740_wdt_of_matches[] = {
+       { .compatible = "ingenic,jz4740-watchdog", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches); /* ';' required: macro expands to an extern alias decl */
+#endif
+
 static int jz4740_wdt_probe(struct platform_device *pdev)
 {
        struct jz4740_wdt_drvdata *drvdata;
@@ -211,6 +220,7 @@ static struct platform_driver jz4740_wdt_driver = {
        .remove = jz4740_wdt_remove,
        .driver = {
                .name = "jz4740-wdt",
+               .of_match_table = of_match_ptr(jz4740_wdt_of_matches),
        },
 };
 
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
new file mode 100644 (file)
index 0000000..a87f6df
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * Mediatek Watchdog Driver
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Based on sunxi_wdt.c
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+
+#define WDT_MAX_TIMEOUT                31
+#define WDT_MIN_TIMEOUT                1
+#define WDT_LENGTH_TIMEOUT(n)  ((n) << 5)
+
+#define WDT_LENGTH             0x04
+#define WDT_LENGTH_KEY         0x8
+
+#define WDT_RST                        0x08
+#define WDT_RST_RELOAD         0x1971
+
+#define WDT_MODE               0x00
+#define WDT_MODE_EN            (1 << 0)
+#define WDT_MODE_EXT_POL_LOW   (0 << 1)
+#define WDT_MODE_EXT_POL_HIGH  (1 << 1)
+#define WDT_MODE_EXRST_EN      (1 << 2)
+#define WDT_MODE_IRQ_EN                (1 << 3)
+#define WDT_MODE_AUTO_START    (1 << 4)
+#define WDT_MODE_DUAL_EN       (1 << 6)
+#define WDT_MODE_KEY           0x22000000
+
+#define WDT_SWRST              0x14
+#define WDT_SWRST_KEY          0x1209
+
+#define DRV_NAME               "mtk-wdt"
+#define DRV_VERSION            "1.0"
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+static unsigned int timeout = WDT_MAX_TIMEOUT;
+
+struct mtk_wdt_dev {
+       struct watchdog_device wdt_dev;
+       void __iomem *wdt_base;
+       struct notifier_block restart_handler;
+};
+
+static int mtk_reset_handler(struct notifier_block *this, unsigned long mode,
+                               void *cmd)
+{
+       struct mtk_wdt_dev *mtk_wdt;
+       void __iomem *wdt_base;
+
+       mtk_wdt = container_of(this, struct mtk_wdt_dev, restart_handler);
+       wdt_base = mtk_wdt->wdt_base;
+
+       while (1) {
+               writel(WDT_SWRST_KEY, wdt_base + WDT_SWRST);
+               mdelay(5);
+       }
+
+       return NOTIFY_DONE;
+}
+
+static int mtk_wdt_ping(struct watchdog_device *wdt_dev)
+{
+       struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
+       void __iomem *wdt_base = mtk_wdt->wdt_base;
+
+       iowrite32(WDT_RST_RELOAD, wdt_base + WDT_RST);
+
+       return 0;
+}
+
+static int mtk_wdt_set_timeout(struct watchdog_device *wdt_dev,
+                               unsigned int timeout)
+{
+       struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
+       void __iomem *wdt_base = mtk_wdt->wdt_base;
+       u32 reg;
+
+       wdt_dev->timeout = timeout;
+
+       /*
+        * One bit is the value of 512 ticks
+        * The clock has 32 KHz
+        */
+       reg = WDT_LENGTH_TIMEOUT(timeout << 6) | WDT_LENGTH_KEY;
+       iowrite32(reg, wdt_base + WDT_LENGTH);
+
+       mtk_wdt_ping(wdt_dev);
+
+       return 0;
+}
+
+static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
+{
+       struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
+       void __iomem *wdt_base = mtk_wdt->wdt_base;
+       u32 reg;
+
+       reg = readl(wdt_base + WDT_MODE);
+       reg &= ~WDT_MODE_EN;
+       iowrite32(reg, wdt_base + WDT_MODE);
+
+       return 0;
+}
+
+static int mtk_wdt_start(struct watchdog_device *wdt_dev)
+{
+       u32 reg;
+       struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
+       void __iomem *wdt_base = mtk_wdt->wdt_base;
+       int ret; /* set_timeout() returns int; as u32 the "< 0" check below was dead code */
+
+       ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+       if (ret < 0)
+               return ret;
+
+       reg = ioread32(wdt_base + WDT_MODE);
+       reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
+       reg |= (WDT_MODE_EN | WDT_MODE_KEY);
+       iowrite32(reg, wdt_base + WDT_MODE);
+
+       return 0;
+}
+
+static const struct watchdog_info mtk_wdt_info = {
+       .identity       = DRV_NAME,
+       .options        = WDIOF_SETTIMEOUT |
+                         WDIOF_KEEPALIVEPING |
+                         WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops mtk_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = mtk_wdt_start,
+       .stop           = mtk_wdt_stop,
+       .ping           = mtk_wdt_ping,
+       .set_timeout    = mtk_wdt_set_timeout,
+};
+
+static int mtk_wdt_probe(struct platform_device *pdev)
+{
+       struct mtk_wdt_dev *mtk_wdt;
+       struct resource *res;
+       int err;
+
+       mtk_wdt = devm_kzalloc(&pdev->dev, sizeof(*mtk_wdt), GFP_KERNEL);
+       if (!mtk_wdt)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, mtk_wdt);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mtk_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mtk_wdt->wdt_base))
+               return PTR_ERR(mtk_wdt->wdt_base);
+
+       mtk_wdt->wdt_dev.info = &mtk_wdt_info;
+       mtk_wdt->wdt_dev.ops = &mtk_wdt_ops;
+       mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
+       mtk_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
+       mtk_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
+       mtk_wdt->wdt_dev.parent = &pdev->dev;
+
+       watchdog_init_timeout(&mtk_wdt->wdt_dev, timeout, &pdev->dev);
+       watchdog_set_nowayout(&mtk_wdt->wdt_dev, nowayout);
+
+       watchdog_set_drvdata(&mtk_wdt->wdt_dev, mtk_wdt);
+
+       mtk_wdt_stop(&mtk_wdt->wdt_dev);
+
+       err = watchdog_register_device(&mtk_wdt->wdt_dev);
+       if (unlikely(err))
+               return err;
+
+       mtk_wdt->restart_handler.notifier_call = mtk_reset_handler;
+       mtk_wdt->restart_handler.priority = 128;
+       err = register_restart_handler(&mtk_wdt->restart_handler);
+       if (err)
+               dev_warn(&pdev->dev,
+                       "cannot register restart handler (err=%d)\n", err);
+
+       dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
+                       mtk_wdt->wdt_dev.timeout, nowayout);
+
+       return 0;
+}
+
+static int mtk_wdt_remove(struct platform_device *pdev)
+{
+       struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
+
+       unregister_restart_handler(&mtk_wdt->restart_handler);
+
+       watchdog_unregister_device(&mtk_wdt->wdt_dev);
+
+       return 0;
+}
+
+static const struct of_device_id mtk_wdt_dt_ids[] = {
+       { .compatible = "mediatek,mt6589-wdt" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
+
+static struct platform_driver mtk_wdt_driver = {
+       .probe          = mtk_wdt_probe,
+       .remove         = mtk_wdt_remove,
+       .driver         = {
+               .name           = DRV_NAME,
+               .of_match_table = mtk_wdt_dt_ids,
+       },
+};
+
+module_platform_driver(mtk_wdt_driver);
+
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds");
+
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+                       __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matthias Brugger <matthias.bgg@gmail.com>");
+MODULE_DESCRIPTION("Mediatek WatchDog Timer Driver");
+MODULE_VERSION(DRV_VERSION);
index 9f2709db61ca921ac1a97ff055735fcf7612349f..1e6be9e405779884c315de023a8aad18e5c07419 100644 (file)
@@ -189,7 +189,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog,
 }
 
 static const struct watchdog_info omap_wdt_info = {
-       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
        .identity = "OMAP Watchdog",
 };
 
index a7a0695971e468e19142645b7bdca8be38671bb4..b7c68e275aeb357503b3e10e48b2102444348b92 100644 (file)
@@ -94,7 +94,7 @@ static int retu_wdt_set_timeout(struct watchdog_device *wdog,
 }
 
 static const struct watchdog_info retu_wdt_info = {
-       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
        .identity = "Retu watchdog",
 };
 
index 11aad5b7aafe8fe3122f61d99d347723381a3148..a6f7e2e29bebbdb201952e5a14fa268e3cc4b1f1 100644 (file)
@@ -45,6 +45,7 @@
 static struct clk *rt288x_wdt_clk;
 static unsigned long rt288x_wdt_freq;
 static void __iomem *rt288x_wdt_base;
+static struct reset_control *rt288x_wdt_reset;
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);
@@ -151,16 +152,18 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
        if (IS_ERR(rt288x_wdt_clk))
                return PTR_ERR(rt288x_wdt_clk);
 
-       device_reset(&pdev->dev);
+       rt288x_wdt_reset = devm_reset_control_get(&pdev->dev, NULL);
+       if (!IS_ERR(rt288x_wdt_reset))
+               reset_control_deassert(rt288x_wdt_reset);
 
        rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE;
 
        rt288x_wdt_dev.dev = &pdev->dev;
        rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
-
        rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
-       rt288x_wdt_dev.timeout = rt288x_wdt_dev.max_timeout;
 
+       watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout,
+                             &pdev->dev);
        watchdog_set_nowayout(&rt288x_wdt_dev, nowayout);
 
        ret = watchdog_register_device(&rt288x_wdt_dev);
index 12c15903d0989d4b3bc14dfcd83943e8d549e703..2c1db6fa9a2724ae906f8682dd29ea176a752daf 100644 (file)
@@ -57,7 +57,7 @@ static int twl4030_wdt_set_timeout(struct watchdog_device *wdt,
 }
 
 static const struct watchdog_info twl4030_wdt_info = {
-       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
        .identity = "TWL4030 Watchdog",
 };
 
index 7165704a3e33e2e4fdd034562492122baf990ff2..5824e25eebbbab1c60e35070a20f93f92f979d6f 100644 (file)
@@ -50,7 +50,7 @@ static int cr_wdt_control;    /* WDT control register */
 
 enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
             w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
-            w83667hg_b, nct6775, nct6776, nct6779 };
+            w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792 };
 
 static int timeout;                    /* in seconds */
 module_param(timeout, int, 0);
@@ -95,6 +95,8 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
 #define NCT6775_ID             0xb4
 #define NCT6776_ID             0xc3
 #define NCT6779_ID             0xc5
+#define NCT6791_ID             0xc8
+#define NCT6792_ID             0xc9
 
 #define W83627HF_WDT_TIMEOUT   0xf6
 #define W83697HF_WDT_TIMEOUT   0xf4
@@ -195,6 +197,8 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
        case nct6775:
        case nct6776:
        case nct6779:
+       case nct6791:
+       case nct6792:
                /*
                 * These chips have a fixed WDTO# output pin (W83627UHG),
                 * or support more than one WDTO# output pin.
@@ -395,6 +399,12 @@ static int wdt_find(int addr)
        case NCT6779_ID:
                ret = nct6779;
                break;
+       case NCT6791_ID:
+               ret = nct6791;
+               break;
+       case NCT6792_ID:
+               ret = nct6792;
+               break;
        case 0xff:
                ret = -ENODEV;
                break;
@@ -428,6 +438,8 @@ static int __init wdt_init(void)
                "NCT6775",
                "NCT6776",
                "NCT6779",
+               "NCT6791",
+               "NCT6792",
        };
 
        wdt_io = 0x2e;
index 2140398a2a8c6b5f521c6250bb56a56ad41c9df7..2ccd3592d41f549967f7597ed86bd77aef8556bf 100644 (file)
@@ -2,7 +2,7 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
 obj-$(CONFIG_HOTPLUG_CPU)              += cpu_hotplug.o
 endif
 obj-$(CONFIG_X86)                      += fallback.o
-obj-y  += grant-table.o features.o balloon.o manage.o
+obj-y  += grant-table.o features.o balloon.o manage.o preempt.o
 obj-y  += events/
 obj-y  += xenbus/
 
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
new file mode 100644 (file)
index 0000000..a1800c1
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Preemptible hypercalls
+ *
+ * Copyright (C) 2014 Citrix Systems R&D ltd.
+ *
+ * This source code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <xen/xen-ops.h>
+
+#ifndef CONFIG_PREEMPT
+
+/*
+ * Some hypercalls issued by the toolstack can take many 10s of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+asmlinkage __visible void xen_maybe_preempt_hcall(void)
+{
+       if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+                    && should_resched())) {
+               /*
+                * Clear flag as we may be rescheduled on a different
+                * cpu.
+                */
+               __this_cpu_write(xen_in_preemptible_hcall, false);
+               _cond_resched();
+               __this_cpu_write(xen_in_preemptible_hcall, true);
+       }
+}
+#endif /* CONFIG_PREEMPT */
index 569a13b9e856de5c3900050d583844f401243e96..59ac71c4a04352d055f40435444e7ccc46debf31 100644 (file)
@@ -56,10 +56,12 @@ static long privcmd_ioctl_hypercall(void __user *udata)
        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;
 
+       xen_preemptible_hcall_begin();
        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);
+       xen_preemptible_hcall_end();
 
        return ret;
 }
index 61653a03a8f5037c12e28c5087e3fd8960342460..9faca6a60bb01b33c311f86b01af239280934a78 100644 (file)
@@ -709,12 +709,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
 static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 {
        struct vscsiif_back_ring *ring = &info->ring;
-       struct vscsiif_request *ring_req;
+       struct vscsiif_request ring_req;
        struct vscsibk_pend *pending_req;
        RING_IDX rc, rp;
        int err, more_to_do;
        uint32_t result;
-       uint8_t act;
 
        rc = ring->req_cons;
        rp = ring->sring->req_prod;
@@ -735,11 +734,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
                if (!pending_req)
                        return 1;
 
-               ring_req = RING_GET_REQUEST(ring, rc);
+               ring_req = *RING_GET_REQUEST(ring, rc);
                ring->req_cons = ++rc;
 
-               act = ring_req->act;
-               err = prepare_pending_reqs(info, ring_req, pending_req);
+               err = prepare_pending_reqs(info, &ring_req, pending_req);
                if (err) {
                        switch (err) {
                        case -ENODEV:
@@ -755,9 +753,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
                        return 1;
                }
 
-               switch (act) {
+               switch (ring_req.act) {
                case VSCSIIF_ACT_SCSI_CDB:
-                       if (scsiback_gnttab_data_map(ring_req, pending_req)) {
+                       if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
                                scsiback_fast_flush_area(pending_req);
                                scsiback_do_resp_with_sense(NULL,
                                        DRIVER_ERROR << 24, 0, pending_req);
@@ -768,7 +766,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
                        break;
                case VSCSIIF_ACT_SCSI_ABORT:
                        scsiback_device_action(pending_req, TMR_ABORT_TASK,
-                               ring_req->ref_rqid);
+                               ring_req.ref_rqid);
                        break;
                case VSCSIIF_ACT_SCSI_RESET:
                        scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
index 9ee5343d48849d85c1404538ce3f5433ad460d6a..3662f1d1d9cf0fc2f73c44fa4c34fb6e14c0af5f 100644 (file)
@@ -1127,7 +1127,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
        }
 
        /* Write all dirty data */
-       if (S_ISREG(dentry->d_inode->i_mode))
+       if (d_is_reg(dentry))
                filemap_write_and_wait(dentry->d_inode->i_mapping);
 
        retval = p9_client_wstat(fid, &wstat);
index 118a2e0088d8fdd8391654a44edb06157dad5629..f8e52a1854c1ab383e32383ac65a0f167e385793 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1285,7 +1285,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 
        ret = -EINVAL;
        if (unlikely(ctx || nr_events == 0)) {
-               pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+               pr_debug("EINVAL: ctx %lu nr_events %u\n",
                         ctx, nr_events);
                goto out;
        }
@@ -1333,7 +1333,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 
                return ret;
        }
-       pr_debug("EINVAL: io_destroy: invalid context id\n");
+       pr_debug("EINVAL: invalid context id\n");
        return -EINVAL;
 }
 
@@ -1515,7 +1515,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
            (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
            ((ssize_t)iocb->aio_nbytes < 0)
           )) {
-               pr_debug("EINVAL: io_submit: overflow check\n");
+               pr_debug("EINVAL: overflow check\n");
                return -EINVAL;
        }
 
index aaf96cb25452cf04a5d7a47f2b506486e67cd502..ac7d921ed9844b0a0c6afd0e6d4eaf4ca718f955 100644 (file)
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
  */
 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
 {
-       struct autofs_dev_ioctl tmp;
+       struct autofs_dev_ioctl tmp, *res;
 
        if (copy_from_user(&tmp, in, sizeof(tmp)))
                return ERR_PTR(-EFAULT);
@@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
        if (tmp.size > (PATH_MAX + sizeof(tmp)))
                return ERR_PTR(-ENAMETOOLONG);
 
-       return memdup_user(in, tmp.size);
+       res = memdup_user(in, tmp.size);
+       if (!IS_ERR(res))
+               res->size = tmp.size;
+
+       return res;
 }
 
 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
index bfdbaba9c2ba40e7216d7435d69d45016c9ec5ff..11dd118f75e25e6a8f25f2143724901fb64c72a0 100644 (file)
@@ -374,7 +374,7 @@ static struct dentry *should_expire(struct dentry *dentry,
                return NULL;
        }
 
-       if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dentry->d_inode && d_is_symlink(dentry)) {
                DPRINTK("checking symlink %p %pd", dentry, dentry);
                /*
                 * A symlink can't be "busy" in the usual sense so
index dbb5b7212ce162130727256d49351e3f2d2677c9..7e44fdd03e2dd0a684036be49e7c2152c192ee63 100644 (file)
@@ -108,7 +108,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
        struct dentry *dentry = file->f_path.dentry;
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
 
-       DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry);
+       DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry);
 
        if (autofs4_oz_mode(sbi))
                goto out;
@@ -371,7 +371,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
         * having d_mountpoint() true, so there's no need to call back
         * to the daemon.
         */
-       if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dentry->d_inode && d_is_symlink(dentry)) {
                spin_unlock(&sbi->fs_lock);
                goto done;
        }
@@ -485,7 +485,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
                 * an incorrect ELOOP error return.
                 */
                if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
-                   (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
+                   (dentry->d_inode && d_is_symlink(dentry)))
                        status = -EISDIR;
        }
        spin_unlock(&sbi->fs_lock);
index afd2b4408adf53d78c716043c847ba48cf5c3bef..861b1e1c477710faced77767a0327c1d6e6b79c5 100644 (file)
 #include <linux/namei.h>
 #include <linux/poll.h>
 
-
-static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_read(struct file *filp, char __user *buf,
-                       size_t size, loff_t *ppos)
-{
-        return -EIO;
-}
-
-static ssize_t bad_file_write(struct file *filp, const char __user *buf,
-                       size_t siz, loff_t *ppos)
-{
-        return -EIO;
-}
-
-static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                       unsigned long nr_segs, loff_t pos)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                       unsigned long nr_segs, loff_t pos)
-{
-       return -EIO;
-}
-
-static int bad_file_readdir(struct file *file, struct dir_context *ctx)
-{
-       return -EIO;
-}
-
-static unsigned int bad_file_poll(struct file *filp, poll_table *wait)
-{
-       return POLLERR;
-}
-
-static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd,
-                       unsigned long arg)
-{
-       return -EIO;
-}
-
-static long bad_file_compat_ioctl(struct file *file, unsigned int cmd,
-                       unsigned long arg)
-{
-       return -EIO;
-}
-
-static int bad_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       return -EIO;
-}
-
 static int bad_file_open(struct inode *inode, struct file *filp)
 {
        return -EIO;
 }
 
-static int bad_file_flush(struct file *file, fl_owner_t id)
-{
-       return -EIO;
-}
-
-static int bad_file_release(struct inode *inode, struct file *filp)
-{
-       return -EIO;
-}
-
-static int bad_file_fsync(struct file *file, loff_t start, loff_t end,
-                         int datasync)
-{
-       return -EIO;
-}
-
-static int bad_file_aio_fsync(struct kiocb *iocb, int datasync)
-{
-       return -EIO;
-}
-
-static int bad_file_fasync(int fd, struct file *filp, int on)
-{
-       return -EIO;
-}
-
-static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_sendpage(struct file *file, struct page *page,
-                       int off, size_t len, loff_t *pos, int more)
-{
-       return -EIO;
-}
-
-static unsigned long bad_file_get_unmapped_area(struct file *file,
-                               unsigned long addr, unsigned long len,
-                               unsigned long pgoff, unsigned long flags)
-{
-       return -EIO;
-}
-
-static int bad_file_check_flags(int flags)
-{
-       return -EIO;
-}
-
-static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe,
-                       struct file *out, loff_t *ppos, size_t len,
-                       unsigned int flags)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos,
-                       struct pipe_inode_info *pipe, size_t len,
-                       unsigned int flags)
-{
-       return -EIO;
-}
-
 static const struct file_operations bad_file_ops =
 {
-       .llseek         = bad_file_llseek,
-       .read           = bad_file_read,
-       .write          = bad_file_write,
-       .aio_read       = bad_file_aio_read,
-       .aio_write      = bad_file_aio_write,
-       .iterate        = bad_file_readdir,
-       .poll           = bad_file_poll,
-       .unlocked_ioctl = bad_file_unlocked_ioctl,
-       .compat_ioctl   = bad_file_compat_ioctl,
-       .mmap           = bad_file_mmap,
        .open           = bad_file_open,
-       .flush          = bad_file_flush,
-       .release        = bad_file_release,
-       .fsync          = bad_file_fsync,
-       .aio_fsync      = bad_file_aio_fsync,
-       .fasync         = bad_file_fasync,
-       .lock           = bad_file_lock,
-       .sendpage       = bad_file_sendpage,
-       .get_unmapped_area = bad_file_get_unmapped_area,
-       .check_flags    = bad_file_check_flags,
-       .flock          = bad_file_flock,
-       .splice_write   = bad_file_splice_write,
-       .splice_read    = bad_file_splice_read,
 };
 
 static int bad_inode_create (struct inode *dir, struct dentry *dentry,
index 02b16910f4c9d500619286029ee16f0815bce269..995986b8e36b8f3fd8529582c50e545d9b26322e 100644 (file)
@@ -645,11 +645,12 @@ out:
 
 static unsigned long randomize_stack_top(unsigned long stack_top)
 {
-       unsigned int random_variable = 0;
+       unsigned long random_variable = 0;
 
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
-               random_variable = get_random_int() & STACK_RND_MASK;
+               random_variable = (unsigned long) get_random_int();
+               random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
 #ifdef CONFIG_STACK_GROWSUP
index 8729cf68d2fef5e41540283d74beba55285f59c5..f55721ff938544c1b73d0d8cb057a52dba5b7884 100644 (file)
@@ -1246,25 +1246,6 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-/*
- * this makes the path point to (inum INODE_ITEM ioff)
- */
-int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-                       struct btrfs_path *path)
-{
-       struct btrfs_key key;
-       return btrfs_find_item(fs_root, path, inum, ioff,
-                       BTRFS_INODE_ITEM_KEY, &key);
-}
-
-static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-                               struct btrfs_path *path,
-                               struct btrfs_key *found_key)
-{
-       return btrfs_find_item(fs_root, path, inum, ioff,
-                       BTRFS_INODE_REF_KEY, found_key);
-}
-
 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
                          u64 start_off, struct btrfs_path *path,
                          struct btrfs_inode_extref **ret_extref,
@@ -1374,7 +1355,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
                }
-               ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+               ret = btrfs_find_item(fs_root, path, parent, 0,
+                               BTRFS_INODE_REF_KEY, &found_key);
                if (ret > 0)
                        ret = -ENOENT;
                if (ret)
@@ -1727,8 +1709,10 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
        struct btrfs_key found_key;
 
        while (!ret) {
-               ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
-                                    &found_key);
+               ret = btrfs_find_item(fs_root, path, inum,
+                               parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
+                               &found_key);
+
                if (ret < 0)
                        break;
                if (ret) {
index 2a1ac6bfc724637f3a80ac15c6ce6237dc7998f0..9c41fbac30091f39bad52a3f21ebf6e931db364b 100644 (file)
@@ -32,9 +32,6 @@ struct inode_fs_paths {
 typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
                void *ctx);
 
-int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-                       struct btrfs_path *path);
-
 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
                        struct btrfs_path *path, struct btrfs_key *found_key,
                        u64 *flags);
index 4aadadcfab20178d734ad395c1448676603a01b8..de5e4f2adfeac9d07ba2539a8098b44cc9781580 100644 (file)
@@ -185,6 +185,9 @@ struct btrfs_inode {
 
        struct btrfs_delayed_node *delayed_node;
 
+       /* File creation time. */
+       struct timespec i_otime;
+
        struct inode vfs_inode;
 };
 
index 14a72ed14ef7b1c2a5b78688c0b316d129b2bb72..993642199326a757f55c78dbc2722ecc2b409b60 100644 (file)
@@ -213,11 +213,19 @@ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
  */
 static void add_root_to_dirty_list(struct btrfs_root *root)
 {
+       if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
+           !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
+               return;
+
        spin_lock(&root->fs_info->trans_lock);
-       if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
-           list_empty(&root->dirty_list)) {
-               list_add(&root->dirty_list,
-                        &root->fs_info->dirty_cowonly_roots);
+       if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
+               /* Want the extent tree to be the last on the list */
+               if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
+                       list_move_tail(&root->dirty_list,
+                                      &root->fs_info->dirty_cowonly_roots);
+               else
+                       list_move(&root->dirty_list,
+                                 &root->fs_info->dirty_cowonly_roots);
        }
        spin_unlock(&root->fs_info->trans_lock);
 }
@@ -1363,8 +1371,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 
        if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
                BUG_ON(tm->slot != 0);
-               eb_rewin = alloc_dummy_extent_buffer(eb->start,
-                                               fs_info->tree_root->nodesize);
+               eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
                if (!eb_rewin) {
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
@@ -1444,7 +1451,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        } else if (old_root) {
                btrfs_tree_read_unlock(eb_root);
                free_extent_buffer(eb_root);
-               eb = alloc_dummy_extent_buffer(logical, root->nodesize);
+               eb = alloc_dummy_extent_buffer(root->fs_info, logical);
        } else {
                btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
                eb = btrfs_clone_extent_buffer(eb_root);
@@ -2282,7 +2289,7 @@ static void reada_for_search(struct btrfs_root *root,
                if ((search <= target && target - search <= 65536) ||
                    (search > target && search - target <= 65536)) {
                        gen = btrfs_node_ptr_generation(node, nr);
-                       readahead_tree_block(root, search, blocksize);
+                       readahead_tree_block(root, search);
                        nread += blocksize;
                }
                nscan++;
@@ -2301,7 +2308,6 @@ static noinline void reada_for_balance(struct btrfs_root *root,
        u64 gen;
        u64 block1 = 0;
        u64 block2 = 0;
-       int blocksize;
 
        parent = path->nodes[level + 1];
        if (!parent)
@@ -2309,7 +2315,6 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 
        nritems = btrfs_header_nritems(parent);
        slot = path->slots[level + 1];
-       blocksize = root->nodesize;
 
        if (slot > 0) {
                block1 = btrfs_node_blockptr(parent, slot - 1);
@@ -2334,9 +2339,9 @@ static noinline void reada_for_balance(struct btrfs_root *root,
        }
 
        if (block1)
-               readahead_tree_block(root, block1, blocksize);
+               readahead_tree_block(root, block1);
        if (block2)
-               readahead_tree_block(root, block2, blocksize);
+               readahead_tree_block(root, block2);
 }
 
 
@@ -2609,32 +2614,24 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key,
        return 0;
 }
 
-int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
+int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
                u64 iobjectid, u64 ioff, u8 key_type,
                struct btrfs_key *found_key)
 {
        int ret;
        struct btrfs_key key;
        struct extent_buffer *eb;
-       struct btrfs_path *path;
+
+       ASSERT(path);
+       ASSERT(found_key);
 
        key.type = key_type;
        key.objectid = iobjectid;
        key.offset = ioff;
 
-       if (found_path == NULL) {
-               path = btrfs_alloc_path();
-               if (!path)
-                       return -ENOMEM;
-       } else
-               path = found_path;
-
        ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
-       if ((ret < 0) || (found_key == NULL)) {
-               if (path != found_path)
-                       btrfs_free_path(path);
+       if (ret < 0)
                return ret;
-       }
 
        eb = path->nodes[0];
        if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
@@ -3383,7 +3380,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
        add_root_to_dirty_list(root);
        extent_buffer_get(c);
        path->nodes[level] = c;
-       path->locks[level] = BTRFS_WRITE_LOCK;
+       path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
        path->slots[level] = 0;
        return 0;
 }
@@ -4356,13 +4353,15 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
        path->search_for_split = 1;
        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        path->search_for_split = 0;
+       if (ret > 0)
+               ret = -EAGAIN;
        if (ret < 0)
                goto err;
 
        ret = -EAGAIN;
        leaf = path->nodes[0];
-       /* if our item isn't there or got smaller, return now */
-       if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
+       /* if our item isn't there, return now */
+       if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
                goto err;
 
        /* the leaf has  changed, it now has room.  return now */
index 0b180708bf79d87a36c9dcc78bbd6d72772101df..84c3b00f3de8eedf47ba3bec71939a5fba1f90a0 100644 (file)
@@ -198,6 +198,8 @@ static int btrfs_csum_sizes[] = { 4, 0 };
 
 #define BTRFS_DIRTY_METADATA_THRESH    (32 * 1024 * 1024)
 
+#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)
+
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
  * block layout.
@@ -1020,6 +1022,9 @@ enum btrfs_raid_types {
                                         BTRFS_BLOCK_GROUP_RAID6 |   \
                                         BTRFS_BLOCK_GROUP_DUP |     \
                                         BTRFS_BLOCK_GROUP_RAID10)
+#define BTRFS_BLOCK_GROUP_RAID56_MASK  (BTRFS_BLOCK_GROUP_RAID5 |   \
+                                        BTRFS_BLOCK_GROUP_RAID6)
+
 /*
  * We need a bit for restriper to be able to tell when chunks of type
  * SINGLE are available.  This "extended" profile format is used in
@@ -1239,7 +1244,6 @@ enum btrfs_disk_cache_state {
        BTRFS_DC_ERROR          = 1,
        BTRFS_DC_CLEAR          = 2,
        BTRFS_DC_SETUP          = 3,
-       BTRFS_DC_NEED_WRITE     = 4,
 };
 
 struct btrfs_caching_control {
@@ -1277,7 +1281,6 @@ struct btrfs_block_group_cache {
        unsigned long full_stripe_len;
 
        unsigned int ro:1;
-       unsigned int dirty:1;
        unsigned int iref:1;
        unsigned int has_caching_ctl:1;
        unsigned int removed:1;
@@ -1315,6 +1318,9 @@ struct btrfs_block_group_cache {
        struct list_head ro_list;
 
        atomic_t trimming;
+
+       /* For dirty block groups */
+       struct list_head dirty_list;
 };
 
 /* delayed seq elem */
@@ -1741,6 +1747,7 @@ struct btrfs_fs_info {
 
        spinlock_t unused_bgs_lock;
        struct list_head unused_bgs;
+       struct mutex unused_bg_unpin_mutex;
 
        /* For btrfs to record security options */
        struct security_mnt_opts security_opts;
@@ -1776,6 +1783,7 @@ struct btrfs_subvolume_writers {
 #define BTRFS_ROOT_DEFRAG_RUNNING      6
 #define BTRFS_ROOT_FORCE_COW           7
 #define BTRFS_ROOT_MULTI_LOG_TASKS     8
+#define BTRFS_ROOT_DIRTY               9
 
 /*
  * in ram representation of the tree.  extent_root is used for all allocations
@@ -1794,8 +1802,6 @@ struct btrfs_root {
        struct btrfs_fs_info *fs_info;
        struct extent_io_tree dirty_log_pages;
 
-       struct kobject root_kobj;
-       struct completion kobj_unregister;
        struct mutex objectid_mutex;
 
        spinlock_t accounting_lock;
@@ -2465,31 +2471,6 @@ BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
-
-static inline struct btrfs_timespec *
-btrfs_inode_atime(struct btrfs_inode_item *inode_item)
-{
-       unsigned long ptr = (unsigned long)inode_item;
-       ptr += offsetof(struct btrfs_inode_item, atime);
-       return (struct btrfs_timespec *)ptr;
-}
-
-static inline struct btrfs_timespec *
-btrfs_inode_mtime(struct btrfs_inode_item *inode_item)
-{
-       unsigned long ptr = (unsigned long)inode_item;
-       ptr += offsetof(struct btrfs_inode_item, mtime);
-       return (struct btrfs_timespec *)ptr;
-}
-
-static inline struct btrfs_timespec *
-btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
-{
-       unsigned long ptr = (unsigned long)inode_item;
-       ptr += offsetof(struct btrfs_inode_item, ctime);
-       return (struct btrfs_timespec *)ptr;
-}
-
 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
index de4e70fb3cbbd4a5c28d13f1fe3aec16733ed49f..82f0c7c954747363859fff768917f34ec67765ab 100644 (file)
@@ -1755,27 +1755,31 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
        btrfs_set_stack_inode_block_group(inode_item, 0);
 
-       btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
+       btrfs_set_stack_timespec_sec(&inode_item->atime,
                                     inode->i_atime.tv_sec);
-       btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
+       btrfs_set_stack_timespec_nsec(&inode_item->atime,
                                      inode->i_atime.tv_nsec);
 
-       btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
+       btrfs_set_stack_timespec_sec(&inode_item->mtime,
                                     inode->i_mtime.tv_sec);
-       btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
+       btrfs_set_stack_timespec_nsec(&inode_item->mtime,
                                      inode->i_mtime.tv_nsec);
 
-       btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
+       btrfs_set_stack_timespec_sec(&inode_item->ctime,
                                     inode->i_ctime.tv_sec);
-       btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
+       btrfs_set_stack_timespec_nsec(&inode_item->ctime,
                                      inode->i_ctime.tv_nsec);
+
+       btrfs_set_stack_timespec_sec(&inode_item->otime,
+                                    BTRFS_I(inode)->i_otime.tv_sec);
+       btrfs_set_stack_timespec_nsec(&inode_item->otime,
+                                    BTRFS_I(inode)->i_otime.tv_nsec);
 }
 
 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
 {
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_inode_item *inode_item;
-       struct btrfs_timespec *tspec;
 
        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
@@ -1802,17 +1806,19 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
        *rdev = btrfs_stack_inode_rdev(inode_item);
        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
 
-       tspec = btrfs_inode_atime(inode_item);
-       inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
-       inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+       inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
+       inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
+
+       inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
+       inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
 
-       tspec = btrfs_inode_mtime(inode_item);
-       inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
-       inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+       inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
+       inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
 
-       tspec = btrfs_inode_ctime(inode_item);
-       inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
-       inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+       BTRFS_I(inode)->i_otime.tv_sec =
+               btrfs_stack_timespec_sec(&inode_item->otime);
+       BTRFS_I(inode)->i_otime.tv_nsec =
+               btrfs_stack_timespec_nsec(&inode_item->otime);
 
        inode->i_generation = BTRFS_I(inode)->generation;
        BTRFS_I(inode)->index_cnt = (u64)-1;
index ca6a3a3b6b6c4cdbfac15f79f003823149493181..5ec03d999c37bfb3e48d937abd79b6dc57cd5257 100644 (file)
@@ -440,18 +440,9 @@ leave:
  */
 static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
 {
-       s64 writers;
-       DEFINE_WAIT(wait);
-
        set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
-       do {
-               prepare_to_wait(&fs_info->replace_wait, &wait,
-                               TASK_UNINTERRUPTIBLE);
-               writers = percpu_counter_sum(&fs_info->bio_counter);
-               if (writers)
-                       schedule();
-               finish_wait(&fs_info->replace_wait, &wait);
-       } while (writers);
+       wait_event(fs_info->replace_wait, !percpu_counter_sum(
+                  &fs_info->bio_counter));
 }
 
 /*
@@ -932,15 +923,15 @@ void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
 
 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
 {
-       DEFINE_WAIT(wait);
-again:
-       percpu_counter_inc(&fs_info->bio_counter);
-       if (test_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state)) {
+       while (1) {
+               percpu_counter_inc(&fs_info->bio_counter);
+               if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
+                                    &fs_info->fs_state)))
+                       break;
+
                btrfs_bio_counter_dec(fs_info);
                wait_event(fs_info->replace_wait,
                           !test_bit(BTRFS_FS_STATE_DEV_REPLACING,
                                     &fs_info->fs_state));
-               goto again;
        }
-
 }
index 1afb18226da82c9bff5870c2f7770b164d7f68b2..f79f38542a737631e191e08aed6b50d4a940f964 100644 (file)
@@ -318,7 +318,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                        memcpy(&found, result, csum_size);
 
                        read_extent_buffer(buf, &val, 0, csum_size);
-                       printk_ratelimited(KERN_INFO
+                       printk_ratelimited(KERN_WARNING
                                "BTRFS: %s checksum verify failed on %llu wanted %X found %X "
                                "level %d\n",
                                root->fs_info->sb->s_id, buf->start,
@@ -367,7 +367,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
                ret = 0;
                goto out;
        }
-       printk_ratelimited(KERN_INFO "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n",
+       printk_ratelimited(KERN_ERR
+           "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n",
                        eb->fs_info->sb->s_id, eb->start,
                        parent_transid, btrfs_header_generation(eb));
        ret = 1;
@@ -633,21 +634,21 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 
        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
-               printk_ratelimited(KERN_INFO "BTRFS (device %s): bad tree block start "
+               printk_ratelimited(KERN_ERR "BTRFS (device %s): bad tree block start "
                               "%llu %llu\n",
                               eb->fs_info->sb->s_id, found_start, eb->start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
-               printk_ratelimited(KERN_INFO "BTRFS (device %s): bad fsid on block %llu\n",
+               printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n",
                               eb->fs_info->sb->s_id, eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
-               btrfs_info(root->fs_info, "bad tree block level %d",
+               btrfs_err(root->fs_info, "bad tree block level %d",
                           (int)btrfs_header_level(eb));
                ret = -EIO;
                goto err;
@@ -1073,12 +1074,12 @@ static const struct address_space_operations btree_aops = {
        .set_page_dirty = btree_set_page_dirty,
 };
 
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
+void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
 {
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
 
-       buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+       buf = btrfs_find_create_tree_block(root, bytenr);
        if (!buf)
                return;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
@@ -1086,7 +1087,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
        free_extent_buffer(buf);
 }
 
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
                         int mirror_num, struct extent_buffer **eb)
 {
        struct extent_buffer *buf = NULL;
@@ -1094,7 +1095,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;
 
-       buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+       buf = btrfs_find_create_tree_block(root, bytenr);
        if (!buf)
                return 0;
 
@@ -1125,12 +1126,11 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 }
 
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-                                                u64 bytenr, u32 blocksize)
+                                                u64 bytenr)
 {
        if (btrfs_test_is_dummy_root(root))
-               return alloc_test_extent_buffer(root->fs_info, bytenr,
-                                               blocksize);
-       return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
+               return alloc_test_extent_buffer(root->fs_info, bytenr);
+       return alloc_extent_buffer(root->fs_info, bytenr);
 }
 
 
@@ -1152,7 +1152,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
        struct extent_buffer *buf = NULL;
        int ret;
 
-       buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize);
+       buf = btrfs_find_create_tree_block(root, bytenr);
        if (!buf)
                return NULL;
 
@@ -1275,12 +1275,10 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
-       memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        if (fs_info)
                root->defrag_trans_start = fs_info->generation;
        else
                root->defrag_trans_start = 0;
-       init_completion(&root->kobj_unregister);
        root->root_key.objectid = objectid;
        root->anon_dev = 0;
 
@@ -1630,6 +1628,8 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
                                     bool check_ref)
 {
        struct btrfs_root *root;
+       struct btrfs_path *path;
+       struct btrfs_key key;
        int ret;
 
        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
@@ -1669,8 +1669,17 @@ again:
        if (ret)
                goto fail;
 
-       ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
-                       location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
+       path = btrfs_alloc_path();
+       if (!path) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+       key.objectid = BTRFS_ORPHAN_OBJECTID;
+       key.type = BTRFS_ORPHAN_ITEM_KEY;
+       key.offset = location->objectid;
+
+       ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
+       btrfs_free_path(path);
        if (ret < 0)
                goto fail;
        if (ret == 0)
@@ -2232,6 +2241,7 @@ int open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->qgroup_op_lock);
        spin_lock_init(&fs_info->buffer_lock);
        spin_lock_init(&fs_info->unused_bgs_lock);
+       mutex_init(&fs_info->unused_bg_unpin_mutex);
        rwlock_init(&fs_info->tree_mod_log_lock);
        mutex_init(&fs_info->reloc_mutex);
        mutex_init(&fs_info->delalloc_root_mutex);
@@ -2496,7 +2506,7 @@ int open_ctree(struct super_block *sb,
                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
        if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
-               printk(KERN_ERR "BTRFS: has skinny extents\n");
+               printk(KERN_INFO "BTRFS: has skinny extents\n");
 
        /*
         * flag our filesystem as having big metadata blocks if
@@ -2520,7 +2530,7 @@ int open_ctree(struct super_block *sb,
         */
        if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
            (sectorsize != nodesize)) {
-               printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
+               printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes "
                                "are not allowed for mixed block groups on %s\n",
                                sb->s_id);
                goto fail_alloc;
@@ -2628,12 +2638,12 @@ int open_ctree(struct super_block *sb,
        sb->s_blocksize_bits = blksize_bits(sectorsize);
 
        if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
-               printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id);
+               printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
 
        if (sectorsize != PAGE_SIZE) {
-               printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) "
+               printk(KERN_ERR "BTRFS: incompatible sector size (%lu) "
                       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
                goto fail_sb_buffer;
        }
@@ -2642,7 +2652,7 @@ int open_ctree(struct super_block *sb,
        ret = btrfs_read_sys_array(tree_root);
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
-               printk(KERN_WARNING "BTRFS: failed to read the system "
+               printk(KERN_ERR "BTRFS: failed to read the system "
                       "array on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
@@ -2657,7 +2667,7 @@ int open_ctree(struct super_block *sb,
                                           generation);
        if (!chunk_root->node ||
            !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
-               printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
+               printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }
@@ -2669,7 +2679,7 @@ int open_ctree(struct super_block *sb,
 
        ret = btrfs_read_chunk_tree(chunk_root);
        if (ret) {
-               printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",
+               printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }
@@ -2681,7 +2691,7 @@ int open_ctree(struct super_block *sb,
        btrfs_close_extra_devices(fs_info, fs_devices, 0);
 
        if (!fs_devices->latest_bdev) {
-               printk(KERN_CRIT "BTRFS: failed to read devices on %s\n",
+               printk(KERN_ERR "BTRFS: failed to read devices on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }
@@ -2765,7 +2775,7 @@ retry_root_backup:
 
        ret = btrfs_recover_balance(fs_info);
        if (ret) {
-               printk(KERN_WARNING "BTRFS: failed to recover balance\n");
+               printk(KERN_ERR "BTRFS: failed to recover balance\n");
                goto fail_block_groups;
        }
 
@@ -3860,6 +3870,21 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
                                btrfs_super_log_root(sb));
 
+       /*
+        * Check the lower bound, the alignment and other constraints are
+        * checked later.
+        */
+       if (btrfs_super_nodesize(sb) < 4096) {
+               printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n",
+                               btrfs_super_nodesize(sb));
+               ret = -EINVAL;
+       }
+       if (btrfs_super_sectorsize(sb) < 4096) {
+               printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n",
+                               btrfs_super_sectorsize(sb));
+               ret = -EINVAL;
+       }
+
        if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
                printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
                                fs_info->fsid, sb->dev_item.fsid);
@@ -3873,6 +3898,10 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
        if (btrfs_super_num_devices(sb) > (1UL << 31))
                printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
                                btrfs_super_num_devices(sb));
+       if (btrfs_super_num_devices(sb) == 0) {
+               printk(KERN_ERR "BTRFS: number of devices is 0\n");
+               ret = -EINVAL;
+       }
 
        if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
                printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
@@ -3880,6 +3909,25 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                ret = -EINVAL;
        }
 
+       /*
+        * Obvious sys_chunk_array corruptions, it must hold at least one key
+        * and one chunk
+        */
+       if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
+               printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
+                               btrfs_super_sys_array_size(sb),
+                               BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
+               ret = -EINVAL;
+       }
+       if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
+                       + sizeof(struct btrfs_chunk)) {
+               printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n",
+                               btrfs_super_sys_array_size(sb),
+                               sizeof(struct btrfs_disk_key)
+                               + sizeof(struct btrfs_chunk));
+               ret = -EINVAL;
+       }
+
        /*
         * The generation is a global counter, we'll trust it more than the others
         * but it's still possible that it's the one that's wrong.
index 414651821fb3b38a62df3a72fa44877f66b2e602..27d44c0fd2364df69eafd15f61bd6a215696f91f 100644 (file)
@@ -46,11 +46,11 @@ struct btrfs_fs_devices;
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u64 parent_transid);
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize);
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+void readahead_tree_block(struct btrfs_root *root, u64 bytenr);
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
                         int mirror_num, struct extent_buffer **eb);
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-                                                  u64 bytenr, u32 blocksize);
+                                                  u64 bytenr);
 void clean_tree_block(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, struct extent_buffer *buf);
 int open_ctree(struct super_block *sb,
index a684086c3c8123702cc41caa4d4dfe085aa7db3b..571f402d3fc46e5f0205451e85a7b78f3cc16b50 100644 (file)
@@ -74,8 +74,9 @@ enum {
        RESERVE_ALLOC_NO_ACCOUNT = 2,
 };
 
-static int update_block_group(struct btrfs_root *root,
-                             u64 bytenr, u64 num_bytes, int alloc);
+static int update_block_group(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root, u64 bytenr,
+                             u64 num_bytes, int alloc);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
@@ -1925,7 +1926,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                         */
                        ret = 0;
                }
-               kfree(bbio);
+               btrfs_put_bbio(bbio);
        }
 
        if (actual_bytes)
@@ -2768,7 +2769,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_head *head;
        int ret;
        int run_all = count == (unsigned long)-1;
-       int run_most = 0;
 
        /* We'll clean this up in btrfs_cleanup_transaction */
        if (trans->aborted)
@@ -2778,10 +2778,8 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                root = root->fs_info->tree_root;
 
        delayed_refs = &trans->transaction->delayed_refs;
-       if (count == 0) {
+       if (count == 0)
                count = atomic_read(&delayed_refs->num_entries) * 2;
-               run_most = 1;
-       }
 
 again:
 #ifdef SCRAMBLE_DELAYED_REFS
@@ -3315,120 +3313,42 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
 {
        struct btrfs_block_group_cache *cache;
-       int err = 0;
+       struct btrfs_transaction *cur_trans = trans->transaction;
+       int ret = 0;
        struct btrfs_path *path;
-       u64 last = 0;
+
+       if (list_empty(&cur_trans->dirty_bgs))
+               return 0;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-again:
-       while (1) {
-               cache = btrfs_lookup_first_block_group(root->fs_info, last);
-               while (cache) {
-                       if (cache->disk_cache_state == BTRFS_DC_CLEAR)
-                               break;
-                       cache = next_block_group(root, cache);
-               }
-               if (!cache) {
-                       if (last == 0)
-                               break;
-                       last = 0;
-                       continue;
-               }
-               err = cache_save_setup(cache, trans, path);
-               last = cache->key.objectid + cache->key.offset;
-               btrfs_put_block_group(cache);
-       }
-
-       while (1) {
-               if (last == 0) {
-                       err = btrfs_run_delayed_refs(trans, root,
-                                                    (unsigned long)-1);
-                       if (err) /* File system offline */
-                               goto out;
-               }
-
-               cache = btrfs_lookup_first_block_group(root->fs_info, last);
-               while (cache) {
-                       if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
-                               btrfs_put_block_group(cache);
-                               goto again;
-                       }
-
-                       if (cache->dirty)
-                               break;
-                       cache = next_block_group(root, cache);
-               }
-               if (!cache) {
-                       if (last == 0)
-                               break;
-                       last = 0;
-                       continue;
-               }
-
-               if (cache->disk_cache_state == BTRFS_DC_SETUP)
-                       cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
-               cache->dirty = 0;
-               last = cache->key.objectid + cache->key.offset;
-
-               err = write_one_cache_group(trans, root, path, cache);
-               btrfs_put_block_group(cache);
-               if (err) /* File system offline */
-                       goto out;
-       }
-
-       while (1) {
-               /*
-                * I don't think this is needed since we're just marking our
-                * preallocated extent as written, but just in case it can't
-                * hurt.
-                */
-               if (last == 0) {
-                       err = btrfs_run_delayed_refs(trans, root,
-                                                    (unsigned long)-1);
-                       if (err) /* File system offline */
-                               goto out;
-               }
-
-               cache = btrfs_lookup_first_block_group(root->fs_info, last);
-               while (cache) {
-                       /*
-                        * Really this shouldn't happen, but it could if we
-                        * couldn't write the entire preallocated extent and
-                        * splitting the extent resulted in a new block.
-                        */
-                       if (cache->dirty) {
-                               btrfs_put_block_group(cache);
-                               goto again;
-                       }
-                       if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
-                               break;
-                       cache = next_block_group(root, cache);
-               }
-               if (!cache) {
-                       if (last == 0)
-                               break;
-                       last = 0;
-                       continue;
-               }
-
-               err = btrfs_write_out_cache(root, trans, cache, path);
-
-               /*
-                * If we didn't have an error then the cache state is still
-                * NEED_WRITE, so we can set it to WRITTEN.
-                */
-               if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
-                       cache->disk_cache_state = BTRFS_DC_WRITTEN;
-               last = cache->key.objectid + cache->key.offset;
+       /*
+        * We don't need the lock here since we are protected by the transaction
+        * commit.  We want to do the cache_save_setup first and then run the
+        * delayed refs to make sure we have the best chance at doing this all
+        * in one shot.
+        */
+       while (!list_empty(&cur_trans->dirty_bgs)) {
+               cache = list_first_entry(&cur_trans->dirty_bgs,
+                                        struct btrfs_block_group_cache,
+                                        dirty_list);
+               list_del_init(&cache->dirty_list);
+               if (cache->disk_cache_state == BTRFS_DC_CLEAR)
+                       cache_save_setup(cache, trans, path);
+               if (!ret)
+                       ret = btrfs_run_delayed_refs(trans, root,
+                                                    (unsigned long) -1);
+               if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP)
+                       btrfs_write_out_cache(root, trans, cache, path);
+               if (!ret)
+                       ret = write_one_cache_group(trans, root, path, cache);
                btrfs_put_block_group(cache);
        }
-out:
 
        btrfs_free_path(path);
-       return err;
+       return ret;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -5043,19 +4963,25 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
 /**
  * drop_outstanding_extent - drop an outstanding extent
  * @inode: the inode we're dropping the extent for
+ * @num_bytes: the number of bytes we're releasing.
  *
  * This is called when we are freeing up an outstanding extent, either called
  * after an error or after an extent is written.  This will return the number of
  * reserved extents that need to be freed.  This must be called with
  * BTRFS_I(inode)->lock held.
  */
-static unsigned drop_outstanding_extent(struct inode *inode)
+static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 {
        unsigned drop_inode_space = 0;
        unsigned dropped_extents = 0;
+       unsigned num_extents = 0;
 
-       BUG_ON(!BTRFS_I(inode)->outstanding_extents);
-       BTRFS_I(inode)->outstanding_extents--;
+       num_extents = (unsigned)div64_u64(num_bytes +
+                                         BTRFS_MAX_EXTENT_SIZE - 1,
+                                         BTRFS_MAX_EXTENT_SIZE);
+       ASSERT(num_extents);
+       ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
+       BTRFS_I(inode)->outstanding_extents -= num_extents;
 
        if (BTRFS_I(inode)->outstanding_extents == 0 &&
            test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
@@ -5226,7 +5152,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
 out_fail:
        spin_lock(&BTRFS_I(inode)->lock);
-       dropped = drop_outstanding_extent(inode);
+       dropped = drop_outstanding_extent(inode, num_bytes);
        /*
         * If the inodes csum_bytes is the same as the original
         * csum_bytes then we know we haven't raced with any free()ers
@@ -5305,7 +5231,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
        num_bytes = ALIGN(num_bytes, root->sectorsize);
        spin_lock(&BTRFS_I(inode)->lock);
-       dropped = drop_outstanding_extent(inode);
+       dropped = drop_outstanding_extent(inode, num_bytes);
 
        if (num_bytes)
                to_free = calc_csum_metadata_size(inode, num_bytes, 0);
@@ -5375,8 +5301,9 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
        btrfs_free_reserved_data_space(inode, num_bytes);
 }
 
-static int update_block_group(struct btrfs_root *root,
-                             u64 bytenr, u64 num_bytes, int alloc)
+static int update_block_group(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root, u64 bytenr,
+                             u64 num_bytes, int alloc)
 {
        struct btrfs_block_group_cache *cache = NULL;
        struct btrfs_fs_info *info = root->fs_info;
@@ -5414,6 +5341,14 @@ static int update_block_group(struct btrfs_root *root,
                if (!alloc && cache->cached == BTRFS_CACHE_NO)
                        cache_block_group(cache, 1);
 
+               spin_lock(&trans->transaction->dirty_bgs_lock);
+               if (list_empty(&cache->dirty_list)) {
+                       list_add_tail(&cache->dirty_list,
+                                     &trans->transaction->dirty_bgs);
+                       btrfs_get_block_group(cache);
+               }
+               spin_unlock(&trans->transaction->dirty_bgs_lock);
+
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
 
@@ -5424,7 +5359,6 @@ static int update_block_group(struct btrfs_root *root,
                    cache->disk_cache_state < BTRFS_DC_CLEAR)
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-               cache->dirty = 1;
                old_val = btrfs_block_group_used(&cache->item);
                num_bytes = min(total, cache->key.offset - byte_in_group);
                if (alloc) {
@@ -5807,10 +5741,13 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                unpin = &fs_info->freed_extents[0];
 
        while (1) {
+               mutex_lock(&fs_info->unused_bg_unpin_mutex);
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY, NULL);
-               if (ret)
+               if (ret) {
+                       mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        break;
+               }
 
                if (btrfs_test_opt(root, DISCARD))
                        ret = btrfs_discard_extent(root, start,
@@ -5818,6 +5755,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                unpin_extent_range(root, start, end, true);
+               mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                cond_resched();
        }
 
@@ -6103,7 +6041,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                        }
                }
 
-               ret = update_block_group(root, bytenr, num_bytes, 0);
+               ret = update_block_group(trans, root, bytenr, num_bytes, 0);
                if (ret) {
                        btrfs_abort_transaction(trans, extent_root, ret);
                        goto out;
@@ -6205,7 +6143,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct extent_buffer *buf,
                           u64 parent, int last_ref)
 {
-       struct btrfs_block_group_cache *cache = NULL;
        int pin = 1;
        int ret;
 
@@ -6221,17 +6158,20 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
        if (!last_ref)
                return;
 
-       cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-
        if (btrfs_header_generation(buf) == trans->transid) {
+               struct btrfs_block_group_cache *cache;
+
                if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
                        ret = check_ref_cleanup(trans, root, buf->start);
                        if (!ret)
                                goto out;
                }
 
+               cache = btrfs_lookup_block_group(root->fs_info, buf->start);
+
                if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
                        pin_down_extent(root, cache, buf->start, buf->len, 1);
+                       btrfs_put_block_group(cache);
                        goto out;
                }
 
@@ -6239,6 +6179,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 
                btrfs_add_free_space(cache, buf->start, buf->len);
                btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
+               btrfs_put_block_group(cache);
                trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
                pin = 0;
        }
@@ -6253,7 +6194,6 @@ out:
         * anymore.
         */
        clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
-       btrfs_put_block_group(cache);
 }
 
 /* Can return -ENOMEM */
@@ -7063,7 +7003,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
        if (ret)
                return ret;
 
-       ret = update_block_group(root, ins->objectid, ins->offset, 1);
+       ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
                        ins->objectid, ins->offset);
@@ -7152,7 +7092,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                        return ret;
        }
 
-       ret = update_block_group(root, ins->objectid, root->nodesize, 1);
+       ret = update_block_group(trans, root, ins->objectid, root->nodesize,
+                                1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
                        ins->objectid, ins->offset);
@@ -7217,11 +7158,11 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 
 static struct extent_buffer *
 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                     u64 bytenr, u32 blocksize, int level)
+                     u64 bytenr, int level)
 {
        struct extent_buffer *buf;
 
-       buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+       buf = btrfs_find_create_tree_block(root, bytenr);
        if (!buf)
                return ERR_PTR(-ENOMEM);
        btrfs_set_header_generation(buf, trans->transid);
@@ -7340,7 +7281,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 
        if (btrfs_test_is_dummy_root(root)) {
                buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
-                                           blocksize, level);
+                                           level);
                if (!IS_ERR(buf))
                        root->alloc_bytenr += blocksize;
                return buf;
@@ -7357,8 +7298,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                return ERR_PTR(ret);
        }
 
-       buf = btrfs_init_new_buffer(trans, root, ins.objectid,
-                                   blocksize, level);
+       buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
        BUG_ON(IS_ERR(buf)); /* -ENOMEM */
 
        if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
@@ -7487,7 +7427,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
                                continue;
                }
 reada:
-               readahead_tree_block(root, bytenr, blocksize);
+               readahead_tree_block(root, bytenr);
                nread++;
        }
        wc->reada_slot = slot;
@@ -7828,7 +7768,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 
        next = btrfs_find_tree_block(root, bytenr);
        if (!next) {
-               next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+               next = btrfs_find_create_tree_block(root, bytenr);
                if (!next)
                        return -ENOMEM;
                btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
@@ -8548,14 +8488,6 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       alloc_flags = update_block_group_flags(root, cache->flags);
-       if (alloc_flags != cache->flags) {
-               ret = do_chunk_alloc(trans, root, alloc_flags,
-                                    CHUNK_ALLOC_FORCE);
-               if (ret < 0)
-                       goto out;
-       }
-
        ret = set_block_group_ro(cache, 0);
        if (!ret)
                goto out;
@@ -8566,6 +8498,11 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
                goto out;
        ret = set_block_group_ro(cache, 0);
 out:
+       if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
+               alloc_flags = update_block_group_flags(root, cache->flags);
+               check_system_chunk(trans, root, alloc_flags);
+       }
+
        btrfs_end_transaction(trans, root);
        return ret;
 }
@@ -9005,6 +8942,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->bg_list);
        INIT_LIST_HEAD(&cache->ro_list);
+       INIT_LIST_HEAD(&cache->dirty_list);
        btrfs_init_free_space_ctl(cache);
        atomic_set(&cache->trimming, 0);
 
@@ -9068,9 +9006,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                         * b) Setting 'dirty flag' makes sure that we flush
                         *    the new space cache info onto disk.
                         */
-                       cache->disk_cache_state = BTRFS_DC_CLEAR;
                        if (btrfs_test_opt(root, SPACE_CACHE))
-                               cache->dirty = 1;
+                               cache->disk_cache_state = BTRFS_DC_CLEAR;
                }
 
                read_extent_buffer(leaf, &cache->item,
@@ -9460,6 +9397,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                }
        }
 
+       spin_lock(&trans->transaction->dirty_bgs_lock);
+       if (!list_empty(&block_group->dirty_list)) {
+               list_del_init(&block_group->dirty_list);
+               btrfs_put_block_group(block_group);
+       }
+       spin_unlock(&trans->transaction->dirty_bgs_lock);
+
        btrfs_remove_free_space_cache(block_group);
 
        spin_lock(&block_group->space_info->lock);
@@ -9611,7 +9555,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
                 * Want to do this before we do anything else so we can recover
                 * properly if we fail to join the transaction.
                 */
-               trans = btrfs_join_transaction(root);
+               /* 1 for btrfs_orphan_reserve_metadata() */
+               trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
                        btrfs_set_block_group_rw(root, block_group);
                        ret = PTR_ERR(trans);
@@ -9624,18 +9569,33 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
                 */
                start = block_group->key.objectid;
                end = start + block_group->key.offset - 1;
+               /*
+                * Hold the unused_bg_unpin_mutex lock to avoid racing with
+                * btrfs_finish_extent_commit(). If we are at transaction N,
+                * another task might be running finish_extent_commit() for the
+                * previous transaction N - 1, and have seen a range belonging
+                * to the block group in freed_extents[] before we were able to
+                * clear the whole block group range from freed_extents[]. This
+                * means that task can lookup for the block group after we
+                * unpinned it from freed_extents[] and removed it, leading to
+                * a BUG_ON() at btrfs_unpin_extent_range().
+                */
+               mutex_lock(&fs_info->unused_bg_unpin_mutex);
                ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
                                  EXTENT_DIRTY, GFP_NOFS);
                if (ret) {
+                       mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        btrfs_set_block_group_rw(root, block_group);
                        goto end_trans;
                }
                ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
                                  EXTENT_DIRTY, GFP_NOFS);
                if (ret) {
+                       mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        btrfs_set_block_group_rw(root, block_group);
                        goto end_trans;
                }
+               mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 
                /* Reset pinned so btrfs_put_block_group doesn't complain */
                block_group->pinned = 0;
index c73df6a7c9b6ce0b8ee9beec01f4f0fe48ba5705..c7233ff1d533b653b8b9e7f29e022e9126e5f38a 100644 (file)
@@ -64,7 +64,7 @@ void btrfs_leak_debug_check(void)
 
        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, leak_list);
-               pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n",
+               pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
                       state->start, state->end, state->state,
                       extent_state_in_tree(state),
                       atomic_read(&state->refs));
@@ -396,21 +396,21 @@ static void merge_state(struct extent_io_tree *tree,
 }
 
 static void set_state_cb(struct extent_io_tree *tree,
-                        struct extent_state *state, unsigned long *bits)
+                        struct extent_state *state, unsigned *bits)
 {
        if (tree->ops && tree->ops->set_bit_hook)
                tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void clear_state_cb(struct extent_io_tree *tree,
-                          struct extent_state *state, unsigned long *bits)
+                          struct extent_state *state, unsigned *bits)
 {
        if (tree->ops && tree->ops->clear_bit_hook)
                tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
-                          struct extent_state *state, unsigned long *bits);
+                          struct extent_state *state, unsigned *bits);
 
 /*
  * insert an extent_state struct into the tree.  'bits' are set on the
@@ -426,7 +426,7 @@ static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        struct rb_node ***p,
                        struct rb_node **parent,
-                       unsigned long *bits)
+                       unsigned *bits)
 {
        struct rb_node *node;
 
@@ -511,10 +511,10 @@ static struct extent_state *next_state(struct extent_state *state)
  */
 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
                                            struct extent_state *state,
-                                           unsigned long *bits, int wake)
+                                           unsigned *bits, int wake)
 {
        struct extent_state *next;
-       unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
+       unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
 
        if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
@@ -570,7 +570,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
  * This takes the tree lock, and returns 0 on success and < 0 on error.
  */
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned long bits, int wake, int delete,
+                    unsigned bits, int wake, int delete,
                     struct extent_state **cached_state,
                     gfp_t mask)
 {
@@ -789,9 +789,9 @@ out:
 
 static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
-                          unsigned long *bits)
+                          unsigned *bits)
 {
-       unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
+       unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
 
        set_state_cb(tree, state, bits);
        if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
@@ -803,7 +803,7 @@ static void set_state_bits(struct extent_io_tree *tree,
 
 static void cache_state_if_flags(struct extent_state *state,
                                 struct extent_state **cached_ptr,
-                                const u64 flags)
+                                unsigned flags)
 {
        if (cached_ptr && !(*cached_ptr)) {
                if (!flags || (state->state & flags)) {
@@ -833,7 +833,7 @@ static void cache_state(struct extent_state *state,
 
 static int __must_check
 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                unsigned long bits, unsigned long exclusive_bits,
+                unsigned bits, unsigned exclusive_bits,
                 u64 *failed_start, struct extent_state **cached_state,
                 gfp_t mask)
 {
@@ -1034,7 +1034,7 @@ search_again:
 }
 
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                  unsigned long bits, u64 * failed_start,
+                  unsigned bits, u64 * failed_start,
                   struct extent_state **cached_state, gfp_t mask)
 {
        return __set_extent_bit(tree, start, end, bits, 0, failed_start,
@@ -1060,7 +1060,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * boundary bits like LOCK.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                      unsigned long bits, unsigned long clear_bits,
+                      unsigned bits, unsigned clear_bits,
                       struct extent_state **cached_state, gfp_t mask)
 {
        struct extent_state *state;
@@ -1268,14 +1268,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                   unsigned long bits, gfp_t mask)
+                   unsigned bits, gfp_t mask)
 {
        return set_extent_bit(tree, start, end, bits, NULL,
                              NULL, mask);
 }
 
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                     unsigned long bits, gfp_t mask)
+                     unsigned bits, gfp_t mask)
 {
        return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
 }
@@ -1330,10 +1330,11 @@ int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned long bits, struct extent_state **cached_state)
+                    unsigned bits, struct extent_state **cached_state)
 {
        int err;
        u64 failed_start;
+
        while (1) {
                err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
                                       EXTENT_LOCKED, &failed_start,
@@ -1440,7 +1441,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
  */
 static struct extent_state *
 find_first_extent_bit_state(struct extent_io_tree *tree,
-                           u64 start, unsigned long bits)
+                           u64 start, unsigned bits)
 {
        struct rb_node *node;
        struct extent_state *state;
@@ -1474,7 +1475,7 @@ out:
  * If nothing was found, 1 is returned. If found something, return 0.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-                         u64 *start_ret, u64 *end_ret, unsigned long bits,
+                         u64 *start_ret, u64 *end_ret, unsigned bits,
                          struct extent_state **cached_state)
 {
        struct extent_state *state;
@@ -1753,7 +1754,7 @@ out_failed:
 
 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
                                 struct page *locked_page,
-                                unsigned long clear_bits,
+                                unsigned clear_bits,
                                 unsigned long page_ops)
 {
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
@@ -1810,7 +1811,7 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
  */
 u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
-                    unsigned long bits, int contig)
+                    unsigned bits, int contig)
 {
        struct rb_node *node;
        struct extent_state *state;
@@ -1928,7 +1929,7 @@ out:
  * range is found set.
  */
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                  unsigned long bits, int filled, struct extent_state *cached)
+                  unsigned bits, int filled, struct extent_state *cached)
 {
        struct extent_state *state = NULL;
        struct rb_node *node;
@@ -2057,7 +2058,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
        sector = bbio->stripes[mirror_num-1].physical >> 9;
        bio->bi_iter.bi_sector = sector;
        dev = bbio->stripes[mirror_num-1].dev;
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
        if (!dev || !dev->bdev || !dev->writeable) {
                bio_put(bio);
                return -EIO;
@@ -2816,8 +2817,10 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                    bio_add_page(bio, page, page_size, offset) < page_size) {
                        ret = submit_one_bio(rw, bio, mirror_num,
                                             prev_bio_flags);
-                       if (ret < 0)
+                       if (ret < 0) {
+                               *bio_ret = NULL;
                                return ret;
+                       }
                        bio = NULL;
                } else {
                        return 0;
@@ -3239,7 +3242,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
                                               page,
                                               &delalloc_start,
                                               &delalloc_end,
-                                              128 * 1024 * 1024);
+                                              BTRFS_MAX_EXTENT_SIZE);
                if (nr_delalloc == 0) {
                        delalloc_start = delalloc_end + 1;
                        continue;
@@ -4598,11 +4601,11 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 static struct extent_buffer *
 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
-                     unsigned long len, gfp_t mask)
+                     unsigned long len)
 {
        struct extent_buffer *eb = NULL;
 
-       eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+       eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS);
        if (eb == NULL)
                return NULL;
        eb->start = start;
@@ -4643,7 +4646,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
        struct extent_buffer *new;
        unsigned long num_pages = num_extent_pages(src->start, src->len);
 
-       new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
+       new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
        if (new == NULL)
                return NULL;
 
@@ -4666,13 +4669,26 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
        return new;
 }
 
-struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+                                               u64 start)
 {
        struct extent_buffer *eb;
-       unsigned long num_pages = num_extent_pages(0, len);
+       unsigned long len;
+       unsigned long num_pages;
        unsigned long i;
 
-       eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
+       if (!fs_info) {
+               /*
+                * Called only from tests that don't always have a fs_info
+                * available, but we know that nodesize is 4096
+                */
+               len = 4096;
+       } else {
+               len = fs_info->tree_root->nodesize;
+       }
+       num_pages = num_extent_pages(0, len);
+
+       eb = __alloc_extent_buffer(fs_info, start, len);
        if (!eb)
                return NULL;
 
@@ -4762,7 +4778,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start, unsigned long len)
+                                              u64 start)
 {
        struct extent_buffer *eb, *exists = NULL;
        int ret;
@@ -4770,7 +4786,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
-       eb = alloc_dummy_extent_buffer(start, len);
+       eb = alloc_dummy_extent_buffer(fs_info, start);
        if (!eb)
                return NULL;
        eb->fs_info = fs_info;
@@ -4808,8 +4824,9 @@ free_eb:
 #endif
 
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
-                                         u64 start, unsigned long len)
+                                         u64 start)
 {
+       unsigned long len = fs_info->tree_root->nodesize;
        unsigned long num_pages = num_extent_pages(start, len);
        unsigned long i;
        unsigned long index = start >> PAGE_CACHE_SHIFT;
@@ -4824,7 +4841,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        if (eb)
                return eb;
 
-       eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
+       eb = __alloc_extent_buffer(fs_info, start, len);
        if (!eb)
                return NULL;
 
index ece9ce87edff521fa0a54f38bca0cb47059c638b..695b0ccfb7553e786f4c9634d3a86c0263c86321 100644 (file)
@@ -4,22 +4,22 @@
 #include <linux/rbtree.h>
 
 /* bits for the extent state */
-#define EXTENT_DIRTY 1
-#define EXTENT_WRITEBACK (1 << 1)
-#define EXTENT_UPTODATE (1 << 2)
-#define EXTENT_LOCKED (1 << 3)
-#define EXTENT_NEW (1 << 4)
-#define EXTENT_DELALLOC (1 << 5)
-#define EXTENT_DEFRAG (1 << 6)
-#define EXTENT_BOUNDARY (1 << 9)
-#define EXTENT_NODATASUM (1 << 10)
-#define EXTENT_DO_ACCOUNTING (1 << 11)
-#define EXTENT_FIRST_DELALLOC (1 << 12)
-#define EXTENT_NEED_WAIT (1 << 13)
-#define EXTENT_DAMAGED (1 << 14)
-#define EXTENT_NORESERVE (1 << 15)
-#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
-#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
+#define EXTENT_DIRTY           (1U << 0)
+#define EXTENT_WRITEBACK       (1U << 1)
+#define EXTENT_UPTODATE                (1U << 2)
+#define EXTENT_LOCKED          (1U << 3)
+#define EXTENT_NEW             (1U << 4)
+#define EXTENT_DELALLOC                (1U << 5)
+#define EXTENT_DEFRAG          (1U << 6)
+#define EXTENT_BOUNDARY                (1U << 9)
+#define EXTENT_NODATASUM       (1U << 10)
+#define EXTENT_DO_ACCOUNTING   (1U << 11)
+#define EXTENT_FIRST_DELALLOC  (1U << 12)
+#define EXTENT_NEED_WAIT       (1U << 13)
+#define EXTENT_DAMAGED         (1U << 14)
+#define EXTENT_NORESERVE       (1U << 15)
+#define EXTENT_IOBITS          (EXTENT_LOCKED | EXTENT_WRITEBACK)
+#define EXTENT_CTLBITS         (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
 
 /*
  * flags for bio submission. The high bits indicate the compression
@@ -81,9 +81,9 @@ struct extent_io_ops {
        int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
                                      struct extent_state *state, int uptodate);
        void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
-                            unsigned long *bits);
+                            unsigned *bits);
        void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
-                              unsigned long *bits);
+                              unsigned *bits);
        void (*merge_extent_hook)(struct inode *inode,
                                  struct extent_state *new,
                                  struct extent_state *other);
@@ -108,7 +108,7 @@ struct extent_state {
        /* ADD NEW ELEMENTS AFTER THIS */
        wait_queue_head_t wq;
        atomic_t refs;
-       unsigned long state;
+       unsigned state;
 
        /* for use by the FS */
        u64 private;
@@ -188,7 +188,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 int try_release_extent_buffer(struct page *page);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned long bits, struct extent_state **cached);
+                    unsigned bits, struct extent_state **cached);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                         struct extent_state **cached, gfp_t mask);
@@ -202,21 +202,21 @@ void extent_io_exit(void);
 
 u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end,
-                    u64 max_bytes, unsigned long bits, int contig);
+                    u64 max_bytes, unsigned bits, int contig);
 
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                  unsigned long bits, int filled,
+                  unsigned bits, int filled,
                   struct extent_state *cached_state);
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                     unsigned long bits, gfp_t mask);
+                     unsigned bits, gfp_t mask);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned long bits, int wake, int delete,
+                    unsigned bits, int wake, int delete,
                     struct extent_state **cached, gfp_t mask);
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                   unsigned long bits, gfp_t mask);
+                   unsigned bits, gfp_t mask);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                  unsigned long bits, u64 *failed_start,
+                  unsigned bits, u64 *failed_start,
                   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask);
@@ -229,14 +229,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask);
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                      unsigned long bits, unsigned long clear_bits,
+                      unsigned bits, unsigned clear_bits,
                       struct extent_state **cached_state, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask);
 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
                      struct extent_state **cached_state, gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-                         u64 *start_ret, u64 *end_ret, unsigned long bits,
+                         u64 *start_ret, u64 *end_ret, unsigned bits,
                          struct extent_state **cached_state);
 int extent_invalidatepage(struct extent_io_tree *tree,
                          struct page *page, unsigned long offset);
@@ -262,8 +262,9 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 void set_page_extent_mapped(struct page *page);
 
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
-                                         u64 start, unsigned long len);
-struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
+                                         u64 start);
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+               u64 start);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                                         u64 start);
@@ -322,7 +323,7 @@ int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
                                 struct page *locked_page,
-                                unsigned long bits_to_clear,
+                                unsigned bits_to_clear,
                                 unsigned long page_ops);
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
@@ -377,5 +378,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
                                      u64 *end, u64 max_bytes);
 #endif
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start, unsigned long len);
+                                              u64 start);
 #endif
index d6c03f7f136b359c534668a38f9e9a72d299eb66..a71978578fa71a11d83f8c435541b807eedc2b51 100644 (file)
@@ -651,15 +651,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        struct io_ctl io_ctl;
        struct btrfs_key key;
        struct btrfs_free_space *e, *n;
-       struct list_head bitmaps;
+       LIST_HEAD(bitmaps);
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
        u8 type;
        int ret = 0;
 
-       INIT_LIST_HEAD(&bitmaps);
-
        /* Nothing in the space cache, goodbye */
        if (!i_size_read(inode))
                return 0;
@@ -1243,6 +1241,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct inode *inode;
        int ret = 0;
+       enum btrfs_disk_cache_state dcs = BTRFS_DC_WRITTEN;
 
        root = root->fs_info->tree_root;
 
@@ -1266,9 +1265,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
                                      path, block_group->key.objectid);
        if (ret) {
-               spin_lock(&block_group->lock);
-               block_group->disk_cache_state = BTRFS_DC_ERROR;
-               spin_unlock(&block_group->lock);
+               dcs = BTRFS_DC_ERROR;
                ret = 0;
 #ifdef DEBUG
                btrfs_err(root->fs_info,
@@ -1277,6 +1274,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 #endif
        }
 
+       spin_lock(&block_group->lock);
+       block_group->disk_cache_state = dcs;
+       spin_unlock(&block_group->lock);
        iput(inode);
        return ret;
 }
@@ -2903,7 +2903,6 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
        trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
                                 min_bytes);
 
-       INIT_LIST_HEAD(&bitmaps);
        ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
                                      bytes + empty_size,
                                      cont1_bytes, min_bytes);
index 8ffa4783cbf438ed182e6f94e39711ded5207558..265e03c73f4daaea4ac1b69ab0a606c63f50895f 100644 (file)
@@ -344,6 +344,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        path->leave_spinning = 1;
+       path->skip_release_on_error = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      ins_len);
        if (ret == -EEXIST) {
@@ -362,8 +363,12 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
                ptr = (unsigned long)(ref + 1);
                ret = 0;
        } else if (ret < 0) {
-               if (ret == -EOVERFLOW)
-                       ret = -EMLINK;
+               if (ret == -EOVERFLOW) {
+                       if (find_name_in_backref(path, name, name_len, &ref))
+                               ret = -EEXIST;
+                       else
+                               ret = -EMLINK;
+               }
                goto out;
        } else {
                ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
index 54bcf639d1cf1f48df951477b16e6266402f7bc6..a85c23dfcddbcfd992069811f24b116d74e4dc21 100644 (file)
@@ -1530,10 +1530,45 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
 static void btrfs_split_extent_hook(struct inode *inode,
                                    struct extent_state *orig, u64 split)
 {
+       u64 size;
+
        /* not delalloc, ignore it */
        if (!(orig->state & EXTENT_DELALLOC))
                return;
 
+       size = orig->end - orig->start + 1;
+       if (size > BTRFS_MAX_EXTENT_SIZE) {
+               u64 num_extents;
+               u64 new_size;
+
+               /*
+                * We need the largest size of the remaining extent to see if we
+                * need to add a new outstanding extent.  Think of the following
+                * case
+                *
+                * [MEAX_EXTENT_SIZEx2 - 4k][4k]
+                *
+                * The new_size would just be 4k and we'd think we had enough
+                * outstanding extents for this if we only took one side of the
+                * split, same goes for the other direction.  We need to see if
+                * the larger size still is the same amount of extents as the
+                * original size, because if it is we need to add a new
+                * outstanding extent.  But if we split up and the larger size
+                * is less than the original then we are good to go since we've
+                * already accounted for the extra extent in our original
+                * accounting.
+                */
+               new_size = orig->end - split + 1;
+               if ((split - orig->start) > new_size)
+                       new_size = split - orig->start;
+
+               num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
+                                       BTRFS_MAX_EXTENT_SIZE);
+               if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
+                             BTRFS_MAX_EXTENT_SIZE) < num_extents)
+                       return;
+       }
+
        spin_lock(&BTRFS_I(inode)->lock);
        BTRFS_I(inode)->outstanding_extents++;
        spin_unlock(&BTRFS_I(inode)->lock);
@@ -1549,10 +1584,34 @@ static void btrfs_merge_extent_hook(struct inode *inode,
                                    struct extent_state *new,
                                    struct extent_state *other)
 {
+       u64 new_size, old_size;
+       u64 num_extents;
+
        /* not delalloc, ignore it */
        if (!(other->state & EXTENT_DELALLOC))
                return;
 
+       old_size = other->end - other->start + 1;
+       new_size = old_size + (new->end - new->start + 1);
+
+       /* we're not bigger than the max, unreserve the space and go */
+       if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
+               spin_lock(&BTRFS_I(inode)->lock);
+               BTRFS_I(inode)->outstanding_extents--;
+               spin_unlock(&BTRFS_I(inode)->lock);
+               return;
+       }
+
+       /*
+        * If we grew by another max_extent, just return, we want to keep that
+        * reserved amount.
+        */
+       num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
+                               BTRFS_MAX_EXTENT_SIZE);
+       if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
+                     BTRFS_MAX_EXTENT_SIZE) > num_extents)
+               return;
+
        spin_lock(&BTRFS_I(inode)->lock);
        BTRFS_I(inode)->outstanding_extents--;
        spin_unlock(&BTRFS_I(inode)->lock);
@@ -1604,7 +1663,7 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
  * have pending delalloc work to be done.
  */
 static void btrfs_set_bit_hook(struct inode *inode,
-                              struct extent_state *state, unsigned long *bits)
+                              struct extent_state *state, unsigned *bits)
 {
 
        if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
@@ -1645,9 +1704,11 @@ static void btrfs_set_bit_hook(struct inode *inode,
  */
 static void btrfs_clear_bit_hook(struct inode *inode,
                                 struct extent_state *state,
-                                unsigned long *bits)
+                                unsigned *bits)
 {
        u64 len = state->end + 1 - state->start;
+       u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1,
+                                   BTRFS_MAX_EXTENT_SIZE);
 
        spin_lock(&BTRFS_I(inode)->lock);
        if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
@@ -1667,7 +1728,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
                        *bits &= ~EXTENT_FIRST_DELALLOC;
                } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
                        spin_lock(&BTRFS_I(inode)->lock);
-                       BTRFS_I(inode)->outstanding_extents--;
+                       BTRFS_I(inode)->outstanding_extents -= num_extents;
                        spin_unlock(&BTRFS_I(inode)->lock);
                }
 
@@ -2945,7 +3006,7 @@ static int __readpage_endio_check(struct inode *inode,
        return 0;
 zeroit:
        if (__ratelimit(&_rs))
-               btrfs_info(BTRFS_I(inode)->root->fs_info,
+               btrfs_warn(BTRFS_I(inode)->root->fs_info,
                           "csum failed ino %llu off %llu csum %u expected csum %u",
                           btrfs_ino(inode), start, csum, csum_expected);
        memset(kaddr + pgoff, 1, len);
@@ -3407,7 +3468,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 
 out:
        if (ret)
-               btrfs_crit(root->fs_info,
+               btrfs_err(root->fs_info,
                        "could not do orphan cleanup %d", ret);
        btrfs_free_path(path);
        return ret;
@@ -3490,7 +3551,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_inode_item *inode_item;
-       struct btrfs_timespec *tspec;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_key location;
        unsigned long ptr;
@@ -3527,17 +3587,19 @@ static void btrfs_read_locked_inode(struct inode *inode)
        i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
        btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
 
-       tspec = btrfs_inode_atime(inode_item);
-       inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
-       inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+       inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
+       inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
+
+       inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
+       inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
 
-       tspec = btrfs_inode_mtime(inode_item);
-       inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
-       inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+       inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
+       inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
 
-       tspec = btrfs_inode_ctime(inode_item);
-       inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
-       inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+       BTRFS_I(inode)->i_otime.tv_sec =
+               btrfs_timespec_sec(leaf, &inode_item->otime);
+       BTRFS_I(inode)->i_otime.tv_nsec =
+               btrfs_timespec_nsec(leaf, &inode_item->otime);
 
        inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
        BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
@@ -3656,21 +3718,26 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
        btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->atime,
                                     inode->i_atime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->atime,
                                      inode->i_atime.tv_nsec, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->mtime,
                                     inode->i_mtime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->mtime,
                                      inode->i_mtime.tv_nsec, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->ctime,
                                     inode->i_ctime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->ctime,
                                      inode->i_ctime.tv_nsec, &token);
 
+       btrfs_set_token_timespec_sec(leaf, &item->otime,
+                                    BTRFS_I(inode)->i_otime.tv_sec, &token);
+       btrfs_set_token_timespec_nsec(leaf, &item->otime,
+                                     BTRFS_I(inode)->i_otime.tv_nsec, &token);
+
        btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
                                     &token);
        btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
@@ -5007,6 +5074,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
        struct btrfs_root *new_root;
        struct btrfs_root_ref *ref;
        struct extent_buffer *leaf;
+       struct btrfs_key key;
        int ret;
        int err = 0;
 
@@ -5017,9 +5085,12 @@ static int fixup_tree_root_location(struct btrfs_root *root,
        }
 
        err = -ENOENT;
-       ret = btrfs_find_item(root->fs_info->tree_root, path,
-                               BTRFS_I(dir)->root->root_key.objectid,
-                               location->objectid, BTRFS_ROOT_REF_KEY, NULL);
+       key.objectid = BTRFS_I(dir)->root->root_key.objectid;
+       key.type = BTRFS_ROOT_REF_KEY;
+       key.offset = location->objectid;
+
+       ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
+                               0, 0);
        if (ret) {
                if (ret < 0)
                        err = ret;
@@ -5258,7 +5329,10 @@ static struct inode *new_simple_dir(struct super_block *s,
        inode->i_op = &btrfs_dir_ro_inode_operations;
        inode->i_fop = &simple_dir_operations;
        inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
-       inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+       inode->i_mtime = CURRENT_TIME;
+       inode->i_atime = inode->i_mtime;
+       inode->i_ctime = inode->i_mtime;
+       BTRFS_I(inode)->i_otime = inode->i_mtime;
 
        return inode;
 }
@@ -5826,7 +5900,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 
        inode_init_owner(inode, dir, mode);
        inode_set_bytes(inode, 0);
-       inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+
+       inode->i_mtime = CURRENT_TIME;
+       inode->i_atime = inode->i_mtime;
+       inode->i_ctime = inode->i_mtime;
+       BTRFS_I(inode)->i_otime = inode->i_mtime;
+
        inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                  struct btrfs_inode_item);
        memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
@@ -7134,11 +7213,12 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
        u64 start = iblock << inode->i_blkbits;
        u64 lockstart, lockend;
        u64 len = bh_result->b_size;
+       u64 orig_len = len;
        int unlock_bits = EXTENT_LOCKED;
        int ret = 0;
 
        if (create)
-               unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
+               unlock_bits |= EXTENT_DIRTY;
        else
                len = min_t(u64, len, root->sectorsize);
 
@@ -7269,14 +7349,12 @@ unlock:
                if (start + len > i_size_read(inode))
                        i_size_write(inode, start + len);
 
-               spin_lock(&BTRFS_I(inode)->lock);
-               BTRFS_I(inode)->outstanding_extents++;
-               spin_unlock(&BTRFS_I(inode)->lock);
-
-               ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-                                    lockstart + len - 1, EXTENT_DELALLOC, NULL,
-                                    &cached_state, GFP_NOFS);
-               BUG_ON(ret);
+               if (len < orig_len) {
+                       spin_lock(&BTRFS_I(inode)->lock);
+                       BTRFS_I(inode)->outstanding_extents++;
+                       spin_unlock(&BTRFS_I(inode)->lock);
+               }
+               btrfs_free_reserved_data_space(inode, len);
        }
 
        /*
@@ -7805,8 +7883,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        }
 
        /* async crcs make it difficult to collect full stripe writes. */
-       if (btrfs_get_alloc_profile(root, 1) &
-           (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
+       if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
                async_submit = 0;
        else
                async_submit = 1;
@@ -8053,8 +8130,6 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                else if (ret >= 0 && (size_t)ret < count)
                        btrfs_delalloc_release_space(inode,
                                                     count - (size_t)ret);
-               else
-                       btrfs_delalloc_release_metadata(inode, 0);
        }
 out:
        if (wakeup)
@@ -8575,6 +8650,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 
        ei->delayed_node = NULL;
 
+       ei->i_otime.tv_sec = 0;
+       ei->i_otime.tv_nsec = 0;
+
        inode = &ei->vfs_inode;
        extent_map_tree_init(&ei->extent_tree);
        extent_io_tree_init(&ei->io_tree, &inode->i_data);
index d49fe8a0f6b5c9ada112830f6f27a8eafe202c92..74609b931ba5564da01de0955b8aa1d3142512d7 100644 (file)
@@ -776,11 +776,11 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
            IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
                return -EPERM;
        if (isdir) {
-               if (!S_ISDIR(victim->d_inode->i_mode))
+               if (!d_is_dir(victim))
                        return -ENOTDIR;
                if (IS_ROOT(victim))
                        return -EBUSY;
-       } else if (S_ISDIR(victim->d_inode->i_mode))
+       } else if (d_is_dir(victim))
                return -EISDIR;
        if (IS_DEADDIR(dir))
                return -ENOENT;
index 48b60dbf807fd170593b2e0c7d0a3d1a36f26f58..97159a8e91d40b24ca1a8f6367892ecf2ad8b960 100644 (file)
@@ -1431,9 +1431,8 @@ static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                qgroup = u64_to_ptr(unode->aux);
                qgroup->rfer += sign * oper->num_bytes;
                qgroup->rfer_cmpr += sign * oper->num_bytes;
+               WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
                qgroup->excl += sign * oper->num_bytes;
-               if (sign < 0)
-                       WARN_ON(qgroup->excl < oper->num_bytes);
                qgroup->excl_cmpr += sign * oper->num_bytes;
                qgroup_dirty(fs_info, qgroup);
 
index 8ab2a17bbba8b754bdcf90721d3ca40fc0e2f4b6..5264858ed7683f2306ccba372bf510c4509862fa 100644 (file)
  */
 #define RBIO_CACHE_READY_BIT   3
 
-/*
- * bbio and raid_map is managed by the caller, so we shouldn't free
- * them here. And besides that, all rbios with this flag should not
- * be cached, because we need raid_map to check the rbios' stripe
- * is the same or not, but it is very likely that the caller has
- * free raid_map, so don't cache those rbios.
- */
-#define RBIO_HOLD_BBIO_MAP_BIT 4
-
 #define RBIO_CACHE_SIZE 1024
 
 enum btrfs_rbio_ops {
@@ -79,13 +70,6 @@ struct btrfs_raid_bio {
        struct btrfs_fs_info *fs_info;
        struct btrfs_bio *bbio;
 
-       /*
-        * logical block numbers for the start of each stripe
-        * The last one or two are p/q.  These are sorted,
-        * so raid_map[0] is the start of our full stripe
-        */
-       u64 *raid_map;
-
        /* while we're doing rmw on a stripe
         * we put it into a hash table so we can
         * lock the stripe and merge more rbios
@@ -303,7 +287,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
  */
 static int rbio_bucket(struct btrfs_raid_bio *rbio)
 {
-       u64 num = rbio->raid_map[0];
+       u64 num = rbio->bbio->raid_map[0];
 
        /*
         * we shift down quite a bit.  We're using byte
@@ -606,8 +590,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
            test_bit(RBIO_CACHE_BIT, &cur->flags))
                return 0;
 
-       if (last->raid_map[0] !=
-           cur->raid_map[0])
+       if (last->bbio->raid_map[0] !=
+           cur->bbio->raid_map[0])
                return 0;
 
        /* we can't merge with different operations */
@@ -689,7 +673,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(cur, &h->hash_list, hash_list) {
                walk++;
-               if (cur->raid_map[0] == rbio->raid_map[0]) {
+               if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
                        spin_lock(&cur->bio_list_lock);
 
                        /* can we steal this cached rbio's pages? */
@@ -841,21 +825,6 @@ done_nolock:
                remove_rbio_from_cache(rbio);
 }
 
-static inline void
-__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
-{
-       if (need) {
-               kfree(raid_map);
-               kfree(bbio);
-       }
-}
-
-static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
-{
-       __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
-                       !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
-}
-
 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 {
        int i;
@@ -875,8 +844,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
                }
        }
 
-       free_bbio_and_raid_map(rbio);
-
+       btrfs_put_bbio(rbio->bbio);
        kfree(rbio);
 }
 
@@ -985,8 +953,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
  * this does not allocate any pages for rbio->pages.
  */
 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
-                         struct btrfs_bio *bbio, u64 *raid_map,
-                         u64 stripe_len)
+                         struct btrfs_bio *bbio, u64 stripe_len)
 {
        struct btrfs_raid_bio *rbio;
        int nr_data = 0;
@@ -1007,7 +974,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
        INIT_LIST_HEAD(&rbio->stripe_cache);
        INIT_LIST_HEAD(&rbio->hash_list);
        rbio->bbio = bbio;
-       rbio->raid_map = raid_map;
        rbio->fs_info = root->fs_info;
        rbio->stripe_len = stripe_len;
        rbio->nr_pages = num_pages;
@@ -1028,10 +994,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
        rbio->bio_pages = p + sizeof(struct page *) * num_pages;
        rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
 
-       if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
+       if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
+               nr_data = real_stripes - 1;
+       else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
                nr_data = real_stripes - 2;
        else
-               nr_data = real_stripes - 1;
+               BUG();
 
        rbio->nr_data = nr_data;
        return rbio;
@@ -1182,7 +1150,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
        spin_lock_irq(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list) {
                start = (u64)bio->bi_iter.bi_sector << 9;
-               stripe_offset = start - rbio->raid_map[0];
+               stripe_offset = start - rbio->bbio->raid_map[0];
                page_index = stripe_offset >> PAGE_CACHE_SHIFT;
 
                for (i = 0; i < bio->bi_vcnt; i++) {
@@ -1402,7 +1370,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
        logical <<= 9;
 
        for (i = 0; i < rbio->nr_data; i++) {
-               stripe_start = rbio->raid_map[i];
+               stripe_start = rbio->bbio->raid_map[i];
                if (logical >= stripe_start &&
                    logical < stripe_start + rbio->stripe_len) {
                        return i;
@@ -1776,17 +1744,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
  * our main entry point for writes from the rest of the FS.
  */
 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
-                       struct btrfs_bio *bbio, u64 *raid_map,
-                       u64 stripe_len)
+                       struct btrfs_bio *bbio, u64 stripe_len)
 {
        struct btrfs_raid_bio *rbio;
        struct btrfs_plug_cb *plug = NULL;
        struct blk_plug_cb *cb;
        int ret;
 
-       rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+       rbio = alloc_rbio(root, bbio, stripe_len);
        if (IS_ERR(rbio)) {
-               __free_bbio_and_raid_map(bbio, raid_map, 1);
+               btrfs_put_bbio(bbio);
                return PTR_ERR(rbio);
        }
        bio_list_add(&rbio->bio_list, bio);
@@ -1885,9 +1852,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                }
 
                /* all raid6 handling here */
-               if (rbio->raid_map[rbio->real_stripes - 1] ==
-                   RAID6_Q_STRIPE) {
-
+               if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
                        /*
                         * single failure, rebuild from parity raid5
                         * style
@@ -1922,8 +1887,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                         * here due to a crc mismatch and we can't give them the
                         * data they want
                         */
-                       if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
-                               if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
+                       if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
+                               if (rbio->bbio->raid_map[faila] ==
+                                   RAID5_P_STRIPE) {
                                        err = -EIO;
                                        goto cleanup;
                                }
@@ -1934,7 +1900,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                                goto pstripe;
                        }
 
-                       if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
+                       if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
                                raid6_datap_recov(rbio->real_stripes,
                                                  PAGE_SIZE, faila, pointers);
                        } else {
@@ -2001,8 +1967,7 @@ cleanup:
 
 cleanup_io:
        if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
-               if (err == 0 &&
-                   !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags))
+               if (err == 0)
                        cache_rbio_pages(rbio);
                else
                        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -2156,15 +2121,16 @@ cleanup:
  * of the drive.
  */
 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
-                         struct btrfs_bio *bbio, u64 *raid_map,
-                         u64 stripe_len, int mirror_num, int generic_io)
+                         struct btrfs_bio *bbio, u64 stripe_len,
+                         int mirror_num, int generic_io)
 {
        struct btrfs_raid_bio *rbio;
        int ret;
 
-       rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+       rbio = alloc_rbio(root, bbio, stripe_len);
        if (IS_ERR(rbio)) {
-               __free_bbio_and_raid_map(bbio, raid_map, generic_io);
+               if (generic_io)
+                       btrfs_put_bbio(bbio);
                return PTR_ERR(rbio);
        }
 
@@ -2175,7 +2141,8 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
        rbio->faila = find_logical_bio_stripe(rbio, bio);
        if (rbio->faila == -1) {
                BUG();
-               __free_bbio_and_raid_map(bbio, raid_map, generic_io);
+               if (generic_io)
+                       btrfs_put_bbio(bbio);
                kfree(rbio);
                return -EIO;
        }
@@ -2184,7 +2151,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
                btrfs_bio_counter_inc_noblocked(root->fs_info);
                rbio->generic_bio_cnt = 1;
        } else {
-               set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags);
+               btrfs_get_bbio(bbio);
        }
 
        /*
@@ -2240,14 +2207,14 @@ static void read_rebuild_work(struct btrfs_work *work)
 
 struct btrfs_raid_bio *
 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
-                              struct btrfs_bio *bbio, u64 *raid_map,
-                              u64 stripe_len, struct btrfs_device *scrub_dev,
+                              struct btrfs_bio *bbio, u64 stripe_len,
+                              struct btrfs_device *scrub_dev,
                               unsigned long *dbitmap, int stripe_nsectors)
 {
        struct btrfs_raid_bio *rbio;
        int i;
 
-       rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+       rbio = alloc_rbio(root, bbio, stripe_len);
        if (IS_ERR(rbio))
                return NULL;
        bio_list_add(&rbio->bio_list, bio);
@@ -2279,10 +2246,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
        int stripe_offset;
        int index;
 
-       ASSERT(logical >= rbio->raid_map[0]);
-       ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] +
+       ASSERT(logical >= rbio->bbio->raid_map[0]);
+       ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
                                rbio->stripe_len * rbio->nr_data);
-       stripe_offset = (int)(logical - rbio->raid_map[0]);
+       stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
        index = stripe_offset >> PAGE_CACHE_SHIFT;
        rbio->bio_pages[index] = page;
 }
index 31d4a157b5e3a153fb283a12e8852a962a63f1f2..2b5d7977d83b2248e40d7331e4fd7d6d831ef1d6 100644 (file)
@@ -43,16 +43,15 @@ struct btrfs_raid_bio;
 struct btrfs_device;
 
 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
-                         struct btrfs_bio *bbio, u64 *raid_map,
-                         u64 stripe_len, int mirror_num, int generic_io);
+                         struct btrfs_bio *bbio, u64 stripe_len,
+                         int mirror_num, int generic_io);
 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
-                              struct btrfs_bio *bbio, u64 *raid_map,
-                              u64 stripe_len);
+                              struct btrfs_bio *bbio, u64 stripe_len);
 
 struct btrfs_raid_bio *
 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
-                              struct btrfs_bio *bbio, u64 *raid_map,
-                              u64 stripe_len, struct btrfs_device *scrub_dev,
+                              struct btrfs_bio *bbio, u64 stripe_len,
+                              struct btrfs_device *scrub_dev,
                               unsigned long *dbitmap, int stripe_nsectors);
 void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
                                   struct page *page, u64 logical);
index b63ae20618fb3f573d7917b088fa58a11f887293..0e7beea92b4cc1279def4a3c61a440ec177ef1a0 100644 (file)
@@ -66,7 +66,6 @@ struct reada_extctl {
 struct reada_extent {
        u64                     logical;
        struct btrfs_key        top;
-       u32                     blocksize;
        int                     err;
        struct list_head        extctl;
        int                     refcnt;
@@ -349,7 +348,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 
        blocksize = root->nodesize;
        re->logical = logical;
-       re->blocksize = blocksize;
        re->top = *top;
        INIT_LIST_HEAD(&re->extctl);
        spin_lock_init(&re->lock);
@@ -463,7 +461,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
        spin_unlock(&fs_info->reada_lock);
        btrfs_dev_replace_unlock(&fs_info->dev_replace);
 
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
        return re;
 
 error:
@@ -488,7 +486,7 @@ error:
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
        kfree(re);
        return re_exist;
 }
@@ -660,7 +658,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
        int mirror_num = 0;
        struct extent_buffer *eb = NULL;
        u64 logical;
-       u32 blocksize;
        int ret;
        int i;
        int need_kick = 0;
@@ -694,7 +691,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                spin_unlock(&fs_info->reada_lock);
                return 0;
        }
-       dev->reada_next = re->logical + re->blocksize;
+       dev->reada_next = re->logical + fs_info->tree_root->nodesize;
        re->refcnt++;
 
        spin_unlock(&fs_info->reada_lock);
@@ -709,7 +706,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                }
        }
        logical = re->logical;
-       blocksize = re->blocksize;
 
        spin_lock(&re->lock);
        if (re->scheduled_for == NULL) {
@@ -724,8 +720,8 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                return 0;
 
        atomic_inc(&dev->reada_in_flight);
-       ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
-                        mirror_num, &eb);
+       ret = reada_tree_block_flagged(fs_info->extent_root, logical,
+                       mirror_num, &eb);
        if (ret)
                __readahead_hook(fs_info->extent_root, NULL, logical, ret);
        else if (eb)
@@ -851,7 +847,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                                break;
                        printk(KERN_DEBUG
                                "  re: logical %llu size %u empty %d for %lld",
-                               re->logical, re->blocksize,
+                               re->logical, fs_info->tree_root->nodesize,
                                list_empty(&re->extctl), re->scheduled_for ?
                                re->scheduled_for->devid : -1);
 
@@ -886,7 +882,8 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                }
                printk(KERN_DEBUG
                        "re: logical %llu size %u list empty %d for %lld",
-                       re->logical, re->blocksize, list_empty(&re->extctl),
+                       re->logical, fs_info->tree_root->nodesize,
+                       list_empty(&re->extctl),
                        re->scheduled_for ? re->scheduled_for->devid : -1);
                for (i = 0; i < re->nzones; ++i) {
                        printk(KERN_CONT " zone %llu-%llu devs",
index 74257d6436adda1b772d8658be41202093ef577a..d83085381bccfa745ee0258b8cab5558afbcd639 100644 (file)
@@ -2855,9 +2855,10 @@ static void update_processed_blocks(struct reloc_control *rc,
        }
 }
 
-static int tree_block_processed(u64 bytenr, u32 blocksize,
-                               struct reloc_control *rc)
+static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
 {
+       u32 blocksize = rc->extent_root->nodesize;
+
        if (test_range_bit(&rc->processed_blocks, bytenr,
                           bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
                return 1;
@@ -2965,8 +2966,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
        while (rb_node) {
                block = rb_entry(rb_node, struct tree_block, rb_node);
                if (!block->key_ready)
-                       readahead_tree_block(rc->extent_root, block->bytenr,
-                                       block->key.objectid);
+                       readahead_tree_block(rc->extent_root, block->bytenr);
                rb_node = rb_next(rb_node);
        }
 
@@ -3353,7 +3353,7 @@ static int __add_tree_block(struct reloc_control *rc,
        bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
                                        SKINNY_METADATA);
 
-       if (tree_block_processed(bytenr, blocksize, rc))
+       if (tree_block_processed(bytenr, rc))
                return 0;
 
        if (tree_search(blocks, bytenr))
@@ -3611,7 +3611,7 @@ static int find_data_references(struct reloc_control *rc,
                if (added)
                        goto next;
 
-               if (!tree_block_processed(leaf->start, leaf->len, rc)) {
+               if (!tree_block_processed(leaf->start, rc)) {
                        block = kmalloc(sizeof(*block), GFP_NOFS);
                        if (!block) {
                                err = -ENOMEM;
index e427cb7ee12c7d848cd16402d78854e22db969dd..ec57687c9a4d8a079466d6e9f95724188ec03bc7 100644 (file)
@@ -66,7 +66,6 @@ struct scrub_ctx;
 struct scrub_recover {
        atomic_t                refs;
        struct btrfs_bio        *bbio;
-       u64                     *raid_map;
        u64                     map_length;
 };
 
@@ -80,7 +79,7 @@ struct scrub_page {
        u64                     logical;
        u64                     physical;
        u64                     physical_for_dev_replace;
-       atomic_t                ref_count;
+       atomic_t                refs;
        struct {
                unsigned int    mirror_num:8;
                unsigned int    have_csum:1;
@@ -113,7 +112,7 @@ struct scrub_block {
        struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
        int                     page_count;
        atomic_t                outstanding_pages;
-       atomic_t                ref_count; /* free mem on transition to zero */
+       atomic_t                refs; /* free mem on transition to zero */
        struct scrub_ctx        *sctx;
        struct scrub_parity     *sparity;
        struct {
@@ -142,7 +141,7 @@ struct scrub_parity {
 
        int                     stripe_len;
 
-       atomic_t                ref_count;
+       atomic_t                refs;
 
        struct list_head        spages;
 
@@ -194,6 +193,15 @@ struct scrub_ctx {
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;
+
+       /*
+        * Use a ref counter to avoid use-after-free issues. Scrub workers
+        * decrement bios_in_flight and workers_pending and then do a wakeup
+        * on the list_wait wait queue. We must ensure the main scrub task
+        * doesn't free the scrub context before or while the workers are
+        * doing the wakeup() call.
+        */
+       atomic_t                refs;
 };
 
 struct scrub_fixup_nodatasum {
@@ -236,10 +244,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
-static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
-                                    struct btrfs_fs_info *fs_info,
-                                    struct scrub_block *original_sblock,
-                                    u64 length, u64 logical,
+static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
                                     struct scrub_block *sblocks_for_recheck);
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                                struct scrub_block *sblock, int is_metadata,
@@ -251,8 +256,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size);
 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
-                                            struct scrub_block *sblock_good,
-                                            int force_write);
+                                            struct scrub_block *sblock_good);
 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write);
@@ -302,10 +306,12 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 static void copy_nocow_pages_worker(struct btrfs_work *work);
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
+static void scrub_put_ctx(struct scrub_ctx *sctx);
 
 
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
 {
+       atomic_inc(&sctx->refs);
        atomic_inc(&sctx->bios_in_flight);
 }
 
@@ -313,6 +319,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
 {
        atomic_dec(&sctx->bios_in_flight);
        wake_up(&sctx->list_wait);
+       scrub_put_ctx(sctx);
 }
 
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
@@ -346,6 +353,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
 {
        struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
 
+       atomic_inc(&sctx->refs);
        /*
         * increment scrubs_running to prevent cancel requests from
         * completing as long as a worker is running. we must also
@@ -388,6 +396,7 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
        atomic_dec(&sctx->workers_pending);
        wake_up(&fs_info->scrub_pause_wait);
        wake_up(&sctx->list_wait);
+       scrub_put_ctx(sctx);
 }
 
 static void scrub_free_csums(struct scrub_ctx *sctx)
@@ -433,6 +442,12 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
        kfree(sctx);
 }
 
+static void scrub_put_ctx(struct scrub_ctx *sctx)
+{
+       if (atomic_dec_and_test(&sctx->refs))
+               scrub_free_ctx(sctx);
+}
+
 static noinline_for_stack
 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 {
@@ -457,6 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
        if (!sctx)
                goto nomem;
+       atomic_set(&sctx->refs, 1);
        sctx->is_dev_replace = is_dev_replace;
        sctx->pages_per_rd_bio = pages_per_rd_bio;
        sctx->curr = -1;
@@ -520,6 +536,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key root_key;
+       struct btrfs_key key;
 
        root_key.objectid = root;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -530,7 +547,14 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
                goto err;
        }
 
-       ret = inode_item_info(inum, 0, local_root, swarn->path);
+       /*
+        * this makes the path point to (inum INODE_ITEM ioff)
+        */
+       key.objectid = inum;
+       key.type = BTRFS_INODE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
        if (ret) {
                btrfs_release_path(swarn->path);
                goto err;
@@ -848,8 +872,7 @@ static inline void scrub_get_recover(struct scrub_recover *recover)
 static inline void scrub_put_recover(struct scrub_recover *recover)
 {
        if (atomic_dec_and_test(&recover->refs)) {
-               kfree(recover->bbio);
-               kfree(recover->raid_map);
+               btrfs_put_bbio(recover->bbio);
                kfree(recover);
        }
 }
@@ -955,8 +978,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
        }
 
        /* setup the context, map the logical blocks and alloc the pages */
-       ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
-                                       logical, sblocks_for_recheck);
+       ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
        if (ret) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.read_errors++;
@@ -1030,9 +1052,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
        if (!is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;
 
-nodatasum_case:
                WARN_ON(sctx->is_dev_replace);
 
+nodatasum_case:
+
                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COW'ed, that it might be modified
@@ -1091,76 +1114,20 @@ nodatasum_case:
                    sblock_other->no_io_error_seen) {
                        if (sctx->is_dev_replace) {
                                scrub_write_block_to_dev_replace(sblock_other);
+                               goto corrected_error;
                        } else {
-                               int force_write = is_metadata || have_csum;
-
                                ret = scrub_repair_block_from_good_copy(
-                                               sblock_bad, sblock_other,
-                                               force_write);
+                                               sblock_bad, sblock_other);
+                               if (!ret)
+                                       goto corrected_error;
                        }
-                       if (0 == ret)
-                               goto corrected_error;
                }
        }
 
-       /*
-        * for dev_replace, pick good pages and write to the target device.
-        */
-       if (sctx->is_dev_replace) {
-               success = 1;
-               for (page_num = 0; page_num < sblock_bad->page_count;
-                    page_num++) {
-                       int sub_success;
-
-                       sub_success = 0;
-                       for (mirror_index = 0;
-                            mirror_index < BTRFS_MAX_MIRRORS &&
-                            sblocks_for_recheck[mirror_index].page_count > 0;
-                            mirror_index++) {
-                               struct scrub_block *sblock_other =
-                                       sblocks_for_recheck + mirror_index;
-                               struct scrub_page *page_other =
-                                       sblock_other->pagev[page_num];
-
-                               if (!page_other->io_error) {
-                                       ret = scrub_write_page_to_dev_replace(
-                                                       sblock_other, page_num);
-                                       if (ret == 0) {
-                                               /* succeeded for this page */
-                                               sub_success = 1;
-                                               break;
-                                       } else {
-                                               btrfs_dev_replace_stats_inc(
-                                                       &sctx->dev_root->
-                                                       fs_info->dev_replace.
-                                                       num_write_errors);
-                                       }
-                               }
-                       }
-
-                       if (!sub_success) {
-                               /*
-                                * did not find a mirror to fetch the page
-                                * from. scrub_write_page_to_dev_replace()
-                                * handles this case (page->io_error), by
-                                * filling the block with zeros before
-                                * submitting the write request
-                                */
-                               success = 0;
-                               ret = scrub_write_page_to_dev_replace(
-                                               sblock_bad, page_num);
-                               if (ret)
-                                       btrfs_dev_replace_stats_inc(
-                                               &sctx->dev_root->fs_info->
-                                               dev_replace.num_write_errors);
-                       }
-               }
-
-               goto out;
-       }
+       if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
+               goto did_not_correct_error;
 
        /*
-        * for regular scrub, repair those pages that are errored.
         * In case of I/O errors in the area that is supposed to be
         * repaired, continue by picking good copies of those pages.
         * Select the good pages from mirrors to rewrite bad pages from
@@ -1184,44 +1151,64 @@ nodatasum_case:
         * mirror, even if other 512 byte sectors in the same PAGE_SIZE
         * area are unreadable.
         */
-
-       /* can only fix I/O errors from here on */
-       if (sblock_bad->no_io_error_seen)
-               goto did_not_correct_error;
-
        success = 1;
-       for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
+       for (page_num = 0; page_num < sblock_bad->page_count;
+            page_num++) {
                struct scrub_page *page_bad = sblock_bad->pagev[page_num];
+               struct scrub_block *sblock_other = NULL;
 
-               if (!page_bad->io_error)
+               /* skip no-io-error page in scrub */
+               if (!page_bad->io_error && !sctx->is_dev_replace)
                        continue;
 
-               for (mirror_index = 0;
-                    mirror_index < BTRFS_MAX_MIRRORS &&
-                    sblocks_for_recheck[mirror_index].page_count > 0;
-                    mirror_index++) {
-                       struct scrub_block *sblock_other = sblocks_for_recheck +
-                                                          mirror_index;
-                       struct scrub_page *page_other = sblock_other->pagev[
-                                                       page_num];
-
-                       if (!page_other->io_error) {
-                               ret = scrub_repair_page_from_good_copy(
-                                       sblock_bad, sblock_other, page_num, 0);
-                               if (0 == ret) {
-                                       page_bad->io_error = 0;
-                                       break; /* succeeded for this page */
+               /* try to find no-io-error page in mirrors */
+               if (page_bad->io_error) {
+                       for (mirror_index = 0;
+                            mirror_index < BTRFS_MAX_MIRRORS &&
+                            sblocks_for_recheck[mirror_index].page_count > 0;
+                            mirror_index++) {
+                               if (!sblocks_for_recheck[mirror_index].
+                                   pagev[page_num]->io_error) {
+                                       sblock_other = sblocks_for_recheck +
+                                                      mirror_index;
+                                       break;
                                }
                        }
+                       if (!sblock_other)
+                               success = 0;
                }
 
-               if (page_bad->io_error) {
-                       /* did not find a mirror to copy the page from */
-                       success = 0;
+               if (sctx->is_dev_replace) {
+                       /*
+                        * did not find a mirror to fetch the page
+                        * from. scrub_write_page_to_dev_replace()
+                        * handles this case (page->io_error), by
+                        * filling the block with zeros before
+                        * submitting the write request
+                        */
+                       if (!sblock_other)
+                               sblock_other = sblock_bad;
+
+                       if (scrub_write_page_to_dev_replace(sblock_other,
+                                                           page_num) != 0) {
+                               btrfs_dev_replace_stats_inc(
+                                       &sctx->dev_root->
+                                       fs_info->dev_replace.
+                                       num_write_errors);
+                               success = 0;
+                       }
+               } else if (sblock_other) {
+                       ret = scrub_repair_page_from_good_copy(sblock_bad,
+                                                              sblock_other,
+                                                              page_num, 0);
+                       if (0 == ret)
+                               page_bad->io_error = 0;
+                       else
+                               success = 0;
                }
        }
 
-       if (success) {
+       if (success && !sctx->is_dev_replace) {
                if (is_metadata || have_csum) {
                        /*
                         * need to verify the checksum now that all
@@ -1288,19 +1275,18 @@ out:
        return 0;
 }
 
-static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
+static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
 {
-       if (raid_map) {
-               if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
-                       return 3;
-               else
-                       return 2;
-       } else {
+       if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
+               return 2;
+       else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
+               return 3;
+       else
                return (int)bbio->num_stripes;
-       }
 }
 
-static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
+static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
+                                                u64 *raid_map,
                                                 u64 mapped_length,
                                                 int nstripes, int mirror,
                                                 int *stripe_index,
@@ -1308,7 +1294,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
 {
        int i;
 
-       if (raid_map) {
+       if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                /* RAID5/6 */
                for (i = 0; i < nstripes; i++) {
                        if (raid_map[i] == RAID6_Q_STRIPE ||
@@ -1329,72 +1315,65 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
        }
 }
 
-static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
-                                    struct btrfs_fs_info *fs_info,
-                                    struct scrub_block *original_sblock,
-                                    u64 length, u64 logical,
+static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
                                     struct scrub_block *sblocks_for_recheck)
 {
+       struct scrub_ctx *sctx = original_sblock->sctx;
+       struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+       u64 length = original_sblock->page_count * PAGE_SIZE;
+       u64 logical = original_sblock->pagev[0]->logical;
        struct scrub_recover *recover;
        struct btrfs_bio *bbio;
-       u64 *raid_map;
        u64 sublen;
        u64 mapped_length;
        u64 stripe_offset;
        int stripe_index;
-       int page_index;
+       int page_index = 0;
        int mirror_index;
        int nmirrors;
        int ret;
 
        /*
-        * note: the two members ref_count and outstanding_pages
+        * note: the two members refs and outstanding_pages
         * are not used (and not set) in the blocks that are used for
         * the recheck procedure
         */
 
-       page_index = 0;
        while (length > 0) {
                sublen = min_t(u64, length, PAGE_SIZE);
                mapped_length = sublen;
                bbio = NULL;
-               raid_map = NULL;
 
                /*
                 * with a length of PAGE_SIZE, each returned stripe
                 * represents one mirror
                 */
                ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
-                                      &mapped_length, &bbio, 0, &raid_map);
+                                      &mapped_length, &bbio, 0, 1);
                if (ret || !bbio || mapped_length < sublen) {
-                       kfree(bbio);
-                       kfree(raid_map);
+                       btrfs_put_bbio(bbio);
                        return -EIO;
                }
 
                recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
                if (!recover) {
-                       kfree(bbio);
-                       kfree(raid_map);
+                       btrfs_put_bbio(bbio);
                        return -ENOMEM;
                }
 
                atomic_set(&recover->refs, 1);
                recover->bbio = bbio;
-               recover->raid_map = raid_map;
                recover->map_length = mapped_length;
 
                BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
 
-               nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
+               nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
+
                for (mirror_index = 0; mirror_index < nmirrors;
                     mirror_index++) {
                        struct scrub_block *sblock;
                        struct scrub_page *page;
 
-                       if (mirror_index >= BTRFS_MAX_MIRRORS)
-                               continue;
-
                        sblock = sblocks_for_recheck + mirror_index;
                        sblock->sctx = sctx;
                        page = kzalloc(sizeof(*page), GFP_NOFS);
@@ -1410,9 +1389,12 @@ leave_nomem:
                        sblock->pagev[page_index] = page;
                        page->logical = logical;
 
-                       scrub_stripe_index_and_offset(logical, raid_map,
+                       scrub_stripe_index_and_offset(logical,
+                                                     bbio->map_type,
+                                                     bbio->raid_map,
                                                      mapped_length,
-                                                     bbio->num_stripes,
+                                                     bbio->num_stripes -
+                                                     bbio->num_tgtdevs,
                                                      mirror_index,
                                                      &stripe_index,
                                                      &stripe_offset);
@@ -1458,7 +1440,8 @@ static void scrub_bio_wait_endio(struct bio *bio, int error)
 
 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
 {
-       return page->recover && page->recover->raid_map;
+       return page->recover &&
+              (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
 }
 
 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
@@ -1475,7 +1458,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
        bio->bi_end_io = scrub_bio_wait_endio;
 
        ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
-                                   page->recover->raid_map,
                                    page->recover->map_length,
                                    page->mirror_num, 0);
        if (ret)
@@ -1615,8 +1597,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 }
 
 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
-                                            struct scrub_block *sblock_good,
-                                            int force_write)
+                                            struct scrub_block *sblock_good)
 {
        int page_num;
        int ret = 0;
@@ -1626,8 +1607,7 @@ static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
 
                ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
                                                           sblock_good,
-                                                          page_num,
-                                                          force_write);
+                                                          page_num, 1);
                if (ret_sub)
                        ret = ret_sub;
        }
@@ -2067,12 +2047,12 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 
 static void scrub_block_get(struct scrub_block *sblock)
 {
-       atomic_inc(&sblock->ref_count);
+       atomic_inc(&sblock->refs);
 }
 
 static void scrub_block_put(struct scrub_block *sblock)
 {
-       if (atomic_dec_and_test(&sblock->ref_count)) {
+       if (atomic_dec_and_test(&sblock->refs)) {
                int i;
 
                if (sblock->sparity)
@@ -2086,12 +2066,12 @@ static void scrub_block_put(struct scrub_block *sblock)
 
 static void scrub_page_get(struct scrub_page *spage)
 {
-       atomic_inc(&spage->ref_count);
+       atomic_inc(&spage->refs);
 }
 
 static void scrub_page_put(struct scrub_page *spage)
 {
-       if (atomic_dec_and_test(&spage->ref_count)) {
+       if (atomic_dec_and_test(&spage->refs)) {
                if (spage->page)
                        __free_page(spage->page);
                kfree(spage);
@@ -2217,7 +2197,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 
        /* one ref inside this function, plus one for each page added to
         * a bio later on */
-       atomic_set(&sblock->ref_count, 1);
+       atomic_set(&sblock->refs, 1);
        sblock->sctx = sctx;
        sblock->no_io_error_seen = 1;
 
@@ -2510,7 +2490,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 
        /* one ref inside this function, plus one for each page added to
         * a bio later on */
-       atomic_set(&sblock->ref_count, 1);
+       atomic_set(&sblock->refs, 1);
        sblock->sctx = sctx;
        sblock->no_io_error_seen = 1;
        sblock->sparity = sparity;
@@ -2705,7 +2685,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
        struct btrfs_raid_bio *rbio;
        struct scrub_page *spage;
        struct btrfs_bio *bbio = NULL;
-       u64 *raid_map = NULL;
        u64 length;
        int ret;
 
@@ -2716,8 +2695,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
        length = sparity->logic_end - sparity->logic_start + 1;
        ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
                               sparity->logic_start,
-                              &length, &bbio, 0, &raid_map);
-       if (ret || !bbio || !raid_map)
+                              &length, &bbio, 0, 1);
+       if (ret || !bbio || !bbio->raid_map)
                goto bbio_out;
 
        bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
@@ -2729,8 +2708,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
        bio->bi_end_io = scrub_parity_bio_endio;
 
        rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
-                                             raid_map, length,
-                                             sparity->scrub_dev,
+                                             length, sparity->scrub_dev,
                                              sparity->dbitmap,
                                              sparity->nsectors);
        if (!rbio)
@@ -2747,8 +2725,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 rbio_out:
        bio_put(bio);
 bbio_out:
-       kfree(bbio);
-       kfree(raid_map);
+       btrfs_put_bbio(bbio);
        bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
                  sparity->nsectors);
        spin_lock(&sctx->stat_lock);
@@ -2765,12 +2742,12 @@ static inline int scrub_calc_parity_bitmap_len(int nsectors)
 
 static void scrub_parity_get(struct scrub_parity *sparity)
 {
-       atomic_inc(&sparity->ref_count);
+       atomic_inc(&sparity->refs);
 }
 
 static void scrub_parity_put(struct scrub_parity *sparity)
 {
-       if (!atomic_dec_and_test(&sparity->ref_count))
+       if (!atomic_dec_and_test(&sparity->refs))
                return;
 
        scrub_parity_check_and_repair(sparity);
@@ -2820,7 +2797,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
        sparity->scrub_dev = sdev;
        sparity->logic_start = logic_start;
        sparity->logic_end = logic_end;
-       atomic_set(&sparity->ref_count, 1);
+       atomic_set(&sparity->refs, 1);
        INIT_LIST_HEAD(&sparity->spages);
        sparity->dbitmap = sparity->bitmap;
        sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
@@ -3037,8 +3014,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                increment = map->stripe_len;
                mirror_num = num % map->num_stripes + 1;
-       } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                               BTRFS_BLOCK_GROUP_RAID6)) {
+       } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                get_raid56_logic_offset(physical, num, map, &offset, NULL);
                increment = map->stripe_len * nr_data_stripes(map);
                mirror_num = 1;
@@ -3074,8 +3050,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
         */
        logical = base + offset;
        physical_end = physical + nstripes * map->stripe_len;
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID6)) {
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                get_raid56_logic_offset(physical_end, num,
                                        map, &logic_end, NULL);
                logic_end += base;
@@ -3121,8 +3096,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        ret = 0;
        while (physical < physical_end) {
                /* for raid56, we skip parity stripe */
-               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                               BTRFS_BLOCK_GROUP_RAID6)) {
+               if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                        ret = get_raid56_logic_offset(physical, num,
                                        map, &logical, &stripe_logical);
                        logical += base;
@@ -3280,8 +3254,7 @@ again:
                        scrub_free_csums(sctx);
                        if (extent_logical + extent_len <
                            key.objectid + bytes) {
-                               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                                       BTRFS_BLOCK_GROUP_RAID6)) {
+                               if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                                        /*
                                         * loop until we find next data stripe
                                         * or we have finished all stripes.
@@ -3775,7 +3748,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        scrub_workers_put(fs_info);
        mutex_unlock(&fs_info->scrub_lock);
 
-       scrub_free_ctx(sctx);
+       scrub_put_ctx(sctx);
 
        return ret;
 }
@@ -3881,14 +3854,14 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                              &mapped_length, &bbio, 0);
        if (ret || !bbio || mapped_length < extent_len ||
            !bbio->stripes[0].dev->bdev) {
-               kfree(bbio);
+               btrfs_put_bbio(bbio);
                return;
        }
 
        *extent_physical = bbio->stripes[0].physical;
        *extent_mirror_num = bbio->mirror_num;
        *extent_dev = bbio->stripes[0].dev;
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
 }
 
 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
index 804432dbc351d8b73064724cbfbc08237d522298..fe5857223515d14753c1f75ce60c2c12e16a3e67 100644 (file)
@@ -2471,12 +2471,9 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
        if (ret < 0)
                goto out;
        TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
-       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
-                       btrfs_inode_atime(ii));
-       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
-                       btrfs_inode_mtime(ii));
-       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
-                       btrfs_inode_ctime(ii));
+       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
+       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
+       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
        /* TODO Add otime support when the otime patches get into upstream */
 
        ret = send_cmd(sctx);
index 6f49b2872a6454330bac0ef912be3d0152e2ef4f..05fef198ff94fc8df4eb126a62195af8452fb18d 100644 (file)
@@ -1958,11 +1958,6 @@ static int btrfs_freeze(struct super_block *sb)
        return btrfs_commit_transaction(trans, root);
 }
 
-static int btrfs_unfreeze(struct super_block *sb)
-{
-       return 0;
-}
-
 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
@@ -2011,7 +2006,6 @@ static const struct super_operations btrfs_super_ops = {
        .statfs         = btrfs_statfs,
        .remount_fs     = btrfs_remount,
        .freeze_fs      = btrfs_freeze,
-       .unfreeze_fs    = btrfs_unfreeze,
 };
 
 static const struct file_operations btrfs_ctl_fops = {
index 92db3f648df40cc5d1fe3428a89348efffe45776..94edb0a2a026652b6cb017f45cc5e1ef9d521277 100644 (file)
@@ -733,10 +733,18 @@ int btrfs_init_sysfs(void)
 
        ret = btrfs_init_debugfs();
        if (ret)
-               return ret;
+               goto out1;
 
        init_feature_attrs();
        ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
+       if (ret)
+               goto out2;
+
+       return 0;
+out2:
+       debugfs_remove_recursive(btrfs_debugfs_root_dentry);
+out1:
+       kset_unregister(btrfs_kset);
 
        return ret;
 }
index cc286ce97d1e92b87308ed6c037406f68f302cb9..f51963a8f929e97d8030ca1a1e228263094cf244 100644 (file)
@@ -53,7 +53,7 @@ static int test_btrfs_split_item(void)
                return -ENOMEM;
        }
 
-       path->nodes[0] = eb = alloc_dummy_extent_buffer(0, 4096);
+       path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
        if (!eb) {
                test_msg("Could not allocate dummy buffer\n");
                ret = -ENOMEM;
index 7e99c2f98dd007984d5a5e16678cf42b1c2950c4..9e9f2368177d42114726fe4859ca938c8cd8079e 100644 (file)
@@ -258,8 +258,7 @@ static int test_find_delalloc(void)
        }
        ret = 0;
 out_bits:
-       clear_extent_bits(&tmp, 0, total_dirty - 1,
-                         (unsigned long)-1, GFP_NOFS);
+       clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_NOFS);
 out:
        if (locked_page)
                page_cache_release(locked_page);
index 3ae0f5b8bb80d619979919ff15d9241681f00e1a..a116b55ce7880a5e64837dfd6cd66f7412e27cec 100644 (file)
@@ -255,7 +255,7 @@ static noinline int test_btrfs_get_extent(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(0, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, 4096);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
@@ -843,7 +843,7 @@ static int test_hole_first(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(0, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, 4096);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
index ec3dcb20235774044e4b92e1230b1b0af593aea4..73f299ebdabb4389964fafeb7506bdd9716dea67 100644 (file)
@@ -404,12 +404,22 @@ int btrfs_test_qgroups(void)
                ret = -ENOMEM;
                goto out;
        }
+       /* We are using this root as our extent root */
+       root->fs_info->extent_root = root;
+
+       /*
+        * Some of the paths we test assume we have a filled out fs_info, so we
+        * just need to add the root in there so we don't panic.
+        */
+       root->fs_info->tree_root = root;
+       root->fs_info->quota_root = root;
+       root->fs_info->quota_enabled = 1;
 
        /*
         * Can't use bytenr 0, some things freak out
         * *cough*backref walking code*cough*
         */
-       root->node = alloc_test_extent_buffer(root->fs_info, 4096, 4096);
+       root->node = alloc_test_extent_buffer(root->fs_info, 4096);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -448,17 +458,6 @@ int btrfs_test_qgroups(void)
                goto out;
        }
 
-       /* We are using this root as our extent root */
-       root->fs_info->extent_root = root;
-
-       /*
-        * Some of the paths we test assume we have a filled out fs_info, so we
-        * just need to addt he root in there so we don't panic.
-        */
-       root->fs_info->tree_root = root;
-       root->fs_info->quota_root = root;
-       root->fs_info->quota_enabled = 1;
-
        test_msg("Running qgroup tests\n");
        ret = test_no_shared_qgroup(root);
        if (ret)
index e88b59d13439690f15810359ee7be343ad86b7a9..7e80f32550a663e7438d3e5fb8b1c37fcc391b64 100644 (file)
@@ -220,6 +220,7 @@ loop:
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
+       cur_trans->have_free_bgs = 0;
        cur_trans->start_time = get_seconds();
 
        cur_trans->delayed_refs.href_root = RB_ROOT;
@@ -248,6 +249,8 @@ loop:
        INIT_LIST_HEAD(&cur_trans->pending_chunks);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->pending_ordered);
+       INIT_LIST_HEAD(&cur_trans->dirty_bgs);
+       spin_lock_init(&cur_trans->dirty_bgs_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
@@ -1020,6 +1023,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
+       bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID);
 
        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);
@@ -1027,7 +1031,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
-                   old_root_used == btrfs_root_used(&root->root_item))
+                   old_root_used == btrfs_root_used(&root->root_item) &&
+                   (!extent_root ||
+                    list_empty(&trans->transaction->dirty_bgs)))
                        break;
 
                btrfs_set_root_node(&root->root_item, root->node);
@@ -1038,7 +1044,15 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
                        return ret;
 
                old_root_used = btrfs_root_used(&root->root_item);
-               ret = btrfs_write_dirty_block_groups(trans, root);
+               if (extent_root) {
+                       ret = btrfs_write_dirty_block_groups(trans, root);
+                       if (ret)
+                               return ret;
+               }
+               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+               if (ret)
+                       return ret;
+               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
                if (ret)
                        return ret;
        }
@@ -1061,10 +1075,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
        struct extent_buffer *eb;
        int ret;
 
-       ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-       if (ret)
-               return ret;
-
        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
@@ -1097,6 +1107,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
+               clear_bit(BTRFS_ROOT_DIRTY, &root->state);
 
                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
@@ -1983,6 +1994,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        switch_commit_roots(cur_trans, root->fs_info);
 
        assert_qgroups_uptodate(trans);
+       ASSERT(list_empty(&cur_trans->dirty_bgs));
        update_super_roots(root);
 
        btrfs_set_super_log_root(root->fs_info->super_copy, 0);
@@ -2026,6 +2038,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_finish_extent_commit(trans, root);
 
+       if (cur_trans->have_free_bgs)
+               btrfs_clear_space_info_full(root->fs_info);
+
        root->fs_info->last_trans_committed = cur_trans->transid;
        /*
         * We needn't acquire the lock here because there is no other task
index 00ed29c4b3f9d0ee4bc10f3e2da2424a8642020b..937050a2b68edaf6bdd6027f23c6c6d37b8257d5 100644 (file)
@@ -47,6 +47,11 @@ struct btrfs_transaction {
        atomic_t num_writers;
        atomic_t use_count;
 
+       /*
+        * true if there is free bgs operations in this transaction
+        */
+       int have_free_bgs;
+
        /* Be protected by fs_info->trans_lock when we want to change it. */
        enum btrfs_trans_state state;
        struct list_head list;
@@ -58,6 +63,8 @@ struct btrfs_transaction {
        struct list_head pending_chunks;
        struct list_head pending_ordered;
        struct list_head switch_commits;
+       struct list_head dirty_bgs;
+       spinlock_t dirty_bgs_lock;
        struct btrfs_delayed_ref_root delayed_refs;
        int aborted;
 };
index 1a9585d4380a330f96ba7d82f63cb87314d1701e..9a37f8b39bae9058a20aafe11c59793409115005 100644 (file)
@@ -453,11 +453,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 insert:
        btrfs_release_path(path);
        /* try to insert the key into the destination tree */
+       path->skip_release_on_error = 1;
        ret = btrfs_insert_empty_item(trans, root, path,
                                      key, item_size);
+       path->skip_release_on_error = 0;
 
        /* make sure any existing item is the correct size */
-       if (ret == -EEXIST) {
+       if (ret == -EEXIST || ret == -EOVERFLOW) {
                u32 found_size;
                found_size = btrfs_item_size_nr(path->nodes[0],
                                                path->slots[0]);
@@ -488,8 +490,20 @@ insert:
                src_item = (struct btrfs_inode_item *)src_ptr;
                dst_item = (struct btrfs_inode_item *)dst_ptr;
 
-               if (btrfs_inode_generation(eb, src_item) == 0)
+               if (btrfs_inode_generation(eb, src_item) == 0) {
+                       struct extent_buffer *dst_eb = path->nodes[0];
+
+                       if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
+                           S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) {
+                               struct btrfs_map_token token;
+                               u64 ino_size = btrfs_inode_size(eb, src_item);
+
+                               btrfs_init_map_token(&token);
+                               btrfs_set_token_inode_size(dst_eb, dst_item,
+                                                          ino_size, &token);
+                       }
                        goto no_copy;
+               }
 
                if (overwrite_root &&
                    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
@@ -844,7 +858,7 @@ out:
 static noinline int backref_in_log(struct btrfs_root *log,
                                   struct btrfs_key *key,
                                   u64 ref_objectid,
-                                  char *name, int namelen)
+                                  const char *name, int namelen)
 {
        struct btrfs_path *path;
        struct btrfs_inode_ref *ref;
@@ -1254,13 +1268,14 @@ out:
 }
 
 static int insert_orphan_item(struct btrfs_trans_handle *trans,
-                             struct btrfs_root *root, u64 offset)
+                             struct btrfs_root *root, u64 ino)
 {
        int ret;
-       ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
-                       offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
-       if (ret > 0)
-               ret = btrfs_insert_orphan_item(trans, root, offset);
+
+       ret = btrfs_insert_orphan_item(trans, root, ino);
+       if (ret == -EEXIST)
+               ret = 0;
+
        return ret;
 }
 
@@ -1287,6 +1302,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+               cur_offset = 0;
 
                while (cur_offset < item_size) {
                        extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
@@ -1302,7 +1318,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
        }
        btrfs_release_path(path);
 
-       if (ret < 0)
+       if (ret < 0 && ret != -ENOENT)
                return ret;
        return nlink;
 }
@@ -1394,9 +1410,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
        nlink = ret;
 
        ret = count_inode_extrefs(root, inode, path);
-       if (ret == -ENOENT)
-               ret = 0;
-
        if (ret < 0)
                goto out;
 
@@ -1556,6 +1569,30 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
        return ret;
 }
 
+/*
+ * Return true if an inode reference exists in the log for the given name,
+ * inode and parent inode.
+ */
+static bool name_in_log_ref(struct btrfs_root *log_root,
+                           const char *name, const int name_len,
+                           const u64 dirid, const u64 ino)
+{
+       struct btrfs_key search_key;
+
+       search_key.objectid = ino;
+       search_key.type = BTRFS_INODE_REF_KEY;
+       search_key.offset = dirid;
+       if (backref_in_log(log_root, &search_key, dirid, name, name_len))
+               return true;
+
+       search_key.type = BTRFS_INODE_EXTREF_KEY;
+       search_key.offset = btrfs_extref_hash(dirid, name, name_len);
+       if (backref_in_log(log_root, &search_key, dirid, name, name_len))
+               return true;
+
+       return false;
+}
+
 /*
  * take a single entry in a log directory item and replay it into
  * the subvolume.
@@ -1666,10 +1703,17 @@ out:
        return ret;
 
 insert:
+       if (name_in_log_ref(root->log_root, name, name_len,
+                           key->objectid, log_key.objectid)) {
+               /* The dentry will be added later. */
+               ret = 0;
+               update_size = false;
+               goto out;
+       }
        btrfs_release_path(path);
        ret = insert_one_name(trans, root, path, key->objectid, key->offset,
                              name, name_len, log_type, &log_key);
-       if (ret && ret != -ENOENT)
+       if (ret && ret != -ENOENT && ret != -EEXIST)
                goto out;
        update_size = false;
        ret = 0;
@@ -2164,7 +2208,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                parent = path->nodes[*level];
                root_owner = btrfs_header_owner(parent);
 
-               next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+               next = btrfs_find_create_tree_block(root, bytenr);
                if (!next)
                        return -ENOMEM;
 
@@ -2416,8 +2460,8 @@ static void wait_for_writer(struct btrfs_trans_handle *trans,
                mutex_unlock(&root->log_mutex);
                if (atomic_read(&root->log_writers))
                        schedule();
-               mutex_lock(&root->log_mutex);
                finish_wait(&root->log_writer_wait, &wait);
+               mutex_lock(&root->log_mutex);
        }
 }
 
@@ -3219,7 +3263,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
 static void fill_inode_item(struct btrfs_trans_handle *trans,
                            struct extent_buffer *leaf,
                            struct btrfs_inode_item *item,
-                           struct inode *inode, int log_inode_only)
+                           struct inode *inode, int log_inode_only,
+                           u64 logged_isize)
 {
        struct btrfs_map_token token;
 
@@ -3232,7 +3277,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                 * to say 'update this inode with these values'
                 */
                btrfs_set_token_inode_generation(leaf, item, 0, &token);
-               btrfs_set_token_inode_size(leaf, item, 0, &token);
+               btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
        } else {
                btrfs_set_token_inode_generation(leaf, item,
                                                 BTRFS_I(inode)->generation,
@@ -3245,19 +3290,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
        btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->atime,
                                     inode->i_atime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->atime,
                                      inode->i_atime.tv_nsec, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->mtime,
                                     inode->i_mtime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->mtime,
                                      inode->i_mtime.tv_nsec, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->ctime,
                                     inode->i_ctime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->ctime,
                                      inode->i_ctime.tv_nsec, &token);
 
        btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
@@ -3284,7 +3329,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
                return ret;
        inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                    struct btrfs_inode_item);
-       fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
+       fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
        btrfs_release_path(path);
        return 0;
 }
@@ -3293,7 +3338,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                               struct inode *inode,
                               struct btrfs_path *dst_path,
                               struct btrfs_path *src_path, u64 *last_extent,
-                              int start_slot, int nr, int inode_only)
+                              int start_slot, int nr, int inode_only,
+                              u64 logged_isize)
 {
        unsigned long src_offset;
        unsigned long dst_offset;
@@ -3350,7 +3396,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                                                    dst_path->slots[0],
                                                    struct btrfs_inode_item);
                        fill_inode_item(trans, dst_path->nodes[0], inode_item,
-                                       inode, inode_only == LOG_INODE_EXISTS);
+                                       inode, inode_only == LOG_INODE_EXISTS,
+                                       logged_isize);
                } else {
                        copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
                                           src_offset, ins_sizes[i]);
@@ -3902,6 +3949,33 @@ process:
        return ret;
 }
 
+static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
+                            struct btrfs_path *path, u64 *size_ret)
+{
+       struct btrfs_key key;
+       int ret;
+
+       key.objectid = btrfs_ino(inode);
+       key.type = BTRFS_INODE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
+       if (ret < 0) {
+               return ret;
+       } else if (ret > 0) {
+               *size_ret = i_size_read(inode);
+       } else {
+               struct btrfs_inode_item *item;
+
+               item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                     struct btrfs_inode_item);
+               *size_ret = btrfs_inode_size(path->nodes[0], item);
+       }
+
+       btrfs_release_path(path);
+       return 0;
+}
+
 /* log a single inode in the tree log.
  * At least one parent directory for this inode must exist in the tree
  * or be logged already.
@@ -3939,6 +4013,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        bool fast_search = false;
        u64 ino = btrfs_ino(inode);
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       u64 logged_isize = 0;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3966,15 +4041,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                max_key.type = (u8)-1;
        max_key.offset = (u64)-1;
 
-       /* Only run delayed items if we are a dir or a new file */
+       /*
+        * Only run delayed items if we are a dir or a new file.
+        * Otherwise commit the delayed inode only, which is needed in
+        * order for the log replay code to mark inodes for link count
+        * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
+        */
        if (S_ISDIR(inode->i_mode) ||
-           BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
+           BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
                ret = btrfs_commit_inode_delayed_items(trans, inode);
-               if (ret) {
-                       btrfs_free_path(path);
-                       btrfs_free_path(dst_path);
-                       return ret;
-               }
+       else
+               ret = btrfs_commit_inode_delayed_inode(inode);
+
+       if (ret) {
+               btrfs_free_path(path);
+               btrfs_free_path(dst_path);
+               return ret;
        }
 
        mutex_lock(&BTRFS_I(inode)->log_mutex);
@@ -3988,22 +4070,56 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        if (S_ISDIR(inode->i_mode)) {
                int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
 
-               if (inode_only == LOG_INODE_EXISTS)
-                       max_key_type = BTRFS_XATTR_ITEM_KEY;
+               if (inode_only == LOG_INODE_EXISTS) {
+                       max_key_type = BTRFS_INODE_EXTREF_KEY;
+                       max_key.type = max_key_type;
+               }
                ret = drop_objectid_items(trans, log, path, ino, max_key_type);
        } else {
-               if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-                                      &BTRFS_I(inode)->runtime_flags)) {
-                       clear_bit(BTRFS_INODE_COPY_EVERYTHING,
-                                 &BTRFS_I(inode)->runtime_flags);
-                       ret = btrfs_truncate_inode_items(trans, log,
-                                                        inode, 0, 0);
-               } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
-                                             &BTRFS_I(inode)->runtime_flags) ||
+               if (inode_only == LOG_INODE_EXISTS) {
+                       /*
+                        * Make sure the new inode item we write to the log has
+                        * the same isize as the current one (if it exists).
+                        * This is necessary to prevent data loss after log
+                        * replay, and also to prevent doing a wrong expanding
+                        * truncate - for e.g. create file, write 4K into offset
+                        * 0, fsync, write 4K into offset 4096, add hard link,
+                        * fsync some other file (to sync log), power fail - if
+                        * we use the inode's current i_size, after log replay
+                        * we get a 8Kb file, with the last 4Kb extent as a hole
+                        * (zeroes), as if an expanding truncate happened,
+                        * instead of getting a file of 4Kb only.
+                        */
+                       err = logged_inode_size(log, inode, path,
+                                               &logged_isize);
+                       if (err)
+                               goto out_unlock;
+               }
+               if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                            &BTRFS_I(inode)->runtime_flags)) {
+                       if (inode_only == LOG_INODE_EXISTS) {
+                               max_key.type = BTRFS_INODE_EXTREF_KEY;
+                               ret = drop_objectid_items(trans, log, path, ino,
+                                                         max_key.type);
+                       } else {
+                               clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                                         &BTRFS_I(inode)->runtime_flags);
+                               clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+                                         &BTRFS_I(inode)->runtime_flags);
+                               ret = btrfs_truncate_inode_items(trans, log,
+                                                                inode, 0, 0);
+                       }
+               } else if (test_bit(BTRFS_INODE_COPY_EVERYTHING,
+                                   &BTRFS_I(inode)->runtime_flags) ||
                           inode_only == LOG_INODE_EXISTS) {
-                       if (inode_only == LOG_INODE_ALL)
+                       if (inode_only == LOG_INODE_ALL) {
+                               clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+                                         &BTRFS_I(inode)->runtime_flags);
                                fast_search = true;
-                       max_key.type = BTRFS_XATTR_ITEM_KEY;
+                               max_key.type = BTRFS_XATTR_ITEM_KEY;
+                       } else {
+                               max_key.type = BTRFS_INODE_EXTREF_KEY;
+                       }
                        ret = drop_objectid_items(trans, log, path, ino,
                                                  max_key.type);
                } else {
@@ -4047,7 +4163,8 @@ again:
                }
 
                ret = copy_items(trans, inode, dst_path, path, &last_extent,
-                                ins_start_slot, ins_nr, inode_only);
+                                ins_start_slot, ins_nr, inode_only,
+                                logged_isize);
                if (ret < 0) {
                        err = ret;
                        goto out_unlock;
@@ -4071,7 +4188,7 @@ next_slot:
                if (ins_nr) {
                        ret = copy_items(trans, inode, dst_path, path,
                                         &last_extent, ins_start_slot,
-                                        ins_nr, inode_only);
+                                        ins_nr, inode_only, logged_isize);
                        if (ret < 0) {
                                err = ret;
                                goto out_unlock;
@@ -4092,7 +4209,8 @@ next_slot:
        }
        if (ins_nr) {
                ret = copy_items(trans, inode, dst_path, path, &last_extent,
-                                ins_start_slot, ins_nr, inode_only);
+                                ins_start_slot, ins_nr, inode_only,
+                                logged_isize);
                if (ret < 0) {
                        err = ret;
                        goto out_unlock;
@@ -4273,6 +4391,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
        struct dentry *old_parent = NULL;
        int ret = 0;
        u64 last_committed = root->fs_info->last_trans_committed;
+       const struct dentry * const first_parent = parent;
+       const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans >
+                                last_committed);
 
        sb = inode->i_sb;
 
@@ -4328,7 +4449,6 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
                goto end_trans;
        }
 
-       inode_only = LOG_INODE_EXISTS;
        while (1) {
                if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
                        break;
@@ -4337,8 +4457,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
                if (root != BTRFS_I(inode)->root)
                        break;
 
+               /*
+                * On unlink we must make sure our immediate parent directory
+                * inode is fully logged. This is to prevent leaving dangling
+                * directory index entries and a wrong directory inode's i_size.
+                * Not doing so can result in a directory being impossible to
+                * delete after log replay (rmdir will always fail with error
+                * -ENOTEMPTY).
+                */
+               if (did_unlink && parent == first_parent)
+                       inode_only = LOG_INODE_ALL;
+               else
+                       inode_only = LOG_INODE_EXISTS;
+
                if (BTRFS_I(inode)->generation >
-                   root->fs_info->last_trans_committed) {
+                   root->fs_info->last_trans_committed ||
+                   inode_only == LOG_INODE_ALL) {
                        ret = btrfs_log_inode(trans, root, inode, inode_only,
                                              0, LLONG_MAX, ctx);
                        if (ret)
index 50c5a8762aedfc7bf5be640b96b3eb4622b58f88..8222f6f74147972ba1b654c12b013c55a3b60825 100644 (file)
@@ -1310,6 +1310,8 @@ again:
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Failed to remove dev extent item");
+       } else {
+               trans->transaction->have_free_bgs = 1;
        }
 out:
        btrfs_free_path(path);
@@ -4196,7 +4198,7 @@ static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
 
 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
 {
-       if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
+       if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
                return;
 
        btrfs_set_fs_incompat(info, RAID56);
@@ -4803,10 +4805,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
 
        BUG_ON(em->start > logical || em->start + em->len < logical);
        map = (struct map_lookup *)em->bdev;
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID6)) {
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
                len = map->stripe_len * nr_data_stripes(map);
-       }
        free_extent_map(em);
        return len;
 }
@@ -4826,8 +4826,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
 
        BUG_ON(em->start > logical || em->start + em->len < logical);
        map = (struct map_lookup *)em->bdev;
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID6))
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
                ret = 1;
        free_extent_map(em);
        return ret;
@@ -4876,32 +4875,24 @@ static inline int parity_smaller(u64 a, u64 b)
 }
 
 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
-static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
+static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
 {
        struct btrfs_bio_stripe s;
-       int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
        int i;
        u64 l;
        int again = 1;
-       int m;
 
        while (again) {
                again = 0;
-               for (i = 0; i < real_stripes - 1; i++) {
-                       if (parity_smaller(raid_map[i], raid_map[i+1])) {
+               for (i = 0; i < num_stripes - 1; i++) {
+                       if (parity_smaller(bbio->raid_map[i],
+                                          bbio->raid_map[i+1])) {
                                s = bbio->stripes[i];
-                               l = raid_map[i];
+                               l = bbio->raid_map[i];
                                bbio->stripes[i] = bbio->stripes[i+1];
-                               raid_map[i] = raid_map[i+1];
+                               bbio->raid_map[i] = bbio->raid_map[i+1];
                                bbio->stripes[i+1] = s;
-                               raid_map[i+1] = l;
-
-                               if (bbio->tgtdev_map) {
-                                       m = bbio->tgtdev_map[i];
-                                       bbio->tgtdev_map[i] =
-                                                       bbio->tgtdev_map[i + 1];
-                                       bbio->tgtdev_map[i + 1] = m;
-                               }
+                               bbio->raid_map[i+1] = l;
 
                                again = 1;
                        }
@@ -4909,10 +4900,48 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
        }
 }
 
+static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
+{
+       struct btrfs_bio *bbio = kzalloc(
+                /* the size of the btrfs_bio */
+               sizeof(struct btrfs_bio) +
+               /* plus the variable array for the stripes */
+               sizeof(struct btrfs_bio_stripe) * (total_stripes) +
+               /* plus the variable array for the tgt dev */
+               sizeof(int) * (real_stripes) +
+               /*
+                * plus the raid_map, which includes both the tgt dev
+                * and the stripes
+                */
+               sizeof(u64) * (total_stripes),
+               GFP_NOFS);
+       if (!bbio)
+               return NULL;
+
+       atomic_set(&bbio->error, 0);
+       atomic_set(&bbio->refs, 1);
+
+       return bbio;
+}
+
+void btrfs_get_bbio(struct btrfs_bio *bbio)
+{
+       WARN_ON(!atomic_read(&bbio->refs));
+       atomic_inc(&bbio->refs);
+}
+
+void btrfs_put_bbio(struct btrfs_bio *bbio)
+{
+       if (!bbio)
+               return;
+       if (atomic_dec_and_test(&bbio->refs))
+               kfree(bbio);
+}
+
 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                             u64 logical, u64 *length,
                             struct btrfs_bio **bbio_ret,
-                            int mirror_num, u64 **raid_map_ret)
+                            int mirror_num, int need_raid_map)
 {
        struct extent_map *em;
        struct map_lookup *map;
@@ -4925,7 +4954,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        u64 stripe_nr_orig;
        u64 stripe_nr_end;
        u64 stripe_len;
-       u64 *raid_map = NULL;
        int stripe_index;
        int i;
        int ret = 0;
@@ -4976,7 +5004,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        stripe_offset = offset - stripe_offset;
 
        /* if we're here for raid56, we need to know the stripe aligned start */
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
                raid56_full_stripe_start = offset;
 
@@ -4989,8 +5017,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 
        if (rw & REQ_DISCARD) {
                /* we don't discard raid56 yet */
-               if (map->type &
-                   (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+               if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                        ret = -EOPNOTSUPP;
                        goto out;
                }
@@ -5000,7 +5027,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                /* For writes to RAID[56], allow a full stripeset across all disks.
                   For other RAID types and for RAID[56] reads, just allow a single
                   stripe (on a single disk). */
-               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+               if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
                    (rw & REQ_WRITE)) {
                        max_len = stripe_len * nr_data_stripes(map) -
                                (offset - raid56_full_stripe_start);
@@ -5047,7 +5074,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                u64 physical_of_found = 0;
 
                ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
-                            logical, &tmp_length, &tmp_bbio, 0, NULL);
+                            logical, &tmp_length, &tmp_bbio, 0, 0);
                if (ret) {
                        WARN_ON(tmp_bbio != NULL);
                        goto out;
@@ -5061,7 +5088,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                         * is not left of the left cursor
                         */
                        ret = -EIO;
-                       kfree(tmp_bbio);
+                       btrfs_put_bbio(tmp_bbio);
                        goto out;
                }
 
@@ -5096,11 +5123,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                } else {
                        WARN_ON(1);
                        ret = -EIO;
-                       kfree(tmp_bbio);
+                       btrfs_put_bbio(tmp_bbio);
                        goto out;
                }
 
-               kfree(tmp_bbio);
+               btrfs_put_bbio(tmp_bbio);
        } else if (mirror_num > map->num_stripes) {
                mirror_num = 0;
        }
@@ -5166,15 +5193,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                        mirror_num = stripe_index - old_stripe_index + 1;
                }
 
-       } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                               BTRFS_BLOCK_GROUP_RAID6)) {
-               u64 tmp;
-
-               if (raid_map_ret &&
+       } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+               if (need_raid_map &&
                    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
                     mirror_num > 1)) {
-                       int i, rot;
-
                        /* push stripe_nr back to the start of the full stripe */
                        stripe_nr = raid56_full_stripe_start;
                        do_div(stripe_nr, stripe_len * nr_data_stripes(map));
@@ -5183,32 +5205,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                        num_stripes = map->num_stripes;
                        max_errors = nr_parity_stripes(map);
 
-                       raid_map = kmalloc_array(num_stripes, sizeof(u64),
-                                          GFP_NOFS);
-                       if (!raid_map) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-
-                       /* Work out the disk rotation on this stripe-set */
-                       tmp = stripe_nr;
-                       rot = do_div(tmp, num_stripes);
-
-                       /* Fill in the logical address of each stripe */
-                       tmp = stripe_nr * nr_data_stripes(map);
-                       for (i = 0; i < nr_data_stripes(map); i++)
-                               raid_map[(i+rot) % num_stripes] =
-                                       em->start + (tmp + i) * map->stripe_len;
-
-                       raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
-                       if (map->type & BTRFS_BLOCK_GROUP_RAID6)
-                               raid_map[(i+rot+1) % num_stripes] =
-                                       RAID6_Q_STRIPE;
-
                        *length = map->stripe_len;
                        stripe_index = 0;
                        stripe_offset = 0;
                } else {
+                       u64 tmp;
+
                        /*
                         * Mirror #0 or #1 means the original data block.
                         * Mirror #2 is RAID5 parity block.
@@ -5246,17 +5248,42 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                tgtdev_indexes = num_stripes;
        }
 
-       bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes),
-                      GFP_NOFS);
+       bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
        if (!bbio) {
-               kfree(raid_map);
                ret = -ENOMEM;
                goto out;
        }
-       atomic_set(&bbio->error, 0);
        if (dev_replace_is_ongoing)
                bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
 
+       /* build raid_map */
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
+           need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+           mirror_num > 1)) {
+               u64 tmp;
+               int i, rot;
+
+               bbio->raid_map = (u64 *)((void *)bbio->stripes +
+                                sizeof(struct btrfs_bio_stripe) *
+                                num_alloc_stripes +
+                                sizeof(int) * tgtdev_indexes);
+
+               /* Work out the disk rotation on this stripe-set */
+               tmp = stripe_nr;
+               rot = do_div(tmp, num_stripes);
+
+               /* Fill in the logical address of each stripe */
+               tmp = stripe_nr * nr_data_stripes(map);
+               for (i = 0; i < nr_data_stripes(map); i++)
+                       bbio->raid_map[(i+rot) % num_stripes] =
+                               em->start + (tmp + i) * map->stripe_len;
+
+               bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
+               if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+                       bbio->raid_map[(i+rot+1) % num_stripes] =
+                               RAID6_Q_STRIPE;
+       }
+
        if (rw & REQ_DISCARD) {
                int factor = 0;
                int sub_stripes = 0;
@@ -5340,6 +5367,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
                max_errors = btrfs_chunk_max_errors(map);
 
+       if (bbio->raid_map)
+               sort_parity_stripes(bbio, num_stripes);
+
        tgtdev_indexes = 0;
        if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
            dev_replace->tgtdev != NULL) {
@@ -5427,6 +5457,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        }
 
        *bbio_ret = bbio;
+       bbio->map_type = map->type;
        bbio->num_stripes = num_stripes;
        bbio->max_errors = max_errors;
        bbio->mirror_num = mirror_num;
@@ -5443,10 +5474,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
                bbio->mirror_num = map->num_stripes + 1;
        }
-       if (raid_map) {
-               sort_parity_stripes(bbio, raid_map);
-               *raid_map_ret = raid_map;
-       }
 out:
        if (dev_replace_is_ongoing)
                btrfs_dev_replace_unlock(dev_replace);
@@ -5459,17 +5486,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                      struct btrfs_bio **bbio_ret, int mirror_num)
 {
        return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
-                                mirror_num, NULL);
+                                mirror_num, 0);
 }
 
 /* For Scrub/replace */
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
                     u64 logical, u64 *length,
                     struct btrfs_bio **bbio_ret, int mirror_num,
-                    u64 **raid_map_ret)
+                    int need_raid_map)
 {
        return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
-                                mirror_num, raid_map_ret);
+                                mirror_num, need_raid_map);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -5511,8 +5538,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                do_div(length, map->num_stripes / map->sub_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
                do_div(length, map->num_stripes);
-       else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                             BTRFS_BLOCK_GROUP_RAID6)) {
+       else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                do_div(length, nr_data_stripes(map));
                rmap_len = map->stripe_len * nr_data_stripes(map);
        }
@@ -5565,7 +5591,7 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e
                bio_endio_nodec(bio, err);
        else
                bio_endio(bio, err);
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
 }
 
 static void btrfs_end_bio(struct bio *bio, int err)
@@ -5808,7 +5834,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
-       u64 *raid_map = NULL;
        int ret;
        int dev_nr = 0;
        int total_devs = 1;
@@ -5819,7 +5844,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 
        btrfs_bio_counter_inc_blocked(root->fs_info);
        ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
-                             mirror_num, &raid_map);
+                             mirror_num, 1);
        if (ret) {
                btrfs_bio_counter_dec(root->fs_info);
                return ret;
@@ -5832,15 +5857,13 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        bbio->fs_info = root->fs_info;
        atomic_set(&bbio->stripes_pending, bbio->num_stripes);
 
-       if (raid_map) {
+       if (bbio->raid_map) {
                /* In this case, map_length has been set to the length of
                   a single stripe; not the whole write */
                if (rw & WRITE) {
-                       ret = raid56_parity_write(root, bio, bbio,
-                                                 raid_map, map_length);
+                       ret = raid56_parity_write(root, bio, bbio, map_length);
                } else {
-                       ret = raid56_parity_recover(root, bio, bbio,
-                                                   raid_map, map_length,
+                       ret = raid56_parity_recover(root, bio, bbio, map_length,
                                                    mirror_num, 1);
                }
 
@@ -6238,17 +6261,22 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        struct extent_buffer *sb;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
-       u8 *ptr;
-       unsigned long sb_ptr;
+       u8 *array_ptr;
+       unsigned long sb_array_offset;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
-       u32 cur;
+       u32 cur_offset;
        struct btrfs_key key;
 
-       sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
-                                         BTRFS_SUPER_INFO_SIZE);
+       ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
+       /*
+        * This will create extent buffer of nodesize, superblock size is
+        * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
+        * overallocate but we can keep it as-is, only the first page is used.
+        */
+       sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
@@ -6271,35 +6299,56 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
        array_size = btrfs_super_sys_array_size(super_copy);
 
-       ptr = super_copy->sys_chunk_array;
-       sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
-       cur = 0;
+       array_ptr = super_copy->sys_chunk_array;
+       sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
+       cur_offset = 0;
+
+       while (cur_offset < array_size) {
+               disk_key = (struct btrfs_disk_key *)array_ptr;
+               len = sizeof(*disk_key);
+               if (cur_offset + len > array_size)
+                       goto out_short_read;
 
-       while (cur < array_size) {
-               disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);
 
-               len = sizeof(*disk_key); ptr += len;
-               sb_ptr += len;
-               cur += len;
+               array_ptr += len;
+               sb_array_offset += len;
+               cur_offset += len;
 
                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
-                       chunk = (struct btrfs_chunk *)sb_ptr;
+                       chunk = (struct btrfs_chunk *)sb_array_offset;
+                       /*
+                        * At least one btrfs_chunk with one stripe must be
+                        * present, exact stripe count check comes afterwards
+                        */
+                       len = btrfs_chunk_item_size(1);
+                       if (cur_offset + len > array_size)
+                               goto out_short_read;
+
+                       num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+                       len = btrfs_chunk_item_size(num_stripes);
+                       if (cur_offset + len > array_size)
+                               goto out_short_read;
+
                        ret = read_one_chunk(root, &key, sb, chunk);
                        if (ret)
                                break;
-                       num_stripes = btrfs_chunk_num_stripes(sb, chunk);
-                       len = btrfs_chunk_item_size(num_stripes);
                } else {
                        ret = -EIO;
                        break;
                }
-               ptr += len;
-               sb_ptr += len;
-               cur += len;
+               array_ptr += len;
+               sb_array_offset += len;
+               cur_offset += len;
        }
        free_extent_buffer(sb);
        return ret;
+
+out_short_read:
+       printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
+                       len, cur_offset);
+       free_extent_buffer(sb);
+       return -EIO;
 }
 
 int btrfs_read_chunk_tree(struct btrfs_root *root)
index d6fe73c0f4a2008604e7de74419af9801b80d1e4..83069dec6898a1a024baa28b98dca3174a3959c0 100644 (file)
@@ -295,8 +295,10 @@ typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
 #define BTRFS_BIO_ORIG_BIO_SUBMITTED   (1 << 0)
 
 struct btrfs_bio {
+       atomic_t refs;
        atomic_t stripes_pending;
        struct btrfs_fs_info *fs_info;
+       u64 map_type; /* get from map_lookup->type */
        bio_end_io_t *end_io;
        struct bio *orig_bio;
        unsigned long flags;
@@ -307,6 +309,12 @@ struct btrfs_bio {
        int mirror_num;
        int num_tgtdevs;
        int *tgtdev_map;
+       /*
+        * logical block numbers for the start of each stripe
+        * The last one or two are p/q.  These are sorted,
+        * so raid_map[0] is the start of our full stripe
+        */
+       u64 *raid_map;
        struct btrfs_bio_stripe stripes[];
 };
 
@@ -388,19 +396,15 @@ struct btrfs_balance_control {
 
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length);
-
-#define btrfs_bio_size(total_stripes, real_stripes)            \
-       (sizeof(struct btrfs_bio) +                             \
-        (sizeof(struct btrfs_bio_stripe) * (total_stripes)) +  \
-        (sizeof(int) * (real_stripes)))
-
+void btrfs_get_bbio(struct btrfs_bio *bbio);
+void btrfs_put_bbio(struct btrfs_bio *bbio);
 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                    u64 logical, u64 *length,
                    struct btrfs_bio **bbio_ret, int mirror_num);
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
                     u64 logical, u64 *length,
                     struct btrfs_bio **bbio_ret, int mirror_num,
-                    u64 **raid_map_ret);
+                    int need_raid_map);
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                     u64 chunk_start, u64 physical, u64 devid,
                     u64 **logical, int *naddrs, int *stripe_len);
index ce1b115dcc28bc2968009e5e8182004da99cf29e..f601def05bdf00663f5b1666514e1810068e1b19 100644 (file)
@@ -574,7 +574,7 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
        /* extract the directory dentry from the cwd */
        get_fs_pwd(current->fs, &path);
 
-       if (!S_ISDIR(path.dentry->d_inode->i_mode))
+       if (!d_can_lookup(path.dentry))
                goto notdir;
 
        cachefiles_begin_secure(cache, &saved_cred);
@@ -646,7 +646,7 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
        /* extract the directory dentry from the cwd */
        get_fs_pwd(current->fs, &path);
 
-       if (!S_ISDIR(path.dentry->d_inode->i_mode))
+       if (!d_can_lookup(path.dentry))
                goto notdir;
 
        cachefiles_begin_secure(cache, &saved_cred);
index 1c7293c3a93ae935c5be8cdfc149fcd3f84e0dad..232426214fdd1849b21a4e3c080563e2132e8bd2 100644 (file)
@@ -437,7 +437,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
        if (!object->backer)
                return -ENOBUFS;
 
-       ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+       ASSERT(d_is_reg(object->backer));
 
        fscache_set_store_limit(&object->fscache, ni_size);
 
@@ -501,7 +501,7 @@ static void cachefiles_invalidate_object(struct fscache_operation *op)
               op->object->debug_id, (unsigned long long)ni_size);
 
        if (object->backer) {
-               ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+               ASSERT(d_is_reg(object->backer));
 
                fscache_set_store_limit(&object->fscache, ni_size);
 
index 7f8e83f9d74eb87712db9e8fdc7da95fcccaefdb..1e51714eb33e15f5e6624699021d26d95e1c16aa 100644 (file)
@@ -277,7 +277,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
        _debug("remove %p from %p", rep, dir);
 
        /* non-directories can just be unlinked */
-       if (!S_ISDIR(rep->d_inode->i_mode)) {
+       if (!d_is_dir(rep)) {
                _debug("unlink stale object");
 
                path.mnt = cache->mnt;
@@ -323,7 +323,7 @@ try_again:
                return 0;
        }
 
-       if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
+       if (!d_can_lookup(cache->graveyard)) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "Graveyard no longer a directory");
                return -EIO;
@@ -475,7 +475,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
        ASSERT(parent->dentry);
        ASSERT(parent->dentry->d_inode);
 
-       if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
+       if (!(d_is_dir(parent->dentry))) {
                // TODO: convert file to dir
                _leave("looking up in none directory");
                return -ENOBUFS;
@@ -539,7 +539,7 @@ lookup_again:
                        _debug("mkdir -> %p{%p{ino=%lu}}",
                               next, next->d_inode, next->d_inode->i_ino);
 
-               } else if (!S_ISDIR(next->d_inode->i_mode)) {
+               } else if (!d_can_lookup(next)) {
                        pr_err("inode %lu is not a directory\n",
                               next->d_inode->i_ino);
                        ret = -ENOBUFS;
@@ -568,8 +568,8 @@ lookup_again:
                        _debug("create -> %p{%p{ino=%lu}}",
                               next, next->d_inode, next->d_inode->i_ino);
 
-               } else if (!S_ISDIR(next->d_inode->i_mode) &&
-                          !S_ISREG(next->d_inode->i_mode)
+               } else if (!d_can_lookup(next) &&
+                          !d_is_reg(next)
                           ) {
                        pr_err("inode %lu is not a file or directory\n",
                               next->d_inode->i_ino);
@@ -642,7 +642,7 @@ lookup_again:
 
        /* open a file interface onto a data file */
        if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
-               if (S_ISREG(object->dentry->d_inode->i_mode)) {
+               if (d_is_reg(object->dentry)) {
                        const struct address_space_operations *aops;
 
                        ret = -EPERM;
@@ -763,7 +763,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
        /* we need to make sure the subdir is a directory */
        ASSERT(subdir->d_inode);
 
-       if (!S_ISDIR(subdir->d_inode->i_mode)) {
+       if (!d_can_lookup(subdir)) {
                pr_err("%s is not a directory\n", dirname);
                ret = -EIO;
                goto check_error;
index 616db0e77b44bd8481782047829dfd06605700ae..c6cd8d7a4eef91b379efc75888a94eba6345ac33 100644 (file)
@@ -900,7 +900,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
                return -ENOBUFS;
        }
 
-       ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+       ASSERT(d_is_reg(object->backer));
 
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);
index 5bd853ba44ffccfc39b45b511e6f3b31bd3c1e37..64fa248343f65461db232ee4ae0939beff0fc05c 100644 (file)
@@ -40,20 +40,6 @@ static inline void ceph_set_cached_acl(struct inode *inode,
        spin_unlock(&ci->i_ceph_lock);
 }
 
-static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode,
-                                                       int type)
-{
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       struct posix_acl *acl = ACL_NOT_CACHED;
-
-       spin_lock(&ci->i_ceph_lock);
-       if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0))
-               acl = get_cached_acl(inode, type);
-       spin_unlock(&ci->i_ceph_lock);
-
-       return acl;
-}
-
 struct posix_acl *ceph_get_acl(struct inode *inode, int type)
 {
        int size;
index 24be059fd1f8e073b7ee1d250b20f0bcdab16da0..fd5599d323620a2c5617ea5355e2e1320d6a0954 100644 (file)
@@ -196,17 +196,22 @@ static int readpage_nounlock(struct file *filp, struct page *page)
        u64 len = PAGE_CACHE_SIZE;
 
        if (off >= i_size_read(inode)) {
-               zero_user_segment(page, err, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }
 
-       /*
-        * Uptodate inline data should have been added into page cache
-        * while getting Fcr caps.
-        */
-       if (ci->i_inline_version != CEPH_INLINE_NONE)
-               return -EINVAL;
+       if (ci->i_inline_version != CEPH_INLINE_NONE) {
+               /*
+                * Uptodate inline data should have been added
+                * into page cache while getting Fcr caps.
+                */
+               if (off == 0)
+                       return -EINVAL;
+               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               SetPageUptodate(page);
+               return 0;
+       }
 
        err = ceph_readpage_from_fscache(inode, page);
        if (err == 0)
index b93c631c6c87d550e1f9674aaec5d41906d12143..8172775428a0b9165e68d293d23812a3f2be1593 100644 (file)
@@ -577,7 +577,6 @@ void ceph_add_cap(struct inode *inode,
                struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
                                                               realmino);
                if (realm) {
-                       ceph_get_snap_realm(mdsc, realm);
                        spin_lock(&realm->inodes_with_caps_lock);
                        ci->i_snap_realm = realm;
                        list_add(&ci->i_snap_realm_item,
@@ -1451,8 +1450,8 @@ static int __mark_caps_flushing(struct inode *inode,
        spin_lock(&mdsc->cap_dirty_lock);
        list_del_init(&ci->i_dirty_item);
 
-       ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
        if (list_empty(&ci->i_flushing_item)) {
+               ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
                mdsc->num_cap_flushing++;
                dout(" inode %p now flushing seq %lld\n", inode,
@@ -2073,17 +2072,16 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
  * requested from the MDS.
  */
 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
-                           loff_t endoff, int *got, struct page **pinned_page,
-                           int *check_max, int *err)
+                           loff_t endoff, int *got, int *check_max, int *err)
 {
        struct inode *inode = &ci->vfs_inode;
        int ret = 0;
-       int have, implemented, _got = 0;
+       int have, implemented;
        int file_wanted;
 
        dout("get_cap_refs %p need %s want %s\n", inode,
             ceph_cap_string(need), ceph_cap_string(want));
-again:
+
        spin_lock(&ci->i_ceph_lock);
 
        /* make sure file is actually open */
@@ -2138,50 +2136,34 @@ again:
                     inode, ceph_cap_string(have), ceph_cap_string(not),
                     ceph_cap_string(revoking));
                if ((revoking & not) == 0) {
-                       _got = need | (have & want);
-                       __take_cap_refs(ci, _got);
+                       *got = need | (have & want);
+                       __take_cap_refs(ci, *got);
                        ret = 1;
                }
        } else {
+               int session_readonly = false;
+               if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
+                       struct ceph_mds_session *s = ci->i_auth_cap->session;
+                       spin_lock(&s->s_cap_lock);
+                       session_readonly = s->s_readonly;
+                       spin_unlock(&s->s_cap_lock);
+               }
+               if (session_readonly) {
+                       dout("get_cap_refs %p needed %s but mds%d readonly\n",
+                            inode, ceph_cap_string(need), ci->i_auth_cap->mds);
+                       *err = -EROFS;
+                       ret = 1;
+                       goto out_unlock;
+               }
+
                dout("get_cap_refs %p have %s needed %s\n", inode,
                     ceph_cap_string(have), ceph_cap_string(need));
        }
 out_unlock:
        spin_unlock(&ci->i_ceph_lock);
 
-       if (ci->i_inline_version != CEPH_INLINE_NONE &&
-           (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
-           i_size_read(inode) > 0) {
-               int ret1;
-               struct page *page = find_get_page(inode->i_mapping, 0);
-               if (page) {
-                       if (PageUptodate(page)) {
-                               *pinned_page = page;
-                               goto out;
-                       }
-                       page_cache_release(page);
-               }
-               /*
-                * drop cap refs first because getattr while holding
-                * caps refs can cause deadlock.
-                */
-               ceph_put_cap_refs(ci, _got);
-               _got = 0;
-
-               /* getattr request will bring inline data into page cache */
-               ret1 = __ceph_do_getattr(inode, NULL,
-                                        CEPH_STAT_CAP_INLINE_DATA, true);
-               if (ret1 >= 0) {
-                       ret = 0;
-                       goto again;
-               }
-               *err = ret1;
-               ret = 1;
-       }
-out:
        dout("get_cap_refs %p ret %d got %s\n", inode,
-            ret, ceph_cap_string(_got));
-       *got = _got;
+            ret, ceph_cap_string(*got));
        return ret;
 }
 
@@ -2221,22 +2203,52 @@ static void check_max_size(struct inode *inode, loff_t endoff)
 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                  loff_t endoff, int *got, struct page **pinned_page)
 {
-       int check_max, ret, err;
+       int _got, check_max, ret, err = 0;
 
 retry:
        if (endoff > 0)
                check_max_size(&ci->vfs_inode, endoff);
+       _got = 0;
        check_max = 0;
-       err = 0;
        ret = wait_event_interruptible(ci->i_cap_wq,
-                                      try_get_cap_refs(ci, need, want, endoff,
-                                                       got, pinned_page,
-                                                       &check_max, &err));
+                               try_get_cap_refs(ci, need, want, endoff,
+                                                &_got, &check_max, &err));
        if (err)
                ret = err;
+       if (ret < 0)
+               return ret;
+
        if (check_max)
                goto retry;
-       return ret;
+
+       if (ci->i_inline_version != CEPH_INLINE_NONE &&
+           (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
+           i_size_read(&ci->vfs_inode) > 0) {
+               struct page *page = find_get_page(ci->vfs_inode.i_mapping, 0);
+               if (page) {
+                       if (PageUptodate(page)) {
+                               *pinned_page = page;
+                               goto out;
+                       }
+                       page_cache_release(page);
+               }
+               /*
+                * drop cap refs first because getattr while holding
+                * caps refs can cause deadlock.
+                */
+               ceph_put_cap_refs(ci, _got);
+               _got = 0;
+
+               /* getattr request will bring inline data into page cache */
+               ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
+                                       CEPH_STAT_CAP_INLINE_DATA, true);
+               if (ret < 0)
+                       return ret;
+               goto retry;
+       }
+out:
+       *got = _got;
+       return 0;
 }
 
 /*
@@ -2432,13 +2444,13 @@ static void invalidate_aliases(struct inode *inode)
  */
 static void handle_cap_grant(struct ceph_mds_client *mdsc,
                             struct inode *inode, struct ceph_mds_caps *grant,
-                            void *snaptrace, int snaptrace_len,
                             u64 inline_version,
                             void *inline_data, int inline_len,
                             struct ceph_buffer *xattr_buf,
                             struct ceph_mds_session *session,
                             struct ceph_cap *cap, int issued)
        __releases(ci->i_ceph_lock)
+       __releases(mdsc->snap_rwsem)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2639,10 +2651,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
        spin_unlock(&ci->i_ceph_lock);
 
        if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
-               down_write(&mdsc->snap_rwsem);
-               ceph_update_snap_trace(mdsc, snaptrace,
-                                      snaptrace + snaptrace_len, false);
-               downgrade_write(&mdsc->snap_rwsem);
                kick_flushing_inode_caps(mdsc, session, inode);
                up_read(&mdsc->snap_rwsem);
                if (newcaps & ~issued)
@@ -3052,6 +3060,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        struct ceph_cap *cap;
        struct ceph_mds_caps *h;
        struct ceph_mds_cap_peer *peer = NULL;
+       struct ceph_snap_realm *realm;
        int mds = session->s_mds;
        int op, issued;
        u32 seq, mseq;
@@ -3153,11 +3162,23 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                goto done_unlocked;
 
        case CEPH_CAP_OP_IMPORT:
+               realm = NULL;
+               if (snaptrace_len) {
+                       down_write(&mdsc->snap_rwsem);
+                       ceph_update_snap_trace(mdsc, snaptrace,
+                                              snaptrace + snaptrace_len,
+                                              false, &realm);
+                       downgrade_write(&mdsc->snap_rwsem);
+               } else {
+                       down_read(&mdsc->snap_rwsem);
+               }
                handle_cap_import(mdsc, inode, h, peer, session,
                                  &cap, &issued);
-               handle_cap_grant(mdsc, inode, h,  snaptrace, snaptrace_len,
+               handle_cap_grant(mdsc, inode, h,
                                 inline_version, inline_data, inline_len,
                                 msg->middle, session, cap, issued);
+               if (realm)
+                       ceph_put_snap_realm(mdsc, realm);
                goto done_unlocked;
        }
 
@@ -3177,7 +3198,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        case CEPH_CAP_OP_GRANT:
                __ceph_caps_issued(ci, &issued);
                issued |= __ceph_caps_dirty(ci);
-               handle_cap_grant(mdsc, inode, h, NULL, 0,
+               handle_cap_grant(mdsc, inode, h,
                                 inline_version, inline_data, inline_len,
                                 msg->middle, session, cap, issued);
                goto done_unlocked;
index c241603764fdc560ae72cea19410b49c15e860d2..83e9976f718983ccfb5d95c56893e81c7d01caa9 100644 (file)
@@ -26,8 +26,6 @@
  * point by name.
  */
 
-const struct inode_operations ceph_dir_iops;
-const struct file_operations ceph_dir_fops;
 const struct dentry_operations ceph_dentry_ops;
 
 /*
@@ -672,13 +670,17 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
-                * had in our cache (and thus got spliced).  Link our
-                * dentry to that inode, but don't hash it, just in
-                * case the VFS wants to dereference it.
+                * had in our cache (and thus got spliced). To not
+                * confuse VFS (especially when inode is a directory),
+                * we don't link our dentry to that inode, return an
+                * error instead.
+                *
+                * This event should be rare and it happens only when
+                * we talk to old MDS. Recent MDS does not send traceless
+                * reply for request that creates new inode.
                 */
-               BUG_ON(!result->d_inode);
-               d_instantiate(dentry, result->d_inode);
-               return 0;
+               d_drop(result);
+               return -ESTALE;
        }
        return PTR_ERR(result);
 }
@@ -902,7 +904,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
-               op = S_ISDIR(dentry->d_inode->i_mode) ?
+               op = d_is_dir(dentry) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
@@ -1335,6 +1337,13 @@ const struct file_operations ceph_dir_fops = {
        .fsync = ceph_dir_fsync,
 };
 
+const struct file_operations ceph_snapdir_fops = {
+       .iterate = ceph_readdir,
+       .llseek = ceph_dir_llseek,
+       .open = ceph_open,
+       .release = ceph_release,
+};
+
 const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
@@ -1357,6 +1366,14 @@ const struct inode_operations ceph_dir_iops = {
        .atomic_open = ceph_atomic_open,
 };
 
+const struct inode_operations ceph_snapdir_iops = {
+       .lookup = ceph_lookup,
+       .permission = ceph_permission,
+       .getattr = ceph_getattr,
+       .mkdir = ceph_mkdir,
+       .rmdir = ceph_unlink,
+};
+
 const struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_d_release,
index 905986dd4c3c9dabaf5bf3ea5e584e0ecc719a5b..d533075a823d5eb92e709547b8fe790c59cba981 100644 (file)
@@ -275,10 +275,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
+       err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out_req;
 
-       err = ceph_handle_snapdir(req, dentry, err);
        if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
 
@@ -292,7 +292,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        }
        if (err)
                goto out_req;
-       if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
@@ -392,13 +392,14 @@ more:
        if (ret >= 0) {
                int didpages;
                if (was_short && (pos + ret < inode->i_size)) {
-                       u64 tmp = min(this_len - ret,
-                                       inode->i_size - pos - ret);
+                       int zlen = min(this_len - ret,
+                                      inode->i_size - pos - ret);
+                       int zoff = (o_direct ? buf_align : io_align) +
+                                   read + ret;
                        dout(" zero gap %llu to %llu\n",
-                               pos + ret, pos + ret + tmp);
-                       ceph_zero_page_vector_range(page_align + read + ret,
-                                                       tmp, pages);
-                       ret += tmp;
+                               pos + ret, pos + ret + zlen);
+                       ceph_zero_page_vector_range(zoff, zlen, pages);
+                       ret += zlen;
                }
 
                didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
@@ -878,28 +879,34 @@ again:
 
                i_size = i_size_read(inode);
                if (retry_op == READ_INLINE) {
-                       /* does not support inline data > PAGE_SIZE */
-                       if (i_size > PAGE_CACHE_SIZE) {
-                               ret = -EIO;
-                       } else if (iocb->ki_pos < i_size) {
+                       BUG_ON(ret > 0 || read > 0);
+                       if (iocb->ki_pos < i_size &&
+                           iocb->ki_pos < PAGE_CACHE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
+                               end = min_t(loff_t, end, PAGE_CACHE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
                                                iocb->ki_pos & ~PAGE_MASK,
                                                end - iocb->ki_pos, to);
                                iocb->ki_pos += ret;
-                       } else {
-                               ret = 0;
+                               read += ret;
+                       }
+                       if (iocb->ki_pos < i_size && read < len) {
+                               size_t zlen = min_t(size_t, len - read,
+                                                   i_size - iocb->ki_pos);
+                               ret = iov_iter_zero(zlen, to);
+                               iocb->ki_pos += ret;
+                               read += ret;
                        }
                        __free_pages(page, 0);
-                       return ret;
+                       return read;
                }
 
                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
-                       ret < len) {
+                   ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
                             ", reading more\n", iocb->ki_pos,
                             inode->i_size);
index 6b51736051541ea54b6956cf6dce57576f8ffe6f..119c43c80638788f648272fbd1593e6e6bdbfd94 100644 (file)
@@ -82,8 +82,8 @@ struct inode *ceph_get_snapdir(struct inode *parent)
        inode->i_mode = parent->i_mode;
        inode->i_uid = parent->i_uid;
        inode->i_gid = parent->i_gid;
-       inode->i_op = &ceph_dir_iops;
-       inode->i_fop = &ceph_dir_fops;
+       inode->i_op = &ceph_snapdir_iops;
+       inode->i_fop = &ceph_snapdir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
        return inode;
@@ -838,30 +838,31 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
                       ceph_vinop(inode), inode->i_mode);
        }
 
-       /* set dir completion flag? */
-       if (S_ISDIR(inode->i_mode) &&
-           ci->i_files == 0 && ci->i_subdirs == 0 &&
-           ceph_snap(inode) == CEPH_NOSNAP &&
-           (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
-           (issued & CEPH_CAP_FILE_EXCL) == 0 &&
-           !__ceph_dir_is_complete(ci)) {
-               dout(" marking %p complete (empty)\n", inode);
-               __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count),
-                                       ci->i_ordered_count);
-       }
-
        /* were we issued a capability? */
        if (info->cap.caps) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
+                       unsigned caps = le32_to_cpu(info->cap.caps);
                        ceph_add_cap(inode, session,
                                     le64_to_cpu(info->cap.cap_id),
-                                    cap_fmode,
-                                    le32_to_cpu(info->cap.caps),
+                                    cap_fmode, caps,
                                     le32_to_cpu(info->cap.wanted),
                                     le32_to_cpu(info->cap.seq),
                                     le32_to_cpu(info->cap.mseq),
                                     le64_to_cpu(info->cap.realm),
                                     info->cap.flags, &new_cap);
+
+                       /* set dir completion flag? */
+                       if (S_ISDIR(inode->i_mode) &&
+                           ci->i_files == 0 && ci->i_subdirs == 0 &&
+                           (caps & CEPH_CAP_FILE_SHARED) &&
+                           (issued & CEPH_CAP_FILE_EXCL) == 0 &&
+                           !__ceph_dir_is_complete(ci)) {
+                               dout(" marking %p complete (empty)\n", inode);
+                               __ceph_dir_set_complete(ci,
+                                       atomic_read(&ci->i_release_count),
+                                       ci->i_ordered_count);
+                       }
+
                        wake = true;
                } else {
                        dout(" %p got snap_caps %s\n", inode,
@@ -1446,12 +1447,14 @@ retry_lookup:
                }
 
                if (!dn->d_inode) {
-                       dn = splice_dentry(dn, in, NULL);
-                       if (IS_ERR(dn)) {
-                               err = PTR_ERR(dn);
+                       struct dentry *realdn = splice_dentry(dn, in, NULL);
+                       if (IS_ERR(realdn)) {
+                               err = PTR_ERR(realdn);
+                               d_drop(dn);
                                dn = NULL;
                                goto next_item;
                        }
+                       dn = realdn;
                }
 
                di = dn->d_fsdata;
index 06ea5cd05cd9ebcf47161b813fc0a39472911b93..4347039ecc183d538c23f32019e5213da2ebf2f4 100644 (file)
@@ -245,6 +245,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
  */
 void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 {
+       struct file_lock *lock;
        struct file_lock_context *ctx;
 
        *fcntl_count = 0;
@@ -252,8 +253,12 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 
        ctx = inode->i_flctx;
        if (ctx) {
-               *fcntl_count = ctx->flc_posix_cnt;
-               *flock_count = ctx->flc_flock_cnt;
+               spin_lock(&ctx->flc_lock);
+               list_for_each_entry(lock, &ctx->flc_posix, fl_list)
+                       ++(*fcntl_count);
+               list_for_each_entry(lock, &ctx->flc_flock, fl_list)
+                       ++(*flock_count);
+               spin_unlock(&ctx->flc_lock);
        }
        dout("counted %d flock locks and %d fcntl locks",
             *flock_count, *fcntl_count);
index 5f62fb7a5d0ae9b3338c00b724691a096c7a621b..71c073f38e547522c81fa335fcae85c5d8c7ef58 100644 (file)
@@ -480,6 +480,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                mdsc->max_sessions = newmax;
        }
        mdsc->sessions[mds] = s;
+       atomic_inc(&mdsc->num_sessions);
        atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
 
        ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
@@ -503,6 +504,7 @@ static void __unregister_session(struct ceph_mds_client *mdsc,
        mdsc->sessions[s->s_mds] = NULL;
        ceph_con_close(&s->s_con);
        ceph_put_mds_session(s);
+       atomic_dec(&mdsc->num_sessions);
 }
 
 /*
@@ -842,8 +844,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
        struct ceph_options *opt = mdsc->fsc->client->options;
        void *p;
 
-       const char* metadata[3][2] = {
+       const char* metadata[][2] = {
                {"hostname", utsname()->nodename},
+               {"kernel_version", utsname()->release},
                {"entity_id", opt->name ? opt->name : ""},
                {NULL, NULL}
        };
@@ -1464,19 +1467,33 @@ out_unlocked:
        return err;
 }
 
+static int check_cap_flush(struct inode *inode, u64 want_flush_seq)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       int ret;
+       spin_lock(&ci->i_ceph_lock);
+       if (ci->i_flushing_caps)
+               ret = ci->i_cap_flush_seq >= want_flush_seq;
+       else
+               ret = 1;
+       spin_unlock(&ci->i_ceph_lock);
+       return ret;
+}
+
 /*
  * flush all dirty inode data to disk.
  *
  * returns true if we've flushed through want_flush_seq
  */
-static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
+static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
 {
-       int mds, ret = 1;
+       int mds;
 
        dout("check_cap_flush want %lld\n", want_flush_seq);
        mutex_lock(&mdsc->mutex);
-       for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
+       for (mds = 0; mds < mdsc->max_sessions; mds++) {
                struct ceph_mds_session *session = mdsc->sessions[mds];
+               struct inode *inode = NULL;
 
                if (!session)
                        continue;
@@ -1489,29 +1506,29 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                list_entry(session->s_cap_flushing.next,
                                           struct ceph_inode_info,
                                           i_flushing_item);
-                       struct inode *inode = &ci->vfs_inode;
 
-                       spin_lock(&ci->i_ceph_lock);
-                       if (ci->i_cap_flush_seq <= want_flush_seq) {
+                       if (!check_cap_flush(&ci->vfs_inode, want_flush_seq)) {
                                dout("check_cap_flush still flushing %p "
-                                    "seq %lld <= %lld to mds%d\n", inode,
-                                    ci->i_cap_flush_seq, want_flush_seq,
-                                    session->s_mds);
-                               ret = 0;
+                                    "seq %lld <= %lld to mds%d\n",
+                                    &ci->vfs_inode, ci->i_cap_flush_seq,
+                                    want_flush_seq, session->s_mds);
+                               inode = igrab(&ci->vfs_inode);
                        }
-                       spin_unlock(&ci->i_ceph_lock);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
 
-               if (!ret)
-                       return ret;
+               if (inode) {
+                       wait_event(mdsc->cap_flushing_wq,
+                                  check_cap_flush(inode, want_flush_seq));
+                       iput(inode);
+               }
+
                mutex_lock(&mdsc->mutex);
        }
 
        mutex_unlock(&mdsc->mutex);
        dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
-       return ret;
 }
 
 /*
@@ -1923,7 +1940,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        head->num_releases = cpu_to_le16(releases);
 
        /* time stamp */
-       ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));
+       {
+               struct ceph_timespec ts;
+               ceph_encode_timespec(&ts, &req->r_stamp);
+               ceph_encode_copy(&p, &ts, sizeof(ts));
+       }
 
        BUG_ON(p > end);
        msg->front.iov_len = p - msg->front.iov_base;
@@ -2012,7 +2033,11 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
 
                /* time stamp */
                p = msg->front.iov_base + req->r_request_release_offset;
-               ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));
+               {
+                       struct ceph_timespec ts;
+                       ceph_encode_timespec(&ts, &req->r_stamp);
+                       ceph_encode_copy(&p, &ts, sizeof(ts));
+               }
 
                msg->front.iov_len = p - msg->front.iov_base;
                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
@@ -2159,6 +2184,8 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
                p = rb_next(p);
                if (req->r_got_unsafe)
                        continue;
+               if (req->r_attempts > 0)
+                       continue; /* only new requests */
                if (req->r_session &&
                    req->r_session->s_mds == mds) {
                        dout(" kicking tid %llu\n", req->r_tid);
@@ -2286,6 +2313,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        struct ceph_mds_request *req;
        struct ceph_mds_reply_head *head = msg->front.iov_base;
        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
+       struct ceph_snap_realm *realm;
        u64 tid;
        int err, result;
        int mds = session->s_mds;
@@ -2401,11 +2429,13 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        }
 
        /* snap trace */
+       realm = NULL;
        if (rinfo->snapblob_len) {
                down_write(&mdsc->snap_rwsem);
                ceph_update_snap_trace(mdsc, rinfo->snapblob,
-                              rinfo->snapblob + rinfo->snapblob_len,
-                              le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
+                               rinfo->snapblob + rinfo->snapblob_len,
+                               le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
+                               &realm);
                downgrade_write(&mdsc->snap_rwsem);
        } else {
                down_read(&mdsc->snap_rwsem);
@@ -2423,6 +2453,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        mutex_unlock(&req->r_fill_mutex);
 
        up_read(&mdsc->snap_rwsem);
+       if (realm)
+               ceph_put_snap_realm(mdsc, realm);
 out_err:
        mutex_lock(&mdsc->mutex);
        if (!req->r_aborted) {
@@ -2487,6 +2519,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
                dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
                BUG_ON(req->r_err);
                BUG_ON(req->r_got_result);
+               req->r_attempts = 0;
                req->r_num_fwd = fwd_seq;
                req->r_resend_mds = next_mds;
                put_request_session(req);
@@ -2580,6 +2613,14 @@ static void handle_session(struct ceph_mds_session *session,
                send_flushmsg_ack(mdsc, session, seq);
                break;
 
+       case CEPH_SESSION_FORCE_RO:
+               dout("force_session_readonly %p\n", session);
+               spin_lock(&session->s_cap_lock);
+               session->s_readonly = true;
+               spin_unlock(&session->s_cap_lock);
+               wake_up_session_caps(session, 0);
+               break;
+
        default:
                pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
                WARN_ON(1);
@@ -2610,6 +2651,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session)
 {
        struct ceph_mds_request *req, *nreq;
+       struct rb_node *p;
        int err;
 
        dout("replay_unsafe_requests mds%d\n", session->s_mds);
@@ -2622,6 +2664,28 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
                        ceph_con_send(&session->s_con, req->r_request);
                }
        }
+
+       /*
+        * also re-send old requests when MDS enters reconnect stage. So that MDS
+        * can process completed request in clientreplay stage.
+        */
+       p = rb_first(&mdsc->request_tree);
+       while (p) {
+               req = rb_entry(p, struct ceph_mds_request, r_node);
+               p = rb_next(p);
+               if (req->r_got_unsafe)
+                       continue;
+               if (req->r_attempts == 0)
+                       continue; /* only old requests */
+               if (req->r_session &&
+                   req->r_session->s_mds == session->s_mds) {
+                       err = __prepare_send_request(mdsc, req, session->s_mds);
+                       if (!err) {
+                               ceph_msg_get(req->r_request);
+                               ceph_con_send(&session->s_con, req->r_request);
+                       }
+               }
+       }
        mutex_unlock(&mdsc->mutex);
 }
 
@@ -2787,6 +2851,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        spin_unlock(&session->s_gen_ttl_lock);
 
        spin_lock(&session->s_cap_lock);
+       /* don't know if session is readonly */
+       session->s_readonly = 0;
        /*
         * notify __ceph_remove_cap() that we are composing cap reconnect.
         * If a cap get released before being added to the cap reconnect,
@@ -2933,9 +2999,6 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                                mutex_unlock(&s->s_mutex);
                                s->s_state = CEPH_MDS_SESSION_RESTARTING;
                        }
-
-                       /* kick any requests waiting on the recovering mds */
-                       kick_requests(mdsc, i);
                } else if (oldstate == newstate) {
                        continue;  /* nothing new with this mds */
                }
@@ -3295,6 +3358,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
        init_waitqueue_head(&mdsc->session_close_wq);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
        mdsc->sessions = NULL;
+       atomic_set(&mdsc->num_sessions, 0);
        mdsc->max_sessions = 0;
        mdsc->stopping = 0;
        init_rwsem(&mdsc->snap_rwsem);
@@ -3428,14 +3492,17 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
        dout("sync\n");
        mutex_lock(&mdsc->mutex);
        want_tid = mdsc->last_tid;
-       want_flush = mdsc->cap_flush_seq;
        mutex_unlock(&mdsc->mutex);
-       dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
 
        ceph_flush_dirty_caps(mdsc);
+       spin_lock(&mdsc->cap_dirty_lock);
+       want_flush = mdsc->cap_flush_seq;
+       spin_unlock(&mdsc->cap_dirty_lock);
+
+       dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
 
        wait_unsafe_requests(mdsc, want_tid);
-       wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
+       wait_caps_flush(mdsc, want_flush);
 }
 
 /*
@@ -3443,17 +3510,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
  */
 static bool done_closing_sessions(struct ceph_mds_client *mdsc)
 {
-       int i, n = 0;
-
        if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
                return true;
-
-       mutex_lock(&mdsc->mutex);
-       for (i = 0; i < mdsc->max_sessions; i++)
-               if (mdsc->sessions[i])
-                       n++;
-       mutex_unlock(&mdsc->mutex);
-       return n == 0;
+       return atomic_read(&mdsc->num_sessions) == 0;
 }
 
 /*
index e2817d00f7d9f51922b2a1c04b20de97e410edfe..1875b5d985c6b0df2fbb38e16f39a78ecc76750d 100644 (file)
@@ -137,6 +137,7 @@ struct ceph_mds_session {
        int               s_nr_caps, s_trim_caps;
        int               s_num_cap_releases;
        int               s_cap_reconnect;
+       int               s_readonly;
        struct list_head  s_cap_releases; /* waiting cap_release messages */
        struct list_head  s_cap_releases_done; /* ready to send */
        struct ceph_cap  *s_cap_iterator;
@@ -272,6 +273,7 @@ struct ceph_mds_client {
        struct list_head        waiting_for_map;
 
        struct ceph_mds_session **sessions;    /* NULL for mds if no session */
+       atomic_t                num_sessions;
        int                     max_sessions;  /* len of s_mds_sessions */
        int                     stopping;      /* true if shutting down */
 
index ce35fbd4ba5d3fef3f5a05abc314df300fa4ff49..a97e39f09ba683349bb5f97e44f0d229b3a88936 100644 (file)
@@ -70,13 +70,11 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
         * safe.  we do need to protect against concurrent empty list
         * additions, however.
         */
-       if (atomic_read(&realm->nref) == 0) {
+       if (atomic_inc_return(&realm->nref) == 1) {
                spin_lock(&mdsc->snap_empty_lock);
                list_del_init(&realm->empty_item);
                spin_unlock(&mdsc->snap_empty_lock);
        }
-
-       atomic_inc(&realm->nref);
 }
 
 static void __insert_snap_realm(struct rb_root *root,
@@ -116,7 +114,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
        if (!realm)
                return ERR_PTR(-ENOMEM);
 
-       atomic_set(&realm->nref, 0);    /* tree does not take a ref */
+       atomic_set(&realm->nref, 1);    /* for caller */
        realm->ino = ino;
        INIT_LIST_HEAD(&realm->children);
        INIT_LIST_HEAD(&realm->child_item);
@@ -134,8 +132,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
  *
  * caller must hold snap_rwsem for write.
  */
-struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
-                                              u64 ino)
+static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
+                                                  u64 ino)
 {
        struct rb_node *n = mdsc->snap_realms.rb_node;
        struct ceph_snap_realm *r;
@@ -154,6 +152,16 @@ struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
        return NULL;
 }
 
+struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
+                                              u64 ino)
+{
+       struct ceph_snap_realm *r;
+       r = __lookup_snap_realm(mdsc, ino);
+       if (r)
+               ceph_get_snap_realm(mdsc, r);
+       return r;
+}
+
 static void __put_snap_realm(struct ceph_mds_client *mdsc,
                             struct ceph_snap_realm *realm);
 
@@ -273,7 +281,6 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
        }
        realm->parent_ino = parentino;
        realm->parent = parent;
-       ceph_get_snap_realm(mdsc, parent);
        list_add(&realm->child_item, &parent->children);
        return 1;
 }
@@ -631,12 +638,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
  * Caller must hold snap_rwsem for write.
  */
 int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
-                          void *p, void *e, bool deletion)
+                          void *p, void *e, bool deletion,
+                          struct ceph_snap_realm **realm_ret)
 {
        struct ceph_mds_snap_realm *ri;    /* encoded */
        __le64 *snaps;                     /* encoded */
        __le64 *prior_parent_snaps;        /* encoded */
-       struct ceph_snap_realm *realm;
+       struct ceph_snap_realm *realm = NULL;
+       struct ceph_snap_realm *first_realm = NULL;
        int invalidate = 0;
        int err = -ENOMEM;
        LIST_HEAD(dirty_realms);
@@ -704,13 +713,18 @@ more:
        dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
             realm, invalidate, p, e);
 
-       if (p < e)
-               goto more;
-
        /* invalidate when we reach the _end_ (root) of the trace */
-       if (invalidate)
+       if (invalidate && p >= e)
                rebuild_snap_realms(realm);
 
+       if (!first_realm)
+               first_realm = realm;
+       else
+               ceph_put_snap_realm(mdsc, realm);
+
+       if (p < e)
+               goto more;
+
        /*
         * queue cap snaps _after_ we've built the new snap contexts,
         * so that i_head_snapc can be set appropriately.
@@ -721,12 +735,21 @@ more:
                queue_realm_cap_snaps(realm);
        }
 
+       if (realm_ret)
+               *realm_ret = first_realm;
+       else
+               ceph_put_snap_realm(mdsc, first_realm);
+
        __cleanup_empty_realms(mdsc);
        return 0;
 
 bad:
        err = -EINVAL;
 fail:
+       if (realm && !IS_ERR(realm))
+               ceph_put_snap_realm(mdsc, realm);
+       if (first_realm)
+               ceph_put_snap_realm(mdsc, first_realm);
        pr_err("update_snap_trace error %d\n", err);
        return err;
 }
@@ -844,7 +867,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        if (IS_ERR(realm))
                                goto out;
                }
-               ceph_get_snap_realm(mdsc, realm);
 
                dout("splitting snap_realm %llx %p\n", realm->ino, realm);
                for (i = 0; i < num_split_inos; i++) {
@@ -905,7 +927,7 @@ skip_inode:
                /* we may have taken some of the old realm's children. */
                for (i = 0; i < num_split_realms; i++) {
                        struct ceph_snap_realm *child =
-                               ceph_lookup_snap_realm(mdsc,
+                               __lookup_snap_realm(mdsc,
                                           le64_to_cpu(split_realms[i]));
                        if (!child)
                                continue;
@@ -918,7 +940,7 @@ skip_inode:
         * snap, we can avoid queueing cap_snaps.
         */
        ceph_update_snap_trace(mdsc, p, e,
-                              op == CEPH_SNAP_OP_DESTROY);
+                              op == CEPH_SNAP_OP_DESTROY, NULL);
 
        if (op == CEPH_SNAP_OP_SPLIT)
                /* we took a reference when we created the realm, above */
index 5ae62587a71d5c7e4e413e1252572707433aaf7b..a63997b8bcff8ac6d5967361700d79f1b6e35c3f 100644 (file)
@@ -414,6 +414,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",noshare");
        if (opt->flags & CEPH_OPT_NOCRC)
                seq_puts(m, ",nocrc");
+       if (opt->flags & CEPH_OPT_NOMSGAUTH)
+               seq_puts(m, ",nocephx_require_signatures");
+       if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
+               seq_puts(m, ",notcp_nodelay");
 
        if (opt->name)
                seq_printf(m, ",name=%s", opt->name);
index e1aa32d0759d12c3709fb3d66c03982756d55599..04c8124ed30ecae65bf7344c96f7c984ed5a1d03 100644 (file)
@@ -693,7 +693,8 @@ extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
 extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
                                struct ceph_snap_realm *realm);
 extern int ceph_update_snap_trace(struct ceph_mds_client *m,
-                                 void *p, void *e, bool deletion);
+                                 void *p, void *e, bool deletion,
+                                 struct ceph_snap_realm **realm_ret);
 extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session,
                             struct ceph_msg *msg);
@@ -892,7 +893,9 @@ extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 int ceph_uninline_data(struct file *filp, struct page *locked_page);
 /* dir.c */
 extern const struct file_operations ceph_dir_fops;
+extern const struct file_operations ceph_snapdir_fops;
 extern const struct inode_operations ceph_dir_iops;
+extern const struct inode_operations ceph_snapdir_iops;
 extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
        ceph_snapdir_dentry_ops;
 
index 8fe1f7a21b3eaa60b27c346edfb3f0e9c2f7b25c..a94b3e67318283dd54d61fc595ecb2037ba3a515 100644 (file)
@@ -1129,7 +1129,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock;
        struct file_lock_context *flctx = inode->i_flctx;
-       unsigned int i;
+       unsigned int count = 0, i;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
@@ -1140,14 +1140,20 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
        if (!flctx)
                goto out;
 
+       spin_lock(&flctx->flc_lock);
+       list_for_each(el, &flctx->flc_posix) {
+               count++;
+       }
+       spin_unlock(&flctx->flc_lock);
+
        INIT_LIST_HEAD(&locks_to_send);
 
        /*
-        * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks
-        * can be added to the list while we are holding cinode->lock_sem that
+        * Allocating count locks is enough because no FL_POSIX locks can be
+        * added to the list while we are holding cinode->lock_sem that
         * protects locking operations of this inode.
         */
-       for (i = 0; i < flctx->flc_posix_cnt; i++) {
+       for (i = 0; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
index 281ee011bb6a936125cdcfdde6d8122e66447b15..60cb88c1dd2bf88bb12f3e5451e33ed51d5ac1ff 100644 (file)
@@ -304,7 +304,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
                             (const char *) old_name, (const char *)new_name);
        if (!error) {
                if (new_dentry->d_inode) {
-                       if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+                       if (d_is_dir(new_dentry)) {
                                coda_dir_drop_nlink(old_dir);
                                coda_dir_inc_nlink(new_dir);
                        }
index a315677e44d34d8501b1b03629da0201283f5b1f..b65d1ef532d52d692cc7ebbdc3c7350d1dd9d735 100644 (file)
@@ -69,14 +69,13 @@ extern struct kmem_cache *configfs_dir_cachep;
 extern int configfs_is_root(struct config_item *item);
 
 extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
-extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
+extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *));
 
 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
 extern int configfs_make_dirent(struct configfs_dirent *,
                                struct dentry *, void *, umode_t, int);
 extern int configfs_dirent_is_ready(struct configfs_dirent *);
 
-extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int);
 extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
 
 extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
index c9c298bd3058924b8fed471ccdebabeef2feeb88..cf0db005d2f58ab2ed2b42217777f643e6d0d235 100644 (file)
@@ -240,60 +240,26 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd,
        return 0;
 }
 
-static int init_dir(struct inode * inode)
+static void init_dir(struct inode * inode)
 {
        inode->i_op = &configfs_dir_inode_operations;
        inode->i_fop = &configfs_dir_operations;
 
        /* directory inodes start off with i_nlink == 2 (for "." entry) */
        inc_nlink(inode);
-       return 0;
 }
 
-static int configfs_init_file(struct inode * inode)
+static void configfs_init_file(struct inode * inode)
 {
        inode->i_size = PAGE_SIZE;
        inode->i_fop = &configfs_file_operations;
-       return 0;
 }
 
-static int init_symlink(struct inode * inode)
+static void init_symlink(struct inode * inode)
 {
        inode->i_op = &configfs_symlink_inode_operations;
-       return 0;
-}
-
-static int create_dir(struct config_item *k, struct dentry *d)
-{
-       int error;
-       umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
-       struct dentry *p = d->d_parent;
-
-       BUG_ON(!k);
-
-       error = configfs_dirent_exists(p->d_fsdata, d->d_name.name);
-       if (!error)
-               error = configfs_make_dirent(p->d_fsdata, d, k, mode,
-                                            CONFIGFS_DIR | CONFIGFS_USET_CREATING);
-       if (!error) {
-               configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata);
-               error = configfs_create(d, mode, init_dir);
-               if (!error) {
-                       inc_nlink(p->d_inode);
-               } else {
-                       struct configfs_dirent *sd = d->d_fsdata;
-                       if (sd) {
-                               spin_lock(&configfs_dirent_lock);
-                               list_del_init(&sd->s_sibling);
-                               spin_unlock(&configfs_dirent_lock);
-                               configfs_put(sd);
-                       }
-               }
-       }
-       return error;
 }
 
-
 /**
  *     configfs_create_dir - create a directory for an config_item.
  *     @item:          config_itemwe're creating directory for.
@@ -303,11 +269,37 @@ static int create_dir(struct config_item *k, struct dentry *d)
  *     until it is validated by configfs_dir_set_ready()
  */
 
-static int configfs_create_dir(struct config_item * item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
 {
-       int error = create_dir(item, dentry);
-       if (!error)
+       int error;
+       umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
+       struct dentry *p = dentry->d_parent;
+
+       BUG_ON(!item);
+
+       error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
+       if (unlikely(error))
+               return error;
+
+       error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
+                                    CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+       if (unlikely(error))
+               return error;
+
+       configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
+       error = configfs_create(dentry, mode, init_dir);
+       if (!error) {
+               inc_nlink(p->d_inode);
                item->ci_dentry = dentry;
+       } else {
+               struct configfs_dirent *sd = dentry->d_fsdata;
+               if (sd) {
+                       spin_lock(&configfs_dirent_lock);
+                       list_del_init(&sd->s_sibling);
+                       spin_unlock(&configfs_dirent_lock);
+                       configfs_put(sd);
+               }
+       }
        return error;
 }
 
index 1d1c41f1014d9039c0a3ec217d6c6e53856760b1..56d2cdc9ae0a7213f91bb5e332b1513fda887b79 100644 (file)
@@ -313,21 +313,6 @@ const struct file_operations configfs_file_operations = {
        .release        = configfs_release,
 };
 
-
-int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type)
-{
-       struct configfs_dirent * parent_sd = dir->d_fsdata;
-       umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
-       int error = 0;
-
-       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
-       error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
-       mutex_unlock(&dir->d_inode->i_mutex);
-
-       return error;
-}
-
-
 /**
  *     configfs_create_file - create an attribute file for an item.
  *     @item:  item we're creating for.
@@ -336,9 +321,16 @@ int configfs_add_file(struct dentry * dir, const struct configfs_attribute * att
 
 int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr)
 {
-       BUG_ON(!item || !item->ci_dentry || !attr);
+       struct dentry *dir = item->ci_dentry;
+       struct configfs_dirent *parent_sd = dir->d_fsdata;
+       umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
+       int error = 0;
 
-       return configfs_add_file(item->ci_dentry, attr,
-                                CONFIGFS_ITEM_ATTR);
+       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
+       error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
+                                    CONFIGFS_ITEM_ATTR);
+       mutex_unlock(&dir->d_inode->i_mutex);
+
+       return error;
 }
 
index 65af861471541924525bcd9c04ab32a931296ba8..5423a6a6ecc8350c0284d307ac47cf6afad9238a 100644 (file)
@@ -176,7 +176,7 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
 
 #endif /* CONFIG_LOCKDEP */
 
-int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *))
+int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *))
 {
        int error = 0;
        struct inode *inode = NULL;
@@ -198,13 +198,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct ino
        p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
        configfs_set_inode_lock_class(sd, inode);
 
-       if (init) {
-               error = init(inode);
-               if (error) {
-                       iput(inode);
-                       return error;
-               }
-       }
+       init(inode);
        d_instantiate(dentry, inode);
        if (S_ISDIR(mode) || S_ISLNK(mode))
                dget(dentry);  /* pin link and directory dentries in core */
@@ -242,7 +236,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
 
        if (dentry) {
                spin_lock(&dentry->d_lock);
-               if (!(d_unhashed(dentry) && dentry->d_inode)) {
+               if (!d_unhashed(dentry) && dentry->d_inode) {
                        dget_dlock(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dentry->d_lock);
index b5c86ffd5033420523934c7153080d8cdc605bea..f319926ddf8cbc5cc90a003628bf500701ce3c25 100644 (file)
@@ -572,7 +572,7 @@ void do_coredump(const siginfo_t *siginfo)
                         *
                         * Normally core limits are irrelevant to pipes, since
                         * we're not writing to the file system, but we use
-                        * cprm.limit of 1 here as a speacial value, this is a
+                        * cprm.limit of 1 here as a special value, this is a
                         * consistent way to catch recursive crashes.
                         * We can still crash if the core_pattern binary sets
                         * RLIM_CORE = !1, but it runs as root, and can do
index dc400fd29f4d1c3c8e2265b4275aaabe4250e1fb..c71e3732e53bcebbffca749e65b7095fd4ff6e7e 100644 (file)
@@ -1659,9 +1659,25 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
 }
 EXPORT_SYMBOL(d_set_d_op);
 
+
+/*
+ * d_set_fallthru - Mark a dentry as falling through to a lower layer
+ * @dentry - The dentry to mark
+ *
+ * Mark a dentry as falling through to the lower layer (as set with
+ * d_pin_lower()).  This flag may be recorded on the medium.
+ */
+void d_set_fallthru(struct dentry *dentry)
+{
+       spin_lock(&dentry->d_lock);
+       dentry->d_flags |= DCACHE_FALLTHRU;
+       spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(d_set_fallthru);
+
 static unsigned d_flags_for_inode(struct inode *inode)
 {
-       unsigned add_flags = DCACHE_FILE_TYPE;
+       unsigned add_flags = DCACHE_REGULAR_TYPE;
 
        if (!inode)
                return DCACHE_MISS_TYPE;
@@ -1674,13 +1690,21 @@ static unsigned d_flags_for_inode(struct inode *inode)
                        else
                                inode->i_opflags |= IOP_LOOKUP;
                }
-       } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
-               if (unlikely(inode->i_op->follow_link))
+               goto type_determined;
+       }
+
+       if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
+               if (unlikely(inode->i_op->follow_link)) {
                        add_flags = DCACHE_SYMLINK_TYPE;
-               else
-                       inode->i_opflags |= IOP_NOFOLLOW;
+                       goto type_determined;
+               }
+               inode->i_opflags |= IOP_NOFOLLOW;
        }
 
+       if (unlikely(!S_ISREG(inode->i_mode)))
+               add_flags = DCACHE_SPECIAL_TYPE;
+
+type_determined:
        if (unlikely(IS_AUTOMOUNT(inode)))
                add_flags |= DCACHE_NEED_AUTOMOUNT;
        return add_flags;
@@ -1691,7 +1715,8 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        unsigned add_flags = d_flags_for_inode(inode);
 
        spin_lock(&dentry->d_lock);
-       __d_set_type(dentry, add_flags);
+       dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+       dentry->d_flags |= add_flags;
        if (inode)
                hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
        dentry->d_inode = inode;
index 45b18a5e225c3bc6fffe927cbad3257ccafc2109..96400ab42d135e7d572d29de9d4f3637fedd7817 100644 (file)
@@ -169,10 +169,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+static void debugfs_evict_inode(struct inode *inode)
+{
+       truncate_inode_pages_final(&inode->i_data);
+       clear_inode(inode);
+       if (S_ISLNK(inode->i_mode))
+               kfree(inode->i_private);
+}
+
 static const struct super_operations debugfs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = debugfs_remount,
        .show_options   = debugfs_show_options,
+       .evict_inode    = debugfs_evict_inode,
 };
 
 static struct vfsmount *debugfs_automount(struct path *path)
@@ -511,23 +520,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
        int ret = 0;
 
        if (debugfs_positive(dentry)) {
-               if (dentry->d_inode) {
-                       dget(dentry);
-                       switch (dentry->d_inode->i_mode & S_IFMT) {
-                       case S_IFDIR:
-                               ret = simple_rmdir(parent->d_inode, dentry);
-                               break;
-                       case S_IFLNK:
-                               kfree(dentry->d_inode->i_private);
-                               /* fall through */
-                       default:
-                               simple_unlink(parent->d_inode, dentry);
-                               break;
-                       }
-                       if (!ret)
-                               d_delete(dentry);
-                       dput(dentry);
-               }
+               dget(dentry);
+               if (S_ISDIR(dentry->d_inode->i_mode))
+                       ret = simple_rmdir(parent->d_inode, dentry);
+               else
+                       simple_unlink(parent->d_inode, dentry);
+               if (!ret)
+                       d_delete(dentry);
+               dput(dentry);
        }
        return ret;
 }
@@ -690,7 +690,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
        }
        d_move(old_dentry, dentry);
        fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name,
-               S_ISDIR(old_dentry->d_inode->i_mode),
+               d_is_dir(old_dentry),
                NULL, old_dentry);
        fsnotify_oldname_free(old_name);
        unlock_rename(new_dir, old_dir);
index 6f4e659f508f303bdadcc8922b2cc1a1397bb51b..b07731e68c0b4d39cf75a5840033638cf37c123f 100644 (file)
@@ -230,7 +230,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
        }
        ecryptfs_set_file_lower(
                file, ecryptfs_inode_to_private(inode)->lower_file);
-       if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
+       if (d_is_dir(ecryptfs_dentry)) {
                ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
                mutex_lock(&crypt_stat->cs_mutex);
                crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
index 34b36a5040593e960ff3da4fdc20dcaa5c5cb8f6..b08b5187f6622cb6c7934d6dba9fcdfdbb0aefb6 100644 (file)
@@ -907,9 +907,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
        lower_inode = ecryptfs_inode_to_lower(inode);
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
        mutex_lock(&crypt_stat->cs_mutex);
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
-       else if (S_ISREG(dentry->d_inode->i_mode)
+       else if (d_is_reg(dentry)
                 && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
                     || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
                struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
index fdfd206c737a39d20853ebe069ac7dc706f61dd6..714cd37a6ba30fd970b8384a2c2b26fc5209351f 100644 (file)
@@ -429,7 +429,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
        if (IS_ERR(result))
                return result;
 
-       if (S_ISDIR(result->d_inode->i_mode)) {
+       if (d_is_dir(result)) {
                /*
                 * This request is for a directory.
                 *
index 982d934fd9ac98338377d3b1621b3d577531b6e6..f63c3d5805c4c156ad3ed412cbecf85e700cf9d2 100644 (file)
@@ -364,7 +364,8 @@ struct flex_groups {
 #define EXT4_DIRTY_FL                  0x00000100
 #define EXT4_COMPRBLK_FL               0x00000200 /* One or more compressed clusters */
 #define EXT4_NOCOMPR_FL                        0x00000400 /* Don't compress */
-#define EXT4_ECOMPR_FL                 0x00000800 /* Compression error */
+       /* nb: was previously EXT2_ECOMPR_FL */
+#define EXT4_ENCRYPT_FL                        0x00000800 /* encrypted file */
 /* End compression flags --- maybe not all used */
 #define EXT4_INDEX_FL                  0x00001000 /* hash-indexed directory */
 #define EXT4_IMAGIC_FL                 0x00002000 /* AFS directory */
@@ -421,7 +422,7 @@ enum {
        EXT4_INODE_DIRTY        = 8,
        EXT4_INODE_COMPRBLK     = 9,    /* One or more compressed clusters */
        EXT4_INODE_NOCOMPR      = 10,   /* Don't compress */
-       EXT4_INODE_ECOMPR       = 11,   /* Compression error */
+       EXT4_INODE_ENCRYPT      = 11,   /* Compression error */
 /* End compression flags --- maybe not all used */
        EXT4_INODE_INDEX        = 12,   /* hash-indexed directory */
        EXT4_INODE_IMAGIC       = 13,   /* AFS directory */
@@ -466,7 +467,7 @@ static inline void ext4_check_flag_values(void)
        CHECK_FLAG_VALUE(DIRTY);
        CHECK_FLAG_VALUE(COMPRBLK);
        CHECK_FLAG_VALUE(NOCOMPR);
-       CHECK_FLAG_VALUE(ECOMPR);
+       CHECK_FLAG_VALUE(ENCRYPT);
        CHECK_FLAG_VALUE(INDEX);
        CHECK_FLAG_VALUE(IMAGIC);
        CHECK_FLAG_VALUE(JOURNAL_DATA);
@@ -1048,6 +1049,12 @@ extern void ext4_set_bits(void *bm, int cur, int len);
 /* Metadata checksum algorithm codes */
 #define EXT4_CRC32C_CHKSUM             1
 
+/* Encryption algorithms */
+#define EXT4_ENCRYPTION_MODE_INVALID           0
+#define EXT4_ENCRYPTION_MODE_AES_256_XTS       1
+#define EXT4_ENCRYPTION_MODE_AES_256_GCM       2
+#define EXT4_ENCRYPTION_MODE_AES_256_CBC       3
+
 /*
  * Structure of the super block
  */
@@ -1161,7 +1168,8 @@ struct ext4_super_block {
        __le32  s_grp_quota_inum;       /* inode for tracking group quota */
        __le32  s_overhead_clusters;    /* overhead blocks/clusters in fs */
        __le32  s_backup_bgs[2];        /* groups with sparse_super2 SBs */
-       __le32  s_reserved[106];        /* Padding to the end of the block */
+       __u8    s_encrypt_algos[4];     /* Encryption algorithms in use  */
+       __le32  s_reserved[105];        /* Padding to the end of the block */
        __le32  s_checksum;             /* crc32c(superblock) */
 };
 
@@ -1527,6 +1535,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
  * GDT_CSUM bits are mutually exclusive.
  */
 #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM   0x0400
+#define EXT4_FEATURE_RO_COMPAT_READONLY                0x1000
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION      0x0001
 #define EXT4_FEATURE_INCOMPAT_FILETYPE         0x0002
@@ -1542,6 +1551,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */
 #define EXT4_FEATURE_INCOMPAT_LARGEDIR         0x4000 /* >2GB or 3-lvl htree */
 #define EXT4_FEATURE_INCOMPAT_INLINE_DATA      0x8000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_ENCRYPT          0x10000
 
 #define EXT2_FEATURE_COMPAT_SUPP       EXT4_FEATURE_COMPAT_EXT_ATTR
 #define EXT2_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
index 6b9878a24182b06125cb496ef973ff0d9b739106..45fe924f82bce2ff76e3e74b45ec1833729433ea 100644 (file)
@@ -1401,10 +1401,7 @@ end_range:
                                 * to free. Everything was covered by the start
                                 * of the range.
                                 */
-                               return 0;
-                       } else {
-                               /* Shared branch grows from an indirect block */
-                               partial2--;
+                               goto do_indirects;
                        }
                } else {
                        /*
@@ -1435,56 +1432,96 @@ end_range:
        /* Punch happened within the same level (n == n2) */
        partial = ext4_find_shared(inode, n, offsets, chain, &nr);
        partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
-       /*
-        * ext4_find_shared returns Indirect structure which
-        * points to the last element which should not be
-        * removed by truncate. But this is end of the range
-        * in punch_hole so we need to point to the next element
-        */
-       partial2->p++;
-       while ((partial > chain) || (partial2 > chain2)) {
-               /* We're at the same block, so we're almost finished */
-               if ((partial->bh && partial2->bh) &&
-                   (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
-                       if ((partial > chain) && (partial2 > chain2)) {
+
+       /* Free top, but only if partial2 isn't its subtree. */
+       if (nr) {
+               int level = min(partial - chain, partial2 - chain2);
+               int i;
+               int subtree = 1;
+
+               for (i = 0; i <= level; i++) {
+                       if (offsets[i] != offsets2[i]) {
+                               subtree = 0;
+                               break;
+                       }
+               }
+
+               if (!subtree) {
+                       if (partial == chain) {
+                               /* Shared branch grows from the inode */
+                               ext4_free_branches(handle, inode, NULL,
+                                                  &nr, &nr+1,
+                                                  (chain+n-1) - partial);
+                               *partial->p = 0;
+                       } else {
+                               /* Shared branch grows from an indirect block */
+                               BUFFER_TRACE(partial->bh, "get_write_access");
                                ext4_free_branches(handle, inode, partial->bh,
-                                                  partial->p + 1,
-                                                  partial2->p,
+                                                  partial->p,
+                                                  partial->p+1,
                                                   (chain+n-1) - partial);
-                               BUFFER_TRACE(partial->bh, "call brelse");
-                               brelse(partial->bh);
-                               BUFFER_TRACE(partial2->bh, "call brelse");
-                               brelse(partial2->bh);
                        }
-                       return 0;
                }
+       }
+
+       if (!nr2) {
                /*
-                * Clear the ends of indirect blocks on the shared branch
-                * at the start of the range
+                * ext4_find_shared returns Indirect structure which
+                * points to the last element which should not be
+                * removed by truncate. But this is end of the range
+                * in punch_hole so we need to point to the next element
                 */
-               if (partial > chain) {
+               partial2->p++;
+       }
+
+       while (partial > chain || partial2 > chain2) {
+               int depth = (chain+n-1) - partial;
+               int depth2 = (chain2+n2-1) - partial2;
+
+               if (partial > chain && partial2 > chain2 &&
+                   partial->bh->b_blocknr == partial2->bh->b_blocknr) {
+                       /*
+                        * We've converged on the same block. Clear the range,
+                        * then we're done.
+                        */
                        ext4_free_branches(handle, inode, partial->bh,
-                                  partial->p + 1,
-                                  (__le32 *)partial->bh->b_data+addr_per_block,
-                                  (chain+n-1) - partial);
+                                          partial->p + 1,
+                                          partial2->p,
+                                          (chain+n-1) - partial);
                        BUFFER_TRACE(partial->bh, "call brelse");
                        brelse(partial->bh);
-                       partial--;
+                       BUFFER_TRACE(partial2->bh, "call brelse");
+                       brelse(partial2->bh);
+                       return 0;
                }
+
                /*
-                * Clear the ends of indirect blocks on the shared branch
-                * at the end of the range
+                * The start and end partial branches may not be at the same
+                * level even though the punch happened within one level. So, we
+                * give them a chance to arrive at the same level, then walk
+                * them in step with each other until we converge on the same
+                * block.
                 */
-               if (partial2 > chain2) {
+               if (partial > chain && depth <= depth2) {
+                       ext4_free_branches(handle, inode, partial->bh,
+                                          partial->p + 1,
+                                          (__le32 *)partial->bh->b_data+addr_per_block,
+                                          (chain+n-1) - partial);
+                       BUFFER_TRACE(partial->bh, "call brelse");
+                       brelse(partial->bh);
+                       partial--;
+               }
+               if (partial2 > chain2 && depth2 <= depth) {
                        ext4_free_branches(handle, inode, partial2->bh,
                                           (__le32 *)partial2->bh->b_data,
                                           partial2->p,
-                                          (chain2+n-1) - partial2);
+                                          (chain2+n2-1) - partial2);
                        BUFFER_TRACE(partial2->bh, "call brelse");
                        brelse(partial2->bh);
                        partial2--;
                }
        }
+       return 0;
 
 do_indirects:
        /* Kill the remaining (whole) subtrees */
index 85404f15e53a28860ce5a7be08220106b76fc06f..5cb9a212b86f3efd69ca604df07dc20b901dabb1 100644 (file)
@@ -1024,6 +1024,7 @@ static int ext4_write_end(struct file *file,
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
+       loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
 
@@ -1054,6 +1055,8 @@ static int ext4_write_end(struct file *file,
        unlock_page(page);
        page_cache_release(page);
 
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
        /*
         * Don't mark the inode dirty under page lock. First, it unnecessarily
         * makes the holding time of page lock longer. Second, it forces lock
@@ -1095,6 +1098,7 @@ static int ext4_journalled_write_end(struct file *file,
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
+       loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
@@ -1127,6 +1131,9 @@ static int ext4_journalled_write_end(struct file *file,
        unlock_page(page);
        page_cache_release(page);
 
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
+
        if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
index 1adac6868e6fd0e97f91fa871ed45288fffc5cb6..e061e66c82800f700b7642e4c82fa2cc836be05f 100644 (file)
@@ -2779,6 +2779,12 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
        if (readonly)
                return 1;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_READONLY)) {
+               ext4_msg(sb, KERN_INFO, "filesystem is read-only");
+               sb->s_flags |= MS_RDONLY;
+               return 1;
+       }
+
        /* Check that feature set is OK for a read-write mount */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) {
                ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
@@ -3936,9 +3942,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
-       init_timer(&sbi->s_err_report);
-       sbi->s_err_report.function = print_daily_error_info;
-       sbi->s_err_report.data = (unsigned long) sb;
+       setup_timer(&sbi->s_err_report, print_daily_error_info,
+               (unsigned long) sb);
 
        /* Register extent status tree shrinker */
        if (ext4_es_register_shrinker(sbi))
@@ -4866,9 +4871,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if (sbi->s_journal && sbi->s_journal->j_task->io_context)
                journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
 
-       /*
-        * Allow the "check" option to be passed as a remount option.
-        */
        if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
                err = -EINVAL;
                goto restore_opts;
@@ -4877,17 +4879,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
            test_opt(sb, JOURNAL_CHECKSUM)) {
                ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-                        "during remount not supported");
-               err = -EINVAL;
-               goto restore_opts;
-       }
-
-       if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
-           test_opt(sb, JOURNAL_CHECKSUM)) {
-               ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-                        "during remount not supported");
-               err = -EINVAL;
-               goto restore_opts;
+                        "during remount not supported; ignoring");
+               sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
        }
 
        if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
@@ -4963,7 +4956,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                ext4_mark_recovery_complete(sb, es);
                } else {
                        /* Make sure we can mount this feature set readwrite */
-                       if (!ext4_feature_set_ok(sb, 0)) {
+                       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_READONLY) ||
+                           !ext4_feature_set_ok(sb, 0)) {
                                err = -EROFS;
                                goto restore_opts;
                        }
index 073657f755d4a5b9c0d08acca991fbd35a9f37f0..e907052eeadb69f683df3c7d8838106c6e36905b 100644 (file)
@@ -769,9 +769,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;
 
-               if (!grab_super_passive(sb)) {
+               if (!trylock_super(sb)) {
                        /*
-                        * grab_super_passive() may fail consistently due to
+                        * trylock_super() may fail consistently due to
                         * s_umount being grabbed by someone else. Don't use
                         * requeue_io() to avoid busy retrying the inode/sb.
                         */
@@ -779,7 +779,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
                        continue;
                }
                wrote += writeback_sb_inodes(sb, wb, work);
-               drop_super(sb);
+               up_read(&sb->s_umount);
 
                /* refer to the same tests at the end of writeback_sb_inodes */
                if (wrote) {
index 08e7b1a9d5d0edaca8b94ef386d9200078958df3..1545b711ddcfdc925410b3b553b6597b93c71a25 100644 (file)
@@ -971,7 +971,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
                        err = -EBUSY;
                        goto badentry;
                }
-               if (S_ISDIR(entry->d_inode->i_mode)) {
+               if (d_is_dir(entry)) {
                        shrink_dcache_parent(entry);
                        if (!simple_empty(entry)) {
                                err = -ENOTEMPTY;
index 6371192961e2260cbbb9976ad1c636d89b92cee6..487527b42d94a381d329d8be5ae4459cafabe15b 100644 (file)
@@ -1809,7 +1809,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
                gfs2_consist_inode(dip);
        dip->i_entries--;
        dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                drop_nlink(&dip->i_inode);
        mark_inode_dirty(&dip->i_inode);
 
index 435bea231cc6e83031976a3974bbf24d81a0b8ab..f0235c1640af7ec29edb92541f66dfc739c9b0db 100644 (file)
@@ -530,7 +530,7 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
-               if (S_ISDIR(new_dentry->d_inode->i_mode))
+               if (d_is_dir(new_dentry))
                        res = hfsplus_rmdir(new_dir, new_dentry);
                else
                        res = hfsplus_unlink(new_dir, new_dentry);
index 5f2755117ce775dea45cb5c2bbc369568f991f29..043ac9d77262a858464adad185969109902c9479 100644 (file)
@@ -678,10 +678,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
                return NULL;
        }
 
-       if (S_ISDIR(dentry->d_inode->i_mode)) {
+       if (d_is_dir(dentry)) {
                inode->i_op = &hppfs_dir_iops;
                inode->i_fop = &hppfs_dir_fops;
-       } else if (S_ISLNK(dentry->d_inode->i_mode)) {
+       } else if (d_is_symlink(dentry)) {
                inode->i_op = &hppfs_link_iops;
                inode->i_fop = &hppfs_file_fops;
        } else {
index 30459dab409dd5f8b08f22d69b589f9c91dd2678..01dce1d1476b7bc93633f787e989c464c5f2ef58 100644 (file)
@@ -84,7 +84,7 @@ extern struct file *get_empty_filp(void);
  * super.c
  */
 extern int do_remount_sb(struct super_block *, int, void *, int);
-extern bool grab_super_passive(struct super_block *sb);
+extern bool trylock_super(struct super_block *sb);
 extern struct dentry *mount_fs(struct file_system_type *,
                               int, const char *, void *);
 extern struct super_block *user_get_super(dev_t);
index bcbef08a4d8fc8873994eb37d35881626f05af70..b5128c6e63ad6644d19bf861a062d63f48265a4d 100644 (file)
@@ -524,6 +524,9 @@ static int do_one_pass(journal_t *journal,
                        if (descr_csum_size > 0 &&
                            !jbd2_descr_block_csum_verify(journal,
                                                          bh->b_data)) {
+                               printk(KERN_ERR "JBD2: Invalid checksum "
+                                      "recovering block %lu in log\n",
+                                      next_log_block);
                                err = -EIO;
                                brelse(bh);
                                goto failed;
index 92e0644bf8673d91c091edf437e5a8e5e2941d64..556de100ebd5a5318fec58cc47c6187315ed2b7b 100644 (file)
@@ -84,11 +84,6 @@ static inline int pullbit(struct pushpull *pp)
        return bit;
 }
 
-static inline int pulledbits(struct pushpull *pp)
-{
-       return pp->ofs;
-}
-
 
 static void init_rubin(struct rubin_state *rs, int div, int *bits)
 {
index 938556025d643349b5166e54b5cfc55ea4767dbe..f21b6fb5e4c42f219022edb51c84a0d0674bd4d2 100644 (file)
@@ -252,7 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
        if (!f->inocache)
                return -EIO;
 
-       if (S_ISDIR(old_dentry->d_inode->i_mode))
+       if (d_is_dir(old_dentry))
                return -EPERM;
 
        /* XXX: This is ugly */
@@ -772,7 +772,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
         */
        if (new_dentry->d_inode) {
                victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
-               if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+               if (d_is_dir(new_dentry)) {
                        struct jffs2_full_dirent *fd;
 
                        mutex_lock(&victim_f->sem);
@@ -807,7 +807,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 
        if (victim_f) {
                /* There was a victim. Kill it off nicely */
-               if (S_ISDIR(new_dentry->d_inode->i_mode))
+               if (d_is_dir(new_dentry))
                        clear_nlink(new_dentry->d_inode);
                else
                        drop_nlink(new_dentry->d_inode);
@@ -815,7 +815,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                   inode which didn't exist. */
                if (victim_f->inocache) {
                        mutex_lock(&victim_f->sem);
-                       if (S_ISDIR(new_dentry->d_inode->i_mode))
+                       if (d_is_dir(new_dentry))
                                victim_f->inocache->pino_nlink = 0;
                        else
                                victim_f->inocache->pino_nlink--;
@@ -825,7 +825,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 
        /* If it was a directory we moved, and there was no victim,
           increase i_nlink on its new parent */
-       if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
+       if (d_is_dir(old_dentry) && !victim_f)
                inc_nlink(new_dir_i);
 
        /* Unlink the original */
@@ -839,7 +839,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
                mutex_lock(&f->sem);
                inc_nlink(old_dentry->d_inode);
-               if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode))
+               if (f->inocache && !d_is_dir(old_dentry))
                        f->inocache->pino_nlink++;
                mutex_unlock(&f->sem);
 
@@ -852,7 +852,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                return ret;
        }
 
-       if (S_ISDIR(old_dentry->d_inode->i_mode))
+       if (d_is_dir(old_dentry))
                drop_nlink(old_dir_i);
 
        new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
index 7654e87b042869ef43aff269a10e88a4088d59c3..9ad5ba4b299be2f41cca834e97480a309c4e7b16 100644 (file)
@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
                                sumlen = c->sector_size - je32_to_cpu(sm->offset);
                                sumptr = buf + buf_size - sumlen;
 
+                               /* sm->offset maybe wrong but MAGIC maybe right */
+                               if (sumlen > c->sector_size)
+                                       goto full_scan;
+
                                /* Now, make sure the summary itself is available */
                                if (sumlen > buf_size) {
                                        /* Need to kmalloc for this. */
@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
                }
        }
 
+full_scan:
        buf_ofs = jeb->offset;
 
        if (!buf_size) {
index 0918f0e2e26608467356235c4267c336cccee87c..3d76f28a2ba9dc1d9e4b1d5b9dfde57af1b983e6 100644 (file)
@@ -138,7 +138,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
        struct jffs2_inode_info *f;
        uint32_t pino;
 
-       BUG_ON(!S_ISDIR(child->d_inode->i_mode));
+       BUG_ON(!d_is_dir(child));
 
        f = JFFS2_INODE_INFO(child->d_inode);
 
index b2ffdb045be42c1c5c476d9298aacd98045f47f3..0ab65122ee45405bfcc96463a3de2b228b36e283 100644 (file)
@@ -329,7 +329,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct inode *new_dir, struct dentry *new_dentry)
 {
        struct inode *inode = old_dentry->d_inode;
-       int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);
+       int they_are_dirs = d_is_dir(old_dentry);
 
        if (!simple_empty(new_dentry))
                return -ENOTEMPTY;
index 4753218f308ed32ee8b09123b11bc9cba42a6af4..365c82e1b3a9a602057e65edc9b857c6adce6b76 100644 (file)
@@ -681,21 +681,18 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
 }
 
 static void
-locks_insert_lock_ctx(struct file_lock *fl, int *counter,
-                     struct list_head *before)
+locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
 {
        fl->fl_nspid = get_pid(task_tgid(current));
        list_add_tail(&fl->fl_list, before);
-       ++*counter;
        locks_insert_global_locks(fl);
 }
 
 static void
-locks_unlink_lock_ctx(struct file_lock *fl, int *counter)
+locks_unlink_lock_ctx(struct file_lock *fl)
 {
        locks_delete_global_locks(fl);
        list_del_init(&fl->fl_list);
-       --*counter;
        if (fl->fl_nspid) {
                put_pid(fl->fl_nspid);
                fl->fl_nspid = NULL;
@@ -704,10 +701,9 @@ locks_unlink_lock_ctx(struct file_lock *fl, int *counter)
 }
 
 static void
-locks_delete_lock_ctx(struct file_lock *fl, int *counter,
-                     struct list_head *dispose)
+locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
 {
-       locks_unlink_lock_ctx(fl, counter);
+       locks_unlink_lock_ctx(fl);
        if (dispose)
                list_add(&fl->fl_list, dispose);
        else
@@ -895,7 +891,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
                if (request->fl_type == fl->fl_type)
                        goto out;
                found = true;
-               locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose);
+               locks_delete_lock_ctx(fl, &dispose);
                break;
        }
 
@@ -905,16 +901,6 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
                goto out;
        }
 
-       /*
-        * If a higher-priority process was blocked on the old file lock,
-        * give it the opportunity to lock the file.
-        */
-       if (found) {
-               spin_unlock(&ctx->flc_lock);
-               cond_resched();
-               spin_lock(&ctx->flc_lock);
-       }
-
 find_conflict:
        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
                if (!flock_locks_conflict(request, fl))
@@ -929,7 +915,7 @@ find_conflict:
        if (request->fl_flags & FL_ACCESS)
                goto out;
        locks_copy_lock(new_fl, request);
-       locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock);
+       locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
        new_fl = NULL;
        error = 0;
 
@@ -1046,8 +1032,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                        else
                                request->fl_end = fl->fl_end;
                        if (added) {
-                               locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt,
-                                                       &dispose);
+                               locks_delete_lock_ctx(fl, &dispose);
                                continue;
                        }
                        request = fl;
@@ -1076,8 +1061,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                                 * one (This may happen several times).
                                 */
                                if (added) {
-                                       locks_delete_lock_ctx(fl,
-                                               &ctx->flc_posix_cnt, &dispose);
+                                       locks_delete_lock_ctx(fl, &dispose);
                                        continue;
                                }
                                /*
@@ -1093,10 +1077,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                                locks_copy_lock(new_fl, request);
                                request = new_fl;
                                new_fl = NULL;
-                               locks_insert_lock_ctx(request,
-                                       &ctx->flc_posix_cnt, &fl->fl_list);
-                               locks_delete_lock_ctx(fl,
-                                       &ctx->flc_posix_cnt, &dispose);
+                               locks_insert_lock_ctx(request, &fl->fl_list);
+                               locks_delete_lock_ctx(fl, &dispose);
                                added = true;
                        }
                }
@@ -1124,8 +1106,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                        goto out;
                }
                locks_copy_lock(new_fl, request);
-               locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt,
-                                       &fl->fl_list);
+               locks_insert_lock_ctx(new_fl, &fl->fl_list);
+               fl = new_fl;
                new_fl = NULL;
        }
        if (right) {
@@ -1136,8 +1118,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                        left = new_fl2;
                        new_fl2 = NULL;
                        locks_copy_lock(left, right);
-                       locks_insert_lock_ctx(left, &ctx->flc_posix_cnt,
-                                               &fl->fl_list);
+                       locks_insert_lock_ctx(left, &fl->fl_list);
                }
                right->fl_start = request->fl_end + 1;
                locks_wake_up_blocks(right);
@@ -1321,7 +1302,6 @@ static void lease_clear_pending(struct file_lock *fl, int arg)
 /* We already had a lease on this file; just change its type */
 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
 {
-       struct file_lock_context *flctx;
        int error = assign_type(fl, arg);
 
        if (error)
@@ -1331,7 +1311,6 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
        if (arg == F_UNLCK) {
                struct file *filp = fl->fl_file;
 
-               flctx = file_inode(filp)->i_flctx;
                f_delown(filp);
                filp->f_owner.signum = 0;
                fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
@@ -1339,7 +1318,7 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
                        printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
                        fl->fl_fasync = NULL;
                }
-               locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose);
+               locks_delete_lock_ctx(fl, dispose);
        }
        return 0;
 }
@@ -1456,8 +1435,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
                        fl->fl_downgrade_time = break_time;
                }
                if (fl->fl_lmops->lm_break(fl))
-                       locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt,
-                                               &dispose);
+                       locks_delete_lock_ctx(fl, &dispose);
        }
 
        if (list_empty(&ctx->flc_lease))
@@ -1697,7 +1675,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
        if (!leases_enable)
                goto out;
 
-       locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease);
+       locks_insert_lock_ctx(lease, &ctx->flc_lease);
        /*
         * The check in break_lease() is lockless. It's possible for another
         * open to race in after we did the earlier check for a conflicting
@@ -1710,7 +1688,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
        smp_mb();
        error = check_conflicting_open(dentry, arg, lease->fl_flags);
        if (error) {
-               locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt);
+               locks_unlink_lock_ctx(lease);
                goto out;
        }
 
@@ -2448,7 +2426,8 @@ locks_remove_lease(struct file *filp)
 
        spin_lock(&ctx->flc_lock);
        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
-               lease_modify(fl, F_UNLCK, &dispose);
+               if (filp == fl->fl_file)
+                       lease_modify(fl, F_UNLCK, &dispose);
        spin_unlock(&ctx->flc_lock);
        locks_dispose_list(&dispose);
 }
index 96ca11dea4a20c56b89ca126274b68089235346f..c83145af4bfc0ea9bb159002e3545e8a8cd65157 100644 (file)
@@ -2814,7 +2814,7 @@ no_open:
                        } else if (!dentry->d_inode) {
                                goto out;
                        } else if ((open_flag & O_TRUNC) &&
-                                  S_ISREG(dentry->d_inode->i_mode)) {
+                                  d_is_reg(dentry)) {
                                goto out;
                        }
                        /* will fail later, go on to get the right error */
index 72a286e0d33eb37a2ff3cc8a33f7ca5cefcca266..82ef1405260e1cfbe551ffba781fec25f881e918 100644 (file)
@@ -1907,8 +1907,8 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
        if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
                return -EINVAL;
 
-       if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
-             S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
+       if (d_is_dir(mp->m_dentry) !=
+             d_is_dir(mnt->mnt.mnt_root))
                return -ENOTDIR;
 
        return attach_recursive_mnt(mnt, p, mp, NULL);
@@ -2180,8 +2180,8 @@ static int do_move_mount(struct path *path, const char *old_name)
        if (!mnt_has_parent(old))
                goto out1;
 
-       if (S_ISDIR(path->dentry->d_inode->i_mode) !=
-             S_ISDIR(old_path.dentry->d_inode->i_mode))
+       if (d_is_dir(path->dentry) !=
+             d_is_dir(old_path.dentry))
                goto out1;
        /*
         * Don't move a mount residing in a shared parent.
@@ -2271,7 +2271,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
                goto unlock;
 
        err = -EINVAL;
-       if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
+       if (d_is_symlink(newmnt->mnt.mnt_root))
                goto unlock;
 
        newmnt->mnt.mnt_flags = mnt_flags;
index e36a9d78ea49adc63329253a0a94cb22d6be933a..197806fb87ffb459c19f3c4bbc8da50c58c870dc 100644 (file)
@@ -427,6 +427,8 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
        if (clp == NULL)
                goto out;
 
+       if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+               goto out;
        tbl = &clp->cl_session->bc_slot_table;
 
        spin_lock(&tbl->slot_tbl_lock);
index f4ccfe6521ec80f80fd4b9096bcc7ed49ab7d795..19ca95cdfd9b0f26aedbbc23f036babf2aeca67a 100644 (file)
@@ -313,7 +313,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
                goto out;
        }
 
-       args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL);
+       args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
        if (!args->devs) {
                status = htonl(NFS4ERR_DELAY);
                goto out;
@@ -415,7 +415,7 @@ static __be32 decode_rc_list(struct xdr_stream *xdr,
                             rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
                if (unlikely(p == NULL))
                        goto out;
-               rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+               rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls,
                                                sizeof(*rc_list->rcl_refcalls),
                                                GFP_KERNEL);
                if (unlikely(rc_list->rcl_refcalls == NULL))
@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
 
                for (i = 0; i < args->csa_nrclists; i++) {
                        status = decode_rc_list(xdr, &args->csa_rclists[i]);
-                       if (status)
+                       if (status) {
+                               args->csa_nrclists = i;
                                goto out_free;
+                       }
                }
        }
        status = 0;
index da5433230bb1960bc78bafc17ced89f3bca76b65..a1f0685b42ff7d2e42eed249e178ecdbbee7befc 100644 (file)
@@ -180,7 +180,6 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
                        delegation->cred = get_rpccred(cred);
                        clear_bit(NFS_DELEGATION_NEED_RECLAIM,
                                  &delegation->flags);
-                       NFS_I(inode)->delegation_state = delegation->type;
                        spin_unlock(&delegation->lock);
                        put_rpccred(oldcred);
                        rcu_read_unlock();
@@ -275,7 +274,6 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
        set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
        list_del_rcu(&delegation->super_list);
        delegation->inode = NULL;
-       nfsi->delegation_state = 0;
        rcu_assign_pointer(nfsi->delegation, NULL);
        spin_unlock(&delegation->lock);
        return delegation;
@@ -355,7 +353,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
                                        &delegation->stateid)) {
                        nfs_update_inplace_delegation(old_delegation,
                                        delegation);
-                       nfsi->delegation_state = old_delegation->type;
                        goto out;
                }
                /*
@@ -379,7 +376,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
                        goto out;
        }
        list_add_rcu(&delegation->super_list, &server->delegations);
-       nfsi->delegation_state = delegation->type;
        rcu_assign_pointer(nfsi->delegation, delegation);
        delegation = NULL;
 
index 7077521acdf4609cc57f54da4ce11bb331493632..e907c8cf732e3cff6bc9711ccf0b20c9261cdca2 100644 (file)
@@ -283,7 +283,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
 {
-       cinfo->lock = &dreq->lock;
+       cinfo->lock = &dreq->inode->i_lock;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
index 7ae1c263c5cf03b8d63f1fec359d794174564020..91e88a7ecef0c64354b0278fc01381e0970d2086 100644 (file)
@@ -960,52 +960,19 @@ filelayout_mark_request_commit(struct nfs_page *req,
 {
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
        u32 i, j;
-       struct list_head *list;
-       struct pnfs_commit_bucket *buckets;
 
        if (fl->commit_through_mds) {
-               list = &cinfo->mds->list;
-               spin_lock(cinfo->lock);
-               goto mds_commit;
-       }
-
-       /* Note that we are calling nfs4_fl_calc_j_index on each page
-        * that ends up being committed to a data server.  An attractive
-        * alternative is to add a field to nfs_write_data and nfs_page
-        * to store the value calculated in filelayout_write_pagelist
-        * and just use that here.
-        */
-       j = nfs4_fl_calc_j_index(lseg, req_offset(req));
-       i = select_bucket_index(fl, j);
-       spin_lock(cinfo->lock);
-       buckets = cinfo->ds->buckets;
-       list = &buckets[i].written;
-       if (list_empty(list)) {
-               /* Non-empty buckets hold a reference on the lseg.  That ref
-                * is normally transferred to the COMMIT call and released
-                * there.  It could also be released if the last req is pulled
-                * off due to a rewrite, in which case it will be done in
-                * pnfs_generic_clear_request_commit
+               nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
+       } else {
+               /* Note that we are calling nfs4_fl_calc_j_index on each page
+                * that ends up being committed to a data server.  An attractive
+                * alternative is to add a field to nfs_write_data and nfs_page
+                * to store the value calculated in filelayout_write_pagelist
+                * and just use that here.
                 */
-               buckets[i].wlseg = pnfs_get_lseg(lseg);
-       }
-       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-       cinfo->ds->nwritten++;
-
-mds_commit:
-       /* nfs_request_add_commit_list(). We need to add req to list without
-        * dropping cinfo lock.
-        */
-       set_bit(PG_CLEAN, &(req)->wb_flags);
-       nfs_list_add_request(req, list);
-       cinfo->mds->ncommit++;
-       spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
+               j = nfs4_fl_calc_j_index(lseg, req_offset(req));
+               i = select_bucket_index(fl, j);
+               pnfs_layout_mark_request_commit(req, lseg, cinfo, i);
        }
 }
 
index c22ecaa86c1c27cc2138f1853c27757b11104f17..315cc68945b9d1d3ad00b04e2597e38d7b7c4cab 100644 (file)
@@ -1332,47 +1332,6 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
        return PNFS_ATTEMPTED;
 }
 
-static void
-ff_layout_mark_request_commit(struct nfs_page *req,
-                             struct pnfs_layout_segment *lseg,
-                             struct nfs_commit_info *cinfo,
-                             u32 ds_commit_idx)
-{
-       struct list_head *list;
-       struct pnfs_commit_bucket *buckets;
-
-       spin_lock(cinfo->lock);
-       buckets = cinfo->ds->buckets;
-       list = &buckets[ds_commit_idx].written;
-       if (list_empty(list)) {
-               /* Non-empty buckets hold a reference on the lseg.  That ref
-                * is normally transferred to the COMMIT call and released
-                * there.  It could also be released if the last req is pulled
-                * off due to a rewrite, in which case it will be done in
-                * pnfs_common_clear_request_commit
-                */
-               WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
-               buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
-       }
-       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-       cinfo->ds->nwritten++;
-
-       /* nfs_request_add_commit_list(). We need to add req to list without
-        * dropping cinfo lock.
-        */
-       set_bit(PG_CLEAN, &(req)->wb_flags);
-       nfs_list_add_request(req, list);
-       cinfo->mds->ncommit++;
-       spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
-       }
-}
-
 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
 {
        return i;
@@ -1540,7 +1499,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
        .pg_write_ops           = &ff_layout_pg_write_ops,
        .get_ds_info            = ff_layout_get_ds_info,
        .free_deviceid_node     = ff_layout_free_deveiceid_node,
-       .mark_request_commit    = ff_layout_mark_request_commit,
+       .mark_request_commit    = pnfs_layout_mark_request_commit,
        .clear_request_commit   = pnfs_generic_clear_request_commit,
        .scan_commit_lists      = pnfs_generic_scan_commit_lists,
        .recover_commit_reqs    = pnfs_generic_recover_commit_reqs,
index e4f0dcef8f5455e60676bf70d9d0489107ad7c79..83107be3dd0109ab54c5f2b0c72fa59c64dd8d57 100644 (file)
@@ -1775,7 +1775,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
 #if IS_ENABLED(CONFIG_NFS_V4)
        INIT_LIST_HEAD(&nfsi->open_states);
        nfsi->delegation = NULL;
-       nfsi->delegation_state = 0;
        init_rwsem(&nfsi->rwsem);
        nfsi->layout = NULL;
 #endif
index 212b8c883d22881b4258c2c7f7e611d9d85f0427..b802fb3a2d99ffd76a57aecc6ce6739ca104e515 100644 (file)
@@ -597,6 +597,19 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
 }
 
+/*
+ * Record the page as unstable and mark its inode as dirty.
+ */
+static inline
+void nfs_mark_page_unstable(struct page *page)
+{
+       struct inode *inode = page_file_mapping(page)->host;
+
+       inc_zone_page_state(page, NR_UNSTABLE_NFS);
+       inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE);
+        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+}
+
 /*
  * Determine the number of bytes of data the page contains
  */
index 2e7c9f7a6f7cc8369bfcefda7a202d34f7a9dbe5..88180ac5ea0eebdf34aa333130e69d7a7b49c38e 100644 (file)
@@ -6648,47 +6648,47 @@ nfs41_same_server_scope(struct nfs41_server_scope *a,
 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
 {
        int status;
+       struct nfs41_bind_conn_to_session_args args = {
+               .client = clp,
+               .dir = NFS4_CDFC4_FORE_OR_BOTH,
+       };
        struct nfs41_bind_conn_to_session_res res;
        struct rpc_message msg = {
                .rpc_proc =
                        &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
-               .rpc_argp = clp,
+               .rpc_argp = &args,
                .rpc_resp = &res,
                .rpc_cred = cred,
        };
 
        dprintk("--> %s\n", __func__);
 
-       res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
-       if (unlikely(res.session == NULL)) {
-               status = -ENOMEM;
-               goto out;
-       }
+       nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
+       if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+               args.dir = NFS4_CDFC4_FORE;
 
        status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
        trace_nfs4_bind_conn_to_session(clp, status);
        if (status == 0) {
-               if (memcmp(res.session->sess_id.data,
+               if (memcmp(res.sessionid.data,
                    clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
                        dprintk("NFS: %s: Session ID mismatch\n", __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
-               if (res.dir != NFS4_CDFS4_BOTH) {
+               if ((res.dir & args.dir) != res.dir || res.dir == 0) {
                        dprintk("NFS: %s: Unexpected direction from server\n",
                                __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
-               if (res.use_conn_in_rdma_mode) {
+               if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
                        dprintk("NFS: %s: Server returned RDMA mode = true\n",
                                __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
        }
-out_session:
-       kfree(res.session);
 out:
        dprintk("<-- %s status= %d\n", __func__, status);
        return status;
@@ -7166,10 +7166,11 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
                args->bc_attrs.max_reqs);
 }
 
-static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
+               struct nfs41_create_session_res *res)
 {
        struct nfs4_channel_attrs *sent = &args->fc_attrs;
-       struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
+       struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
 
        if (rcvd->max_resp_sz > sent->max_resp_sz)
                return -EINVAL;
@@ -7188,11 +7189,14 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
        return 0;
 }
 
-static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
+               struct nfs41_create_session_res *res)
 {
        struct nfs4_channel_attrs *sent = &args->bc_attrs;
-       struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
+       struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
 
+       if (!(res->flags & SESSION4_BACK_CHAN))
+               goto out;
        if (rcvd->max_rqst_sz > sent->max_rqst_sz)
                return -EINVAL;
        if (rcvd->max_resp_sz < sent->max_resp_sz)
@@ -7204,18 +7208,30 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args
                return -EINVAL;
        if (rcvd->max_reqs != sent->max_reqs)
                return -EINVAL;
+out:
        return 0;
 }
 
 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
-                                    struct nfs4_session *session)
+                                    struct nfs41_create_session_res *res)
 {
        int ret;
 
-       ret = nfs4_verify_fore_channel_attrs(args, session);
+       ret = nfs4_verify_fore_channel_attrs(args, res);
        if (ret)
                return ret;
-       return nfs4_verify_back_channel_attrs(args, session);
+       return nfs4_verify_back_channel_attrs(args, res);
+}
+
+static void nfs4_update_session(struct nfs4_session *session,
+               struct nfs41_create_session_res *res)
+{
+       nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
+       session->flags = res->flags;
+       memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
+       if (res->flags & SESSION4_BACK_CHAN)
+               memcpy(&session->bc_attrs, &res->bc_attrs,
+                               sizeof(session->bc_attrs));
 }
 
 static int _nfs4_proc_create_session(struct nfs_client *clp,
@@ -7224,11 +7240,12 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
        struct nfs4_session *session = clp->cl_session;
        struct nfs41_create_session_args args = {
                .client = clp,
+               .clientid = clp->cl_clientid,
+               .seqid = clp->cl_seqid,
                .cb_program = NFS4_CALLBACK,
        };
-       struct nfs41_create_session_res res = {
-               .client = clp,
-       };
+       struct nfs41_create_session_res res;
+
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
                .rpc_argp = &args,
@@ -7245,11 +7262,15 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
 
        if (!status) {
                /* Verify the session's negotiated channel_attrs values */
-               status = nfs4_verify_channel_attrs(&args, session);
+               status = nfs4_verify_channel_attrs(&args, &res);
                /* Increment the clientid slot sequence id */
-               clp->cl_seqid++;
+               if (clp->cl_seqid == res.seqid)
+                       clp->cl_seqid++;
+               if (status)
+                       goto out;
+               nfs4_update_session(session, &res);
        }
-
+out:
        return status;
 }
 
index e799dc3c3b1db9f7681199907bff5e7bffb3f853..e23366effcfb1e43bcb81983bcbaeacf2e512002 100644 (file)
@@ -450,7 +450,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
        tbl = &ses->fc_slot_table;
        tbl->session = ses;
        status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
-       if (status) /* -ENOMEM */
+       if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
                return status;
        /* Back channel */
        tbl = &ses->bc_slot_table;
index b34ada9bc6a2d03a677e46cc5ab8c4eb7c1fd838..fc46c745589863425bff271942b76f7812db5aea 100644 (file)
@@ -118,6 +118,12 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
        return 0;
 }
 
+static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
+               const struct nfs4_sessionid *src)
+{
+       memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
+}
+
 #ifdef CONFIG_CRC32
 /*
  * nfs_session_id_hash - calculate the crc32 hash for the session id
index e23a0a664e12d5130162bc320e80a57a5264bc9b..5c399ec41079687791d2fb668d5f6543ce374a2f 100644 (file)
@@ -1715,17 +1715,17 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
 #if defined(CONFIG_NFS_V4_1)
 /* NFSv4.1 operations */
 static void encode_bind_conn_to_session(struct xdr_stream *xdr,
-                                  struct nfs4_session *session,
+                                  struct nfs41_bind_conn_to_session_args *args,
                                   struct compound_hdr *hdr)
 {
        __be32 *p;
 
        encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
                decode_bind_conn_to_session_maxsz, hdr);
-       encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+       encode_opaque_fixed(xdr, args->sessionid.data, NFS4_MAX_SESSIONID_LEN);
        p = xdr_reserve_space(xdr, 8);
-       *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
-       *p = 0; /* use_conn_in_rdma_mode = False */
+       *p++ = cpu_to_be32(args->dir);
+       *p = (args->use_conn_in_rdma_mode) ? cpu_to_be32(1) : cpu_to_be32(0);
 }
 
 static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map)
@@ -1806,8 +1806,8 @@ static void encode_create_session(struct xdr_stream *xdr,
 
        encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
        p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
-       p = xdr_encode_hyper(p, clp->cl_clientid);
-       *p++ = cpu_to_be32(clp->cl_seqid);                      /*Sequence id */
+       p = xdr_encode_hyper(p, args->clientid);
+       *p++ = cpu_to_be32(args->seqid);                        /*Sequence id */
        *p++ = cpu_to_be32(args->flags);                        /*flags */
 
        /* Fore Channel */
@@ -2734,14 +2734,14 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
  */
 static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
                                struct xdr_stream *xdr,
-                               struct nfs_client *clp)
+                               struct nfs41_bind_conn_to_session_args *args)
 {
        struct compound_hdr hdr = {
-               .minorversion = clp->cl_mvops->minor_version,
+               .minorversion = args->client->cl_mvops->minor_version,
        };
 
        encode_compound_hdr(xdr, req, &hdr);
-       encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+       encode_bind_conn_to_session(xdr, args, &hdr);
        encode_nops(&hdr);
 }
 
@@ -5613,7 +5613,7 @@ static int decode_bind_conn_to_session(struct xdr_stream *xdr,
 
        status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
        if (!status)
-               status = decode_sessionid(xdr, &res->session->sess_id);
+               status = decode_sessionid(xdr, &res->sessionid);
        if (unlikely(status))
                return status;
 
@@ -5641,12 +5641,10 @@ static int decode_create_session(struct xdr_stream *xdr,
 {
        __be32 *p;
        int status;
-       struct nfs_client *clp = res->client;
-       struct nfs4_session *session = clp->cl_session;
 
        status = decode_op_hdr(xdr, OP_CREATE_SESSION);
        if (!status)
-               status = decode_sessionid(xdr, &session->sess_id);
+               status = decode_sessionid(xdr, &res->sessionid);
        if (unlikely(status))
                return status;
 
@@ -5654,13 +5652,13 @@ static int decode_create_session(struct xdr_stream *xdr,
        p = xdr_inline_decode(xdr, 8);
        if (unlikely(!p))
                goto out_overflow;
-       clp->cl_seqid = be32_to_cpup(p++);
-       session->flags = be32_to_cpup(p);
+       res->seqid = be32_to_cpup(p++);
+       res->flags = be32_to_cpup(p);
 
        /* Channel attributes */
-       status = decode_chan_attrs(xdr, &session->fc_attrs);
+       status = decode_chan_attrs(xdr, &res->fc_attrs);
        if (!status)
-               status = decode_chan_attrs(xdr, &session->bc_attrs);
+               status = decode_chan_attrs(xdr, &res->bc_attrs);
        return status;
 out_overflow:
        print_overflow_msg(__func__, xdr);
index 797cd6253adf74d809510151080d1d06439f378d..635f0865671cf38b27eea4a49261c9405a4a4731 100644 (file)
@@ -344,6 +344,10 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
                                                 struct xdr_stream *xdr,
                                                 gfp_t gfp_flags);
+void pnfs_layout_mark_request_commit(struct nfs_page *req,
+                                    struct pnfs_layout_segment *lseg,
+                                    struct nfs_commit_info *cinfo,
+                                    u32 ds_commit_idx);
 
 static inline bool nfs_have_layout(struct inode *inode)
 {
index fdc4f6562bb7efc65179f970053dfec6a9c38705..54e36b38fb5f89310287635e0838601ad07cf34a 100644 (file)
@@ -838,3 +838,33 @@ out_err:
        return NULL;
 }
 EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
+
+void
+pnfs_layout_mark_request_commit(struct nfs_page *req,
+                               struct pnfs_layout_segment *lseg,
+                               struct nfs_commit_info *cinfo,
+                               u32 ds_commit_idx)
+{
+       struct list_head *list;
+       struct pnfs_commit_bucket *buckets;
+
+       spin_lock(cinfo->lock);
+       buckets = cinfo->ds->buckets;
+       list = &buckets[ds_commit_idx].written;
+       if (list_empty(list)) {
+               /* Non-empty buckets hold a reference on the lseg.  That ref
+                * is normally transferred to the COMMIT call and released
+                * there.  It could also be released if the last req is pulled
+                * off due to a rewrite, in which case it will be done in
+                * pnfs_common_clear_request_commit
+                */
+               WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
+               buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
+       }
+       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+       cinfo->ds->nwritten++;
+       spin_unlock(cinfo->lock);
+
+       nfs_request_add_commit_list(req, list, cinfo);
+}
+EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
index 88a6d2196ece3bf5ce7a94027dd96e8f25bc9792..595d81e354d18950a21615862b134ca8993f4c6e 100644 (file)
@@ -789,13 +789,8 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
        nfs_list_add_request(req, dst);
        cinfo->mds->ncommit++;
        spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
-       }
+       if (!cinfo->dreq)
+               nfs_mark_page_unstable(req->wb_page);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
@@ -1605,11 +1600,8 @@ void nfs_retry_commit(struct list_head *page_list,
                req = nfs_list_entry(page_list->next);
                nfs_list_remove_request(req);
                nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
-               if (!cinfo->dreq) {
-                       dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-                       dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                                    BDI_RECLAIMABLE);
-               }
+               if (!cinfo->dreq)
+                       nfs_clear_page_commit(req->wb_page);
                nfs_unlock_and_release_request(req);
        }
 }
index cc6a76072009262475c656307e454bb7ad6e44a1..1c307f02baa89e79f0a6d2889631d3ef50ed0a48 100644 (file)
@@ -583,7 +583,7 @@ nfs4_reset_recoverydir(char *recdir)
        if (status)
                return status;
        status = -ENOTDIR;
-       if (S_ISDIR(path.dentry->d_inode->i_mode)) {
+       if (d_is_dir(path.dentry)) {
                strcpy(user_recovery_dirname, recdir);
                status = 0;
        }
@@ -1426,7 +1426,7 @@ nfsd4_client_tracking_init(struct net *net)
        nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
        status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
        if (!status) {
-               status = S_ISDIR(path.dentry->d_inode->i_mode);
+               status = d_is_dir(path.dentry);
                path_put(&path);
                if (status)
                        goto do_init;
index f6b2a09f793f453e5b6c40e80ceb383a12af7616..d2f2c37dc2dbd2649399fe2ddad4032a5025d337 100644 (file)
@@ -1638,7 +1638,7 @@ __destroy_client(struct nfs4_client *clp)
                nfs4_put_stid(&dp->dl_stid);
        }
        while (!list_empty(&clp->cl_revoked)) {
-               dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
+               dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                nfs4_put_stid(&dp->dl_stid);
        }
index 965b478d50fc40a97a85243e478d9e47e3c38a51..e9fa966fc37fe5415f9fba50b61b37d459172303 100644 (file)
@@ -114,8 +114,8 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
         * We're exposing only the directories and symlinks that have to be
         * traversed on the way to real exports:
         */
-       if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) &&
-                    !S_ISLNK(dentry->d_inode->i_mode)))
+       if (unlikely(!d_is_dir(dentry) &&
+                    !d_is_symlink(dentry)))
                return nfserr_stale;
        /*
         * A pseudoroot export gives permission to access only one
@@ -259,7 +259,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
                goto out;
        }
 
-       if (S_ISDIR(dentry->d_inode->i_mode) &&
+       if (d_is_dir(dentry) &&
                        (dentry->d_flags & DCACHE_DISCONNECTED)) {
                printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
                                dentry);
@@ -414,7 +414,7 @@ static inline void _fh_update_old(struct dentry *dentry,
 {
        fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino);
        fh->ofh_generation = dentry->d_inode->i_generation;
-       if (S_ISDIR(dentry->d_inode->i_mode) ||
+       if (d_is_dir(dentry) ||
            (exp->ex_flags & NFSEXP_NOSUBTREECHECK))
                fh->ofh_dirino = 0;
 }
index 5685c679dd93d4371626de7d6107a9ac66a98093..36852658242943051f1a1cac7d4b6a8c1945f84c 100644 (file)
@@ -615,9 +615,9 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
        export = fhp->fh_export;
        dentry = fhp->fh_dentry;
 
-       if (S_ISREG(dentry->d_inode->i_mode))
+       if (d_is_reg(dentry))
                map = nfs3_regaccess;
-       else if (S_ISDIR(dentry->d_inode->i_mode))
+       else if (d_is_dir(dentry))
                map = nfs3_diraccess;
        else
                map = nfs3_anyaccess;
@@ -1402,7 +1402,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
 
                switch (createmode) {
                case NFS3_CREATE_UNCHECKED:
-                       if (! S_ISREG(dchild->d_inode->i_mode))
+                       if (! d_is_reg(dchild))
                                goto out;
                        else if (truncp) {
                                /* in nfsv4, we need to treat this case a little
@@ -1615,7 +1615,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
        if (err)
                goto out;
        err = nfserr_isdir;
-       if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode))
+       if (d_is_dir(tfhp->fh_dentry))
                goto out;
        err = nfserr_perm;
        if (!len)
index b2e3ff34762070a4b37085c051223809b59e4a6c..ecdbae19a766d914e7d68c005c14356455dc83af 100644 (file)
@@ -31,6 +31,8 @@
 #include "alloc.h"
 #include "dat.h"
 
+static void __nilfs_btree_init(struct nilfs_bmap *bmap);
+
 static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
 {
        struct nilfs_btree_path *path;
@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
        return ret;
 }
 
+/**
+ * nilfs_btree_root_broken - verify consistency of btree root node
+ * @node: btree root node to be examined
+ * @ino: inode number
+ *
+ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ */
+static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
+                                  unsigned long ino)
+{
+       int level, flags, nchildren;
+       int ret = 0;
+
+       level = nilfs_btree_node_get_level(node);
+       flags = nilfs_btree_node_get_flags(node);
+       nchildren = nilfs_btree_node_get_nchildren(node);
+
+       if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+                    level > NILFS_BTREE_LEVEL_MAX ||
+                    nchildren < 0 ||
+                    nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+               pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
+                       ino, level, flags, nchildren);
+               ret = 1;
+       }
+       return ret;
+}
+
 int nilfs_btree_broken_node_block(struct buffer_head *bh)
 {
        int ret;
@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
 
        /* convert and insert */
        dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
-       nilfs_btree_init(btree);
+       __nilfs_btree_init(btree);
        if (nreq != NULL) {
                nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
                nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
        .bop_gather_data        =       NULL,
 };
 
-int nilfs_btree_init(struct nilfs_bmap *bmap)
+static void __nilfs_btree_init(struct nilfs_bmap *bmap)
 {
        bmap->b_ops = &nilfs_btree_ops;
        bmap->b_nchildren_per_block =
                NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
-       return 0;
+}
+
+int nilfs_btree_init(struct nilfs_bmap *bmap)
+{
+       int ret = 0;
+
+       __nilfs_btree_init(bmap);
+
+       if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
+                                   bmap->b_inode->i_ino))
+               ret = -EIO;
+       return ret;
 }
 
 void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
index 51ceb81072847441135303dd752f57e6a97e2646..9a66ff79ff2781d1c7992dbd3d4ec42a82008f59 100644 (file)
@@ -115,8 +115,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                return false;
 
        /* sorry, fanotify only gives a damn about files and dirs */
-       if (!S_ISREG(path->dentry->d_inode->i_mode) &&
-           !S_ISDIR(path->dentry->d_inode->i_mode))
+       if (!d_is_reg(path->dentry) &&
+           !d_can_lookup(path->dentry))
                return false;
 
        if (inode_mark && vfsmnt_mark) {
@@ -139,7 +139,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                BUG();
        }
 
-       if (S_ISDIR(path->dentry->d_inode->i_mode) &&
+       if (d_is_dir(path->dentry) &&
            !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
                return false;
 
index ea10a87191072339b1352c01f0261da054701a9b..24f640441bd90977a079aac782768025c68f3712 100644 (file)
@@ -191,7 +191,6 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
                ovl_set_timestamps(upperdentry, stat);
 
        return err;
-
 }
 
 static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
@@ -385,7 +384,7 @@ int ovl_copy_up(struct dentry *dentry)
                struct kstat stat;
                enum ovl_path_type type = ovl_path_type(dentry);
 
-               if (type != OVL_PATH_LOWER)
+               if (OVL_TYPE_UPPER(type))
                        break;
 
                next = dget(dentry);
@@ -394,7 +393,7 @@ int ovl_copy_up(struct dentry *dentry)
                        parent = dget_parent(next);
 
                        type = ovl_path_type(parent);
-                       if (type != OVL_PATH_LOWER)
+                       if (OVL_TYPE_UPPER(type))
                                break;
 
                        dput(next);
index 8ffc4b980f1b68641c17a7bfb67979f205c658b5..d139405d2bfad7cfd94c735913ecebf221def5b5 100644 (file)
@@ -19,7 +19,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        int err;
 
        dget(wdentry);
-       if (S_ISDIR(wdentry->d_inode->i_mode))
+       if (d_is_dir(wdentry))
                err = ovl_do_rmdir(wdir, wdentry);
        else
                err = ovl_do_unlink(wdir, wdentry);
@@ -118,14 +118,14 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
 
 static int ovl_set_opaque(struct dentry *upperdentry)
 {
-       return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
+       return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
 }
 
 static void ovl_remove_opaque(struct dentry *upperdentry)
 {
        int err;
 
-       err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr);
+       err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE);
        if (err) {
                pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
                        upperdentry->d_name.name, err);
@@ -152,7 +152,7 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
         * correct link count.  nlink=1 seems to pacify 'find' and
         * other utilities.
         */
-       if (type == OVL_PATH_MERGE)
+       if (OVL_TYPE_MERGE(type))
                stat->nlink = 1;
 
        return 0;
@@ -506,7 +506,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *opaquedir = NULL;
        int err;
 
-       if (is_dir) {
+       if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
                opaquedir = ovl_check_empty_and_clear(dentry);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir))
@@ -630,7 +630,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
                goto out_drop_write;
 
        type = ovl_path_type(dentry);
-       if (type == OVL_PATH_PURE_UPPER) {
+       if (OVL_TYPE_PURE_UPPER(type)) {
                err = ovl_remove_upper(dentry, is_dir);
        } else {
                const struct cred *old_cred;
@@ -693,7 +693,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        bool new_create = false;
        bool cleanup_whiteout = false;
        bool overwrite = !(flags & RENAME_EXCHANGE);
-       bool is_dir = S_ISDIR(old->d_inode->i_mode);
+       bool is_dir = d_is_dir(old);
        bool new_is_dir = false;
        struct dentry *opaquedir = NULL;
        const struct cred *old_cred = NULL;
@@ -712,7 +712,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        /* Don't copy up directory trees */
        old_type = ovl_path_type(old);
        err = -EXDEV;
-       if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir)
+       if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir)
                goto out;
 
        if (new->d_inode) {
@@ -720,30 +720,30 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                if (err)
                        goto out;
 
-               if (S_ISDIR(new->d_inode->i_mode))
+               if (d_is_dir(new))
                        new_is_dir = true;
 
                new_type = ovl_path_type(new);
                err = -EXDEV;
-               if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir)
+               if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir)
                        goto out;
 
                err = 0;
-               if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
+               if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) {
                        if (ovl_dentry_lower(old)->d_inode ==
                            ovl_dentry_lower(new)->d_inode)
                                goto out;
                }
-               if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
+               if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) {
                        if (ovl_dentry_upper(old)->d_inode ==
                            ovl_dentry_upper(new)->d_inode)
                                goto out;
                }
        } else {
                if (ovl_dentry_is_opaque(new))
-                       new_type = OVL_PATH_UPPER;
+                       new_type = __OVL_PATH_UPPER;
                else
-                       new_type = OVL_PATH_PURE_UPPER;
+                       new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE;
        }
 
        err = ovl_want_write(old);
@@ -763,8 +763,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                        goto out_drop_write;
        }
 
-       old_opaque = old_type != OVL_PATH_PURE_UPPER;
-       new_opaque = new_type != OVL_PATH_PURE_UPPER;
+       old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
+       new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
 
        if (old_opaque || new_opaque) {
                err = -ENOMEM;
@@ -787,7 +787,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                old_cred = override_creds(override_cred);
        }
 
-       if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) {
+       if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
                opaquedir = ovl_check_empty_and_clear(new);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir)) {
index 07d74b24913bdee757377d103427883fb7d12e70..04f1248846877d019625c861b474702177b38ae5 100644 (file)
@@ -205,7 +205,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
 
 static bool ovl_is_private_xattr(const char *name)
 {
-       return strncmp(name, "trusted.overlay.", 14) == 0;
+       return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
 }
 
 int ovl_setxattr(struct dentry *dentry, const char *name,
@@ -238,7 +238,10 @@ out:
 static bool ovl_need_xattr_filter(struct dentry *dentry,
                                  enum ovl_path_type type)
 {
-       return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode);
+       if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
+               return S_ISDIR(dentry->d_inode->i_mode);
+       else
+               return false;
 }
 
 ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
@@ -299,7 +302,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
        if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
                goto out_drop_write;
 
-       if (type == OVL_PATH_LOWER) {
+       if (!OVL_TYPE_UPPER(type)) {
                err = vfs_getxattr(realpath.dentry, name, NULL, 0);
                if (err < 0)
                        goto out_drop_write;
@@ -321,7 +324,7 @@ out:
 static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
                                  struct dentry *realdentry)
 {
-       if (type != OVL_PATH_LOWER)
+       if (OVL_TYPE_UPPER(type))
                return false;
 
        if (special_file(realdentry->d_inode->i_mode))
@@ -430,5 +433,4 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
        }
 
        return inode;
-
 }
index 814bed33dd078c00ade8ed534ae19e969530ccdb..17ac5afc9ffbce150d03e352fd7aaa99f101aa0c 100644 (file)
 struct ovl_entry;
 
 enum ovl_path_type {
-       OVL_PATH_PURE_UPPER,
-       OVL_PATH_UPPER,
-       OVL_PATH_MERGE,
-       OVL_PATH_LOWER,
+       __OVL_PATH_PURE         = (1 << 0),
+       __OVL_PATH_UPPER        = (1 << 1),
+       __OVL_PATH_MERGE        = (1 << 2),
 };
 
-extern const char *ovl_opaque_xattr;
+#define OVL_TYPE_UPPER(type)   ((type) & __OVL_PATH_UPPER)
+#define OVL_TYPE_MERGE(type)   ((type) & __OVL_PATH_MERGE)
+#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE)
+#define OVL_TYPE_MERGE_OR_LOWER(type) \
+       (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
+
+#define OVL_XATTR_PRE_NAME "trusted.overlay."
+#define OVL_XATTR_PRE_LEN  16
+#define OVL_XATTR_OPAQUE   OVL_XATTR_PRE_NAME"opaque"
 
 static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -130,6 +137,7 @@ void ovl_dentry_version_inc(struct dentry *dentry);
 void ovl_path_upper(struct dentry *dentry, struct path *path);
 void ovl_path_lower(struct dentry *dentry, struct path *path);
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
index c0205990a9f54d03f77909555e368efc5a90bd6f..907870e81a72e36f4c5abb29fd34e23b4307efc5 100644 (file)
@@ -24,7 +24,6 @@ struct ovl_cache_entry {
        struct list_head l_node;
        struct rb_node node;
        bool is_whiteout;
-       bool is_cursor;
        char name[];
 };
 
@@ -40,6 +39,7 @@ struct ovl_readdir_data {
        struct rb_root root;
        struct list_head *list;
        struct list_head middle;
+       struct dentry *dir;
        int count;
        int err;
 };
@@ -48,7 +48,7 @@ struct ovl_dir_file {
        bool is_real;
        bool is_upper;
        struct ovl_dir_cache *cache;
-       struct ovl_cache_entry cursor;
+       struct list_head *cursor;
        struct file *realfile;
        struct file *upperfile;
 };
@@ -79,23 +79,49 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
        return NULL;
 }
 
-static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
+static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
+                                                  const char *name, int len,
                                                   u64 ino, unsigned int d_type)
 {
        struct ovl_cache_entry *p;
        size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
 
        p = kmalloc(size, GFP_KERNEL);
-       if (p) {
-               memcpy(p->name, name, len);
-               p->name[len] = '\0';
-               p->len = len;
-               p->type = d_type;
-               p->ino = ino;
-               p->is_whiteout = false;
-               p->is_cursor = false;
-       }
+       if (!p)
+               return NULL;
+
+       memcpy(p->name, name, len);
+       p->name[len] = '\0';
+       p->len = len;
+       p->type = d_type;
+       p->ino = ino;
+       p->is_whiteout = false;
+
+       if (d_type == DT_CHR) {
+               struct dentry *dentry;
+               const struct cred *old_cred;
+               struct cred *override_cred;
+
+               override_cred = prepare_creds();
+               if (!override_cred) {
+                       kfree(p);
+                       return NULL;
+               }
+
+               /*
+                * CAP_DAC_OVERRIDE for lookup
+                */
+               cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+               old_cred = override_creds(override_cred);
 
+               dentry = lookup_one_len(name, dir, len);
+               if (!IS_ERR(dentry)) {
+                       p->is_whiteout = ovl_is_whiteout(dentry);
+                       dput(dentry);
+               }
+               revert_creds(old_cred);
+               put_cred(override_cred);
+       }
        return p;
 }
 
@@ -122,7 +148,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                        return 0;
        }
 
-       p = ovl_cache_entry_new(name, len, ino, d_type);
+       p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
        if (p == NULL)
                return -ENOMEM;
 
@@ -143,7 +169,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd,
        if (p) {
                list_move_tail(&p->l_node, &rdd->middle);
        } else {
-               p = ovl_cache_entry_new(name, namelen, ino, d_type);
+               p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
                if (p == NULL)
                        rdd->err = -ENOMEM;
                else
@@ -168,7 +194,6 @@ static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
 {
        struct ovl_dir_cache *cache = od->cache;
 
-       list_del_init(&od->cursor.l_node);
        WARN_ON(cache->refcount <= 0);
        cache->refcount--;
        if (!cache->refcount) {
@@ -204,6 +229,7 @@ static inline int ovl_dir_read(struct path *realpath,
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
+       rdd->dir = realpath->dentry;
        rdd->ctx.pos = 0;
        do {
                rdd->count = 0;
@@ -227,108 +253,58 @@ static void ovl_dir_reset(struct file *file)
        if (cache && ovl_dentry_version_get(dentry) != cache->version) {
                ovl_cache_put(od, dentry);
                od->cache = NULL;
+               od->cursor = NULL;
        }
-       WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
-       if (od->is_real && type == OVL_PATH_MERGE)
+       WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
+       if (od->is_real && OVL_TYPE_MERGE(type))
                od->is_real = false;
 }
 
-static int ovl_dir_mark_whiteouts(struct dentry *dir,
-                                 struct ovl_readdir_data *rdd)
-{
-       struct ovl_cache_entry *p;
-       struct dentry *dentry;
-       const struct cred *old_cred;
-       struct cred *override_cred;
-
-       override_cred = prepare_creds();
-       if (!override_cred) {
-               ovl_cache_free(rdd->list);
-               return -ENOMEM;
-       }
-
-       /*
-        * CAP_DAC_OVERRIDE for lookup
-        */
-       cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-       old_cred = override_creds(override_cred);
-
-       mutex_lock(&dir->d_inode->i_mutex);
-       list_for_each_entry(p, rdd->list, l_node) {
-               if (p->is_cursor)
-                       continue;
-
-               if (p->type != DT_CHR)
-                       continue;
-
-               dentry = lookup_one_len(p->name, dir, p->len);
-               if (IS_ERR(dentry))
-                       continue;
-
-               p->is_whiteout = ovl_is_whiteout(dentry);
-               dput(dentry);
-       }
-       mutex_unlock(&dir->d_inode->i_mutex);
-
-       revert_creds(old_cred);
-       put_cred(override_cred);
-
-       return 0;
-}
-
 static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
 {
        int err;
-       struct path lowerpath;
-       struct path upperpath;
+       struct path realpath;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .list = list,
                .root = RB_ROOT,
                .is_merge = false,
        };
+       int idx, next;
 
-       ovl_path_lower(dentry, &lowerpath);
-       ovl_path_upper(dentry, &upperpath);
+       for (idx = 0; idx != -1; idx = next) {
+               next = ovl_path_next(idx, dentry, &realpath);
 
-       if (upperpath.dentry) {
-               err = ovl_dir_read(&upperpath, &rdd);
-               if (err)
-                       goto out;
-
-               if (lowerpath.dentry) {
-                       err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
+               if (next != -1) {
+                       err = ovl_dir_read(&realpath, &rdd);
                        if (err)
-                               goto out;
+                               break;
+               } else {
+                       /*
+                        * Insert lowest layer entries before upper ones, this
+                        * allows offsets to be reasonably constant
+                        */
+                       list_add(&rdd.middle, rdd.list);
+                       rdd.is_merge = true;
+                       err = ovl_dir_read(&realpath, &rdd);
+                       list_del(&rdd.middle);
                }
        }
-       if (lowerpath.dentry) {
-               /*
-                * Insert lowerpath entries before upperpath ones, this allows
-                * offsets to be reasonably constant
-                */
-               list_add(&rdd.middle, rdd.list);
-               rdd.is_merge = true;
-               err = ovl_dir_read(&lowerpath, &rdd);
-               list_del(&rdd.middle);
-       }
-out:
        return err;
 }
 
 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
 {
-       struct ovl_cache_entry *p;
+       struct list_head *p;
        loff_t off = 0;
 
-       list_for_each_entry(p, &od->cache->entries, l_node) {
-               if (p->is_cursor)
-                       continue;
+       list_for_each(p, &od->cache->entries) {
                if (off >= pos)
                        break;
                off++;
        }
-       list_move_tail(&od->cursor.l_node, &p->l_node);
+       /* Cursor is safe since the cache is stable */
+       od->cursor = p;
 }
 
 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
@@ -367,6 +343,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 {
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
+       struct ovl_cache_entry *p;
 
        if (!ctx->pos)
                ovl_dir_reset(file);
@@ -385,19 +362,13 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
                ovl_seek_cursor(od, ctx->pos);
        }
 
-       while (od->cursor.l_node.next != &od->cache->entries) {
-               struct ovl_cache_entry *p;
-
-               p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
-               /* Skip cursors */
-               if (!p->is_cursor) {
-                       if (!p->is_whiteout) {
-                               if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
-                                       break;
-                       }
-                       ctx->pos++;
-               }
-               list_move(&od->cursor.l_node, &p->l_node);
+       while (od->cursor != &od->cache->entries) {
+               p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
+               if (!p->is_whiteout)
+                       if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
+                               break;
+               od->cursor = p->l_node.next;
+               ctx->pos++;
        }
        return 0;
 }
@@ -452,7 +423,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
        /*
         * Need to check if we started out being a lower dir, but got copied up
         */
-       if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
+       if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
                struct inode *inode = file_inode(file);
 
                realfile = lockless_dereference(od->upperfile);
@@ -516,11 +487,9 @@ static int ovl_dir_open(struct inode *inode, struct file *file)
                kfree(od);
                return PTR_ERR(realfile);
        }
-       INIT_LIST_HEAD(&od->cursor.l_node);
        od->realfile = realfile;
-       od->is_real = (type != OVL_PATH_MERGE);
-       od->is_upper = (type != OVL_PATH_LOWER);
-       od->cursor.is_cursor = true;
+       od->is_real = !OVL_TYPE_MERGE(type);
+       od->is_upper = OVL_TYPE_UPPER(type);
        file->private_data = od;
 
        return 0;
index f16d318b71f8bbe4e77f8a3214e616101848e49c..b90952f528b1cdf7414e49b5613e439d6b89fb7a 100644 (file)
@@ -35,7 +35,8 @@ struct ovl_config {
 /* private information held for overlayfs's superblock */
 struct ovl_fs {
        struct vfsmount *upper_mnt;
-       struct vfsmount *lower_mnt;
+       unsigned numlower;
+       struct vfsmount **lower_mnt;
        struct dentry *workdir;
        long lower_namelen;
        /* pathnames of lower and upper dirs, for show_options */
@@ -47,7 +48,6 @@ struct ovl_dir_cache;
 /* private information held for every overlayfs dentry */
 struct ovl_entry {
        struct dentry *__upperdentry;
-       struct dentry *lowerdentry;
        struct ovl_dir_cache *cache;
        union {
                struct {
@@ -56,30 +56,36 @@ struct ovl_entry {
                };
                struct rcu_head rcu;
        };
+       unsigned numlower;
+       struct path lowerstack[];
 };
 
-const char *ovl_opaque_xattr = "trusted.overlay.opaque";
+#define OVL_MAX_STACK 500
 
+static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
+{
+       return oe->numlower ? oe->lowerstack[0].dentry : NULL;
+}
 
 enum ovl_path_type ovl_path_type(struct dentry *dentry)
 {
        struct ovl_entry *oe = dentry->d_fsdata;
+       enum ovl_path_type type = 0;
 
        if (oe->__upperdentry) {
-               if (oe->lowerdentry) {
+               type = __OVL_PATH_UPPER;
+
+               if (oe->numlower) {
                        if (S_ISDIR(dentry->d_inode->i_mode))
-                               return OVL_PATH_MERGE;
-                       else
-                               return OVL_PATH_UPPER;
-               } else {
-                       if (oe->opaque)
-                               return OVL_PATH_UPPER;
-                       else
-                               return OVL_PATH_PURE_UPPER;
+                               type |= __OVL_PATH_MERGE;
+               } else if (!oe->opaque) {
+                       type |= __OVL_PATH_PURE;
                }
        } else {
-               return OVL_PATH_LOWER;
+               if (oe->numlower > 1)
+                       type |= __OVL_PATH_MERGE;
        }
+       return type;
 }
 
 static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
@@ -98,10 +104,9 @@ void ovl_path_upper(struct dentry *dentry, struct path *path)
 
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
 {
-
        enum ovl_path_type type = ovl_path_type(dentry);
 
-       if (type == OVL_PATH_LOWER)
+       if (!OVL_TYPE_UPPER(type))
                ovl_path_lower(dentry, path);
        else
                ovl_path_upper(dentry, path);
@@ -120,7 +125,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry)
 {
        struct ovl_entry *oe = dentry->d_fsdata;
 
-       return oe->lowerdentry;
+       return __ovl_dentry_lower(oe);
 }
 
 struct dentry *ovl_dentry_real(struct dentry *dentry)
@@ -130,7 +135,7 @@ struct dentry *ovl_dentry_real(struct dentry *dentry)
 
        realdentry = ovl_upperdentry_dereference(oe);
        if (!realdentry)
-               realdentry = oe->lowerdentry;
+               realdentry = __ovl_dentry_lower(oe);
 
        return realdentry;
 }
@@ -143,7 +148,7 @@ struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
        if (realdentry) {
                *is_upper = true;
        } else {
-               realdentry = oe->lowerdentry;
+               realdentry = __ovl_dentry_lower(oe);
                *is_upper = false;
        }
        return realdentry;
@@ -165,11 +170,9 @@ void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache)
 
 void ovl_path_lower(struct dentry *dentry, struct path *path)
 {
-       struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
        struct ovl_entry *oe = dentry->d_fsdata;
 
-       path->mnt = ofs->lower_mnt;
-       path->dentry = oe->lowerdentry;
+       *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
 }
 
 int ovl_want_write(struct dentry *dentry)
@@ -249,7 +252,7 @@ static bool ovl_is_opaquedir(struct dentry *dentry)
        if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr)
                return false;
 
-       res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1);
+       res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
        if (res == 1 && val == 'y')
                return true;
 
@@ -261,8 +264,11 @@ static void ovl_dentry_release(struct dentry *dentry)
        struct ovl_entry *oe = dentry->d_fsdata;
 
        if (oe) {
+               unsigned int i;
+
                dput(oe->__upperdentry);
-               dput(oe->lowerdentry);
+               for (i = 0; i < oe->numlower; i++)
+                       dput(oe->lowerstack[i].dentry);
                kfree_rcu(oe, rcu);
        }
 }
@@ -271,9 +277,15 @@ static const struct dentry_operations ovl_dentry_operations = {
        .d_release = ovl_dentry_release,
 };
 
-static struct ovl_entry *ovl_alloc_entry(void)
+static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
 {
-       return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
+       size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
+       struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
+
+       if (oe)
+               oe->numlower = numlower;
+
+       return oe;
 }
 
 static inline struct dentry *ovl_lookup_real(struct dentry *dir,
@@ -295,82 +307,154 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir,
        return dentry;
 }
 
+/*
+ * Returns next layer in stack starting from top.
+ * Returns -1 if this is the last layer.
+ */
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       BUG_ON(idx < 0);
+       if (idx == 0) {
+               ovl_path_upper(dentry, path);
+               if (path->dentry)
+                       return oe->numlower ? 1 : -1;
+               idx++;
+       }
+       BUG_ON(idx > oe->numlower);
+       *path = oe->lowerstack[idx - 1];
+
+       return (idx < oe->numlower) ? idx + 1 : -1;
+}
+
 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                          unsigned int flags)
 {
        struct ovl_entry *oe;
-       struct dentry *upperdir;
-       struct dentry *lowerdir;
-       struct dentry *upperdentry = NULL;
-       struct dentry *lowerdentry = NULL;
+       struct ovl_entry *poe = dentry->d_parent->d_fsdata;
+       struct path *stack = NULL;
+       struct dentry *upperdir, *upperdentry = NULL;
+       unsigned int ctr = 0;
        struct inode *inode = NULL;
+       bool upperopaque = false;
+       struct dentry *this, *prev = NULL;
+       unsigned int i;
        int err;
 
-       err = -ENOMEM;
-       oe = ovl_alloc_entry();
-       if (!oe)
-               goto out;
-
-       upperdir = ovl_dentry_upper(dentry->d_parent);
-       lowerdir = ovl_dentry_lower(dentry->d_parent);
-
+       upperdir = ovl_upperdentry_dereference(poe);
        if (upperdir) {
-               upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
-               err = PTR_ERR(upperdentry);
-               if (IS_ERR(upperdentry))
-                       goto out_put_dir;
-
-               if (lowerdir && upperdentry) {
-                       if (ovl_is_whiteout(upperdentry)) {
-                               dput(upperdentry);
-                               upperdentry = NULL;
-                               oe->opaque = true;
-                       } else if (ovl_is_opaquedir(upperdentry)) {
-                               oe->opaque = true;
+               this = ovl_lookup_real(upperdir, &dentry->d_name);
+               err = PTR_ERR(this);
+               if (IS_ERR(this))
+                       goto out;
+
+               if (this) {
+                       if (ovl_is_whiteout(this)) {
+                               dput(this);
+                               this = NULL;
+                               upperopaque = true;
+                       } else if (poe->numlower && ovl_is_opaquedir(this)) {
+                               upperopaque = true;
                        }
                }
+               upperdentry = prev = this;
        }
-       if (lowerdir && !oe->opaque) {
-               lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
-               err = PTR_ERR(lowerdentry);
-               if (IS_ERR(lowerdentry))
-                       goto out_dput_upper;
+
+       if (!upperopaque && poe->numlower) {
+               err = -ENOMEM;
+               stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL);
+               if (!stack)
+                       goto out_put_upper;
        }
 
-       if (lowerdentry && upperdentry &&
-           (!S_ISDIR(upperdentry->d_inode->i_mode) ||
-            !S_ISDIR(lowerdentry->d_inode->i_mode))) {
-               dput(lowerdentry);
-               lowerdentry = NULL;
-               oe->opaque = true;
+       for (i = 0; !upperopaque && i < poe->numlower; i++) {
+               bool opaque = false;
+               struct path lowerpath = poe->lowerstack[i];
+
+               this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
+               err = PTR_ERR(this);
+               if (IS_ERR(this)) {
+                       /*
+                        * If it's positive, then treat ENAMETOOLONG as ENOENT.
+                        */
+                       if (err == -ENAMETOOLONG && (upperdentry || ctr))
+                               continue;
+                       goto out_put;
+               }
+               if (!this)
+                       continue;
+               if (ovl_is_whiteout(this)) {
+                       dput(this);
+                       break;
+               }
+               /*
+                * Only makes sense to check opaque dir if this is not the
+                * lowermost layer.
+                */
+               if (i < poe->numlower - 1 && ovl_is_opaquedir(this))
+                       opaque = true;
+
+               if (prev && (!S_ISDIR(prev->d_inode->i_mode) ||
+                            !S_ISDIR(this->d_inode->i_mode))) {
+                       /*
+                        * FIXME: check for upper-opaqueness maybe better done
+                        * in remove code.
+                        */
+                       if (prev == upperdentry)
+                               upperopaque = true;
+                       dput(this);
+                       break;
+               }
+               /*
+                * If this is a non-directory then stop here.
+                */
+               if (!S_ISDIR(this->d_inode->i_mode))
+                       opaque = true;
+
+               stack[ctr].dentry = this;
+               stack[ctr].mnt = lowerpath.mnt;
+               ctr++;
+               prev = this;
+               if (opaque)
+                       break;
        }
 
-       if (lowerdentry || upperdentry) {
+       oe = ovl_alloc_entry(ctr);
+       err = -ENOMEM;
+       if (!oe)
+               goto out_put;
+
+       if (upperdentry || ctr) {
                struct dentry *realdentry;
 
-               realdentry = upperdentry ? upperdentry : lowerdentry;
+               realdentry = upperdentry ? upperdentry : stack[0].dentry;
+
                err = -ENOMEM;
                inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
                                      oe);
                if (!inode)
-                       goto out_dput;
+                       goto out_free_oe;
                ovl_copyattr(realdentry->d_inode, inode);
        }
 
+       oe->opaque = upperopaque;
        oe->__upperdentry = upperdentry;
-       oe->lowerdentry = lowerdentry;
-
+       memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
+       kfree(stack);
        dentry->d_fsdata = oe;
        d_add(dentry, inode);
 
        return NULL;
 
-out_dput:
-       dput(lowerdentry);
-out_dput_upper:
-       dput(upperdentry);
-out_put_dir:
+out_free_oe:
        kfree(oe);
+out_put:
+       for (i = 0; i < ctr; i++)
+               dput(stack[i].dentry);
+       kfree(stack);
+out_put_upper:
+       dput(upperdentry);
 out:
        return ERR_PTR(err);
 }
@@ -383,10 +467,12 @@ struct file *ovl_path_open(struct path *path, int flags)
 static void ovl_put_super(struct super_block *sb)
 {
        struct ovl_fs *ufs = sb->s_fs_info;
+       unsigned i;
 
        dput(ufs->workdir);
        mntput(ufs->upper_mnt);
-       mntput(ufs->lower_mnt);
+       for (i = 0; i < ufs->numlower; i++)
+               mntput(ufs->lower_mnt[i]);
 
        kfree(ufs->config.lowerdir);
        kfree(ufs->config.upperdir);
@@ -400,7 +486,7 @@ static void ovl_put_super(struct super_block *sb)
  * @buf: The struct kstatfs to fill in with stats
  *
  * Get the filesystem statistics.  As writes always target the upper layer
- * filesystem pass the statfs to the same filesystem.
+ * filesystem pass the statfs to the upper filesystem (if it exists)
  */
 static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -409,7 +495,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct path path;
        int err;
 
-       ovl_path_upper(root_dentry, &path);
+       ovl_path_real(root_dentry, &path);
 
        err = vfs_statfs(&path, buf);
        if (!err) {
@@ -432,8 +518,21 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
        struct ovl_fs *ufs = sb->s_fs_info;
 
        seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
-       seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
-       seq_printf(m, ",workdir=%s", ufs->config.workdir);
+       if (ufs->config.upperdir) {
+               seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
+               seq_printf(m, ",workdir=%s", ufs->config.workdir);
+       }
+       return 0;
+}
+
+static int ovl_remount(struct super_block *sb, int *flags, char *data)
+{
+       struct ovl_fs *ufs = sb->s_fs_info;
+
+       if (!(*flags & MS_RDONLY) &&
+           (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)))
+               return -EROFS;
+
        return 0;
 }
 
@@ -441,6 +540,7 @@ static const struct super_operations ovl_super_operations = {
        .put_super      = ovl_put_super,
        .statfs         = ovl_statfs,
        .show_options   = ovl_show_options,
+       .remount_fs     = ovl_remount,
 };
 
 enum {
@@ -585,24 +685,6 @@ static void ovl_unescape(char *s)
        }
 }
 
-static int ovl_mount_dir(const char *name, struct path *path)
-{
-       int err;
-       char *tmp = kstrdup(name, GFP_KERNEL);
-
-       if (!tmp)
-               return -ENOMEM;
-
-       ovl_unescape(tmp);
-       err = kern_path(tmp, LOOKUP_FOLLOW, path);
-       if (err) {
-               pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err);
-               err = -EINVAL;
-       }
-       kfree(tmp);
-       return err;
-}
-
 static bool ovl_is_allowed_fs_type(struct dentry *root)
 {
        const struct dentry_operations *dop = root->d_op;
@@ -622,6 +704,75 @@ static bool ovl_is_allowed_fs_type(struct dentry *root)
        return true;
 }
 
+static int ovl_mount_dir_noesc(const char *name, struct path *path)
+{
+       int err = -EINVAL;
+
+       if (!*name) {
+               pr_err("overlayfs: empty lowerdir\n");
+               goto out;
+       }
+       err = kern_path(name, LOOKUP_FOLLOW, path);
+       if (err) {
+               pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+               goto out;
+       }
+       err = -EINVAL;
+       if (!ovl_is_allowed_fs_type(path->dentry)) {
+               pr_err("overlayfs: filesystem on '%s' not supported\n", name);
+               goto out_put;
+       }
+       if (!S_ISDIR(path->dentry->d_inode->i_mode)) {
+               pr_err("overlayfs: '%s' not a directory\n", name);
+               goto out_put;
+       }
+       return 0;
+
+out_put:
+       path_put(path);
+out:
+       return err;
+}
+
+static int ovl_mount_dir(const char *name, struct path *path)
+{
+       int err = -ENOMEM;
+       char *tmp = kstrdup(name, GFP_KERNEL);
+
+       if (tmp) {
+               ovl_unescape(tmp);
+               err = ovl_mount_dir_noesc(tmp, path);
+               kfree(tmp);
+       }
+       return err;
+}
+
+static int ovl_lower_dir(const char *name, struct path *path, long *namelen,
+                        int *stack_depth)
+{
+       int err;
+       struct kstatfs statfs;
+
+       err = ovl_mount_dir_noesc(name, path);
+       if (err)
+               goto out;
+
+       err = vfs_statfs(path, &statfs);
+       if (err) {
+               pr_err("overlayfs: statfs failed on '%s'\n", name);
+               goto out_put;
+       }
+       *namelen = max(*namelen, statfs.f_namelen);
+       *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
+
+       return 0;
+
+out_put:
+       path_put(path);
+out:
+       return err;
+}
+
 /* Workdir should not be subdir of upperdir and vice versa */
 static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
 {
@@ -634,16 +785,39 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
        return ok;
 }
 
+static unsigned int ovl_split_lowerdirs(char *str)
+{
+       unsigned int ctr = 1;
+       char *s, *d;
+
+       for (s = d = str;; s++, d++) {
+               if (*s == '\\') {
+                       s++;
+               } else if (*s == ':') {
+                       *d = '\0';
+                       ctr++;
+                       continue;
+               }
+               *d = *s;
+               if (!*s)
+                       break;
+       }
+       return ctr;
+}
+
 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 {
-       struct path lowerpath;
-       struct path upperpath;
-       struct path workpath;
-       struct inode *root_inode;
+       struct path upperpath = { NULL, NULL };
+       struct path workpath = { NULL, NULL };
        struct dentry *root_dentry;
        struct ovl_entry *oe;
        struct ovl_fs *ufs;
-       struct kstatfs statfs;
+       struct path *stack = NULL;
+       char *lowertmp;
+       char *lower;
+       unsigned int numlower;
+       unsigned int stacklen = 0;
+       unsigned int i;
        int err;
 
        err = -ENOMEM;
@@ -655,123 +829,135 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        if (err)
                goto out_free_config;
 
-       /* FIXME: workdir is not needed for a R/O mount */
        err = -EINVAL;
-       if (!ufs->config.upperdir || !ufs->config.lowerdir ||
-           !ufs->config.workdir) {
-               pr_err("overlayfs: missing upperdir or lowerdir or workdir\n");
+       if (!ufs->config.lowerdir) {
+               pr_err("overlayfs: missing 'lowerdir'\n");
                goto out_free_config;
        }
 
-       err = -ENOMEM;
-       oe = ovl_alloc_entry();
-       if (oe == NULL)
-               goto out_free_config;
-
-       err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
-       if (err)
-               goto out_free_oe;
-
-       err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath);
-       if (err)
-               goto out_put_upperpath;
+       sb->s_stack_depth = 0;
+       if (ufs->config.upperdir) {
+               /* FIXME: workdir is not needed for a R/O mount */
+               if (!ufs->config.workdir) {
+                       pr_err("overlayfs: missing 'workdir'\n");
+                       goto out_free_config;
+               }
 
-       err = ovl_mount_dir(ufs->config.workdir, &workpath);
-       if (err)
-               goto out_put_lowerpath;
+               err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
+               if (err)
+                       goto out_free_config;
 
-       err = -EINVAL;
-       if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
-           !S_ISDIR(lowerpath.dentry->d_inode->i_mode) ||
-           !S_ISDIR(workpath.dentry->d_inode->i_mode)) {
-               pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n");
-               goto out_put_workpath;
-       }
+               err = ovl_mount_dir(ufs->config.workdir, &workpath);
+               if (err)
+                       goto out_put_upperpath;
 
-       if (upperpath.mnt != workpath.mnt) {
-               pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
-               goto out_put_workpath;
-       }
-       if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
-               pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
-               goto out_put_workpath;
+               err = -EINVAL;
+               if (upperpath.mnt != workpath.mnt) {
+                       pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
+                       goto out_put_workpath;
+               }
+               if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
+                       pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+                       goto out_put_workpath;
+               }
+               sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth;
        }
-
-       if (!ovl_is_allowed_fs_type(upperpath.dentry)) {
-               pr_err("overlayfs: filesystem of upperdir is not supported\n");
+       err = -ENOMEM;
+       lowertmp = kstrdup(ufs->config.lowerdir, GFP_KERNEL);
+       if (!lowertmp)
                goto out_put_workpath;
-       }
 
-       if (!ovl_is_allowed_fs_type(lowerpath.dentry)) {
-               pr_err("overlayfs: filesystem of lowerdir is not supported\n");
-               goto out_put_workpath;
-       }
+       err = -EINVAL;
+       stacklen = ovl_split_lowerdirs(lowertmp);
+       if (stacklen > OVL_MAX_STACK)
+               goto out_free_lowertmp;
+
+       stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
+       if (!stack)
+               goto out_free_lowertmp;
+
+       lower = lowertmp;
+       for (numlower = 0; numlower < stacklen; numlower++) {
+               err = ovl_lower_dir(lower, &stack[numlower],
+                                   &ufs->lower_namelen, &sb->s_stack_depth);
+               if (err)
+                       goto out_put_lowerpath;
 
-       err = vfs_statfs(&lowerpath, &statfs);
-       if (err) {
-               pr_err("overlayfs: statfs failed on lowerpath\n");
-               goto out_put_workpath;
+               lower = strchr(lower, '\0') + 1;
        }
-       ufs->lower_namelen = statfs.f_namelen;
-
-       sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
-                               lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
 
        err = -EINVAL;
+       sb->s_stack_depth++;
        if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
                pr_err("overlayfs: maximum fs stacking depth exceeded\n");
-               goto out_put_workpath;
+               goto out_put_lowerpath;
        }
 
-       ufs->upper_mnt = clone_private_mount(&upperpath);
-       err = PTR_ERR(ufs->upper_mnt);
-       if (IS_ERR(ufs->upper_mnt)) {
-               pr_err("overlayfs: failed to clone upperpath\n");
-               goto out_put_workpath;
-       }
+       if (ufs->config.upperdir) {
+               ufs->upper_mnt = clone_private_mount(&upperpath);
+               err = PTR_ERR(ufs->upper_mnt);
+               if (IS_ERR(ufs->upper_mnt)) {
+                       pr_err("overlayfs: failed to clone upperpath\n");
+                       goto out_put_lowerpath;
+               }
 
-       ufs->lower_mnt = clone_private_mount(&lowerpath);
-       err = PTR_ERR(ufs->lower_mnt);
-       if (IS_ERR(ufs->lower_mnt)) {
-               pr_err("overlayfs: failed to clone lowerpath\n");
-               goto out_put_upper_mnt;
+               ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
+               err = PTR_ERR(ufs->workdir);
+               if (IS_ERR(ufs->workdir)) {
+                       pr_err("overlayfs: failed to create directory %s/%s\n",
+                              ufs->config.workdir, OVL_WORKDIR_NAME);
+                       goto out_put_upper_mnt;
+               }
        }
 
-       ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
-       err = PTR_ERR(ufs->workdir);
-       if (IS_ERR(ufs->workdir)) {
-               pr_err("overlayfs: failed to create directory %s/%s\n",
-                      ufs->config.workdir, OVL_WORKDIR_NAME);
-               goto out_put_lower_mnt;
-       }
+       err = -ENOMEM;
+       ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL);
+       if (ufs->lower_mnt == NULL)
+               goto out_put_workdir;
+       for (i = 0; i < numlower; i++) {
+               struct vfsmount *mnt = clone_private_mount(&stack[i]);
 
-       /*
-        * Make lower_mnt R/O.  That way fchmod/fchown on lower file
-        * will fail instead of modifying lower fs.
-        */
-       ufs->lower_mnt->mnt_flags |= MNT_READONLY;
+               err = PTR_ERR(mnt);
+               if (IS_ERR(mnt)) {
+                       pr_err("overlayfs: failed to clone lowerpath\n");
+                       goto out_put_lower_mnt;
+               }
+               /*
+                * Make lower_mnt R/O.  That way fchmod/fchown on lower file
+                * will fail instead of modifying lower fs.
+                */
+               mnt->mnt_flags |= MNT_READONLY;
+
+               ufs->lower_mnt[ufs->numlower] = mnt;
+               ufs->numlower++;
+       }
 
-       /* If the upper fs is r/o, we mark overlayfs r/o too */
-       if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
+       /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */
+       if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))
                sb->s_flags |= MS_RDONLY;
 
        sb->s_d_op = &ovl_dentry_operations;
 
        err = -ENOMEM;
-       root_inode = ovl_new_inode(sb, S_IFDIR, oe);
-       if (!root_inode)
-               goto out_put_workdir;
+       oe = ovl_alloc_entry(numlower);
+       if (!oe)
+               goto out_put_lower_mnt;
 
-       root_dentry = d_make_root(root_inode);
+       root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
        if (!root_dentry)
-               goto out_put_workdir;
+               goto out_free_oe;
 
        mntput(upperpath.mnt);
-       mntput(lowerpath.mnt);
+       for (i = 0; i < numlower; i++)
+               mntput(stack[i].mnt);
        path_put(&workpath);
+       kfree(lowertmp);
 
        oe->__upperdentry = upperpath.dentry;
-       oe->lowerdentry = lowerpath.dentry;
+       for (i = 0; i < numlower; i++) {
+               oe->lowerstack[i].dentry = stack[i].dentry;
+               oe->lowerstack[i].mnt = ufs->lower_mnt[i];
+       }
 
        root_dentry->d_fsdata = oe;
 
@@ -782,20 +968,26 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        return 0;
 
+out_free_oe:
+       kfree(oe);
+out_put_lower_mnt:
+       for (i = 0; i < ufs->numlower; i++)
+               mntput(ufs->lower_mnt[i]);
+       kfree(ufs->lower_mnt);
 out_put_workdir:
        dput(ufs->workdir);
-out_put_lower_mnt:
-       mntput(ufs->lower_mnt);
 out_put_upper_mnt:
        mntput(ufs->upper_mnt);
+out_put_lowerpath:
+       for (i = 0; i < numlower; i++)
+               path_put(&stack[i]);
+       kfree(stack);
+out_free_lowertmp:
+       kfree(lowertmp);
 out_put_workpath:
        path_put(&workpath);
-out_put_lowerpath:
-       path_put(&lowerpath);
 out_put_upperpath:
        path_put(&upperpath);
-out_free_oe:
-       kfree(oe);
 out_free_config:
        kfree(ufs->config.lowerdir);
        kfree(ufs->config.upperdir);
index 0855f772cd41599d6c1d1091e7da616d32cccf53..3a48bb789c9f3e4eb227214f72798b90c52af5dc 100644 (file)
@@ -564,13 +564,11 @@ posix_acl_create(struct inode *dir, umode_t *mode,
 
        *acl = posix_acl_clone(p, GFP_NOFS);
        if (!*acl)
-               return -ENOMEM;
+               goto no_mem;
 
        ret = posix_acl_create_masq(*acl, mode);
-       if (ret < 0) {
-               posix_acl_release(*acl);
-               return -ENOMEM;
-       }
+       if (ret < 0)
+               goto no_mem_clone;
 
        if (ret == 0) {
                posix_acl_release(*acl);
@@ -591,6 +589,12 @@ no_acl:
        *default_acl = NULL;
        *acl = NULL;
        return 0;
+
+no_mem_clone:
+       posix_acl_release(*acl);
+no_mem:
+       posix_acl_release(p);
+       return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(posix_acl_create);
 
@@ -772,7 +776,7 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name,
 
        if (!IS_POSIXACL(dentry->d_inode))
                return -EOPNOTSUPP;
-       if (S_ISLNK(dentry->d_inode->i_mode))
+       if (d_is_symlink(dentry))
                return -EOPNOTSUPP;
 
        acl = get_acl(dentry->d_inode, type);
@@ -832,7 +836,7 @@ posix_acl_xattr_list(struct dentry *dentry, char *list, size_t list_size,
 
        if (!IS_POSIXACL(dentry->d_inode))
                return -EOPNOTSUPP;
-       if (S_ISLNK(dentry->d_inode->i_mode))
+       if (d_is_symlink(dentry))
                return -EOPNOTSUPP;
 
        if (type == ACL_TYPE_ACCESS)
index 3309f59d421ba6e5b806651dcc1d926143470953..be65b208213518f267d9fe16b5652b5021700cf5 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/mount.h>
 #include <linux/init.h>
 #include <linux/idr.h>
-#include <linux/namei.h>
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -223,17 +222,6 @@ void proc_free_inum(unsigned int inum)
        spin_unlock_irqrestore(&proc_inum_lock, flags);
 }
 
-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       nd_set_link(nd, __PDE_DATA(dentry->d_inode));
-       return NULL;
-}
-
-static const struct inode_operations proc_link_inode_operations = {
-       .readlink       = generic_readlink,
-       .follow_link    = proc_follow_link,
-};
-
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
index 13a50a32652dc868ab94084a1e07fb4a809e43da..7697b6621cfd5b13051318ba15920646942ed528 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
 };
 #endif
 
+static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+       struct proc_dir_entry *pde = PDE(dentry->d_inode);
+       if (unlikely(!use_pde(pde)))
+               return ERR_PTR(-EINVAL);
+       nd_set_link(nd, pde->data);
+       return pde;
+}
+
+static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+{
+       unuse_pde(p);
+}
+
+const struct inode_operations proc_link_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = proc_follow_link,
+       .put_link       = proc_put_link,
+};
+
 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
        struct inode *inode = new_inode_pseudo(sb);
index 6fcdba573e0fa2471e668e96217f366bed749050..c835b94c0cd3afec0bea4017ca8bacd63b32ff8e 100644 (file)
@@ -200,6 +200,7 @@ struct pde_opener {
        int closing;
        struct completion *c;
 };
+extern const struct inode_operations proc_link_inode_operations;
 
 extern const struct inode_operations proc_pid_link_inode_operations;
 
index 04b06146bae224f0f9177da58fee0c6fd6d3e747..4e781e697c90bce3b42f0e0097fe46965bbb5258 100644 (file)
@@ -266,7 +266,7 @@ static int reiserfs_for_each_xattr(struct inode *inode,
                for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
                        struct dentry *dentry = buf.dentries[i];
 
-                       if (!S_ISDIR(dentry->d_inode->i_mode))
+                       if (!d_is_dir(dentry))
                                err = action(dentry, data);
 
                        dput(dentry);
@@ -322,7 +322,7 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
        struct inode *dir = dentry->d_parent->d_inode;
 
        /* This is the xattr dir, handle specially. */
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                return xattr_rmdir(dir, dentry);
 
        return xattr_unlink(dir, dentry);
index 65a53efc1cf4a5d5ce5cb7c6eca39fa28b0d2f7f..2b7dc90ccdbb4ae1ceac7725967cec2a643e21c7 100644 (file)
@@ -71,7 +71,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;
 
-       if (!grab_super_passive(sb))
+       if (!trylock_super(sb))
                return SHRINK_STOP;
 
        if (sb->s_op->nr_cached_objects)
@@ -105,7 +105,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
                freed += sb->s_op->free_cached_objects(sb, sc);
        }
 
-       drop_super(sb);
+       up_read(&sb->s_umount);
        return freed;
 }
 
@@ -118,7 +118,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
        sb = container_of(shrink, struct super_block, s_shrink);
 
        /*
-        * Don't call grab_super_passive as it is a potential
+        * Don't call trylock_super as it is a potential
         * scalability bottleneck. The counts could get updated
         * between super_cache_count and super_cache_scan anyway.
         * Call to super_cache_count with shrinker_rwsem held
@@ -348,35 +348,31 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 }
 
 /*
- *     grab_super_passive - acquire a passive reference
+ *     trylock_super - try to grab ->s_umount shared
  *     @sb: reference we are trying to grab
  *
- *     Tries to acquire a passive reference. This is used in places where we
+ *     Try to prevent fs shutdown.  This is used in places where we
  *     cannot take an active reference but we need to ensure that the
- *     superblock does not go away while we are working on it. It returns
- *     false if a reference was not gained, and returns true with the s_umount
- *     lock held in read mode if a reference is gained. On successful return,
- *     the caller must drop the s_umount lock and the passive reference when
- *     done.
+ *     filesystem is not shut down while we are working on it. It returns
+ *     false if we cannot acquire s_umount or if we lose the race and
+ *     filesystem already got into shutdown, and returns true with the s_umount
+ *     lock held in read mode in case of success. On successful return,
+ *     the caller must drop the s_umount lock when done.
+ *
+ *     Note that unlike get_super() et.al. this one does *not* bump ->s_count.
+ *     The reason why it's safe is that we are OK with doing trylock instead
+ *     of down_read().  There's a couple of places that are OK with that, but
+ *     it's very much not a general-purpose interface.
  */
-bool grab_super_passive(struct super_block *sb)
+bool trylock_super(struct super_block *sb)
 {
-       spin_lock(&sb_lock);
-       if (hlist_unhashed(&sb->s_instances)) {
-               spin_unlock(&sb_lock);
-               return false;
-       }
-
-       sb->s_count++;
-       spin_unlock(&sb_lock);
-
        if (down_read_trylock(&sb->s_umount)) {
-               if (sb->s_root && (sb->s_flags & MS_BORN))
+               if (!hlist_unhashed(&sb->s_instances) &&
+                   sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }
 
-       put_super(sb);
        return false;
 }
 
index d61799949580a497ccae45883cb2f4c3f8e34495..df6828570e874ae423c48309f51a91d16ce949fd 100644 (file)
@@ -121,3 +121,4 @@ xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
 xfs-$(CONFIG_PROC_FS)          += xfs_stats.o
 xfs-$(CONFIG_SYSCTL)           += xfs_sysctl.o
 xfs-$(CONFIG_COMPAT)           += xfs_ioctl32.o
+xfs-$(CONFIG_NFSD_PNFS)                += xfs_pnfs.o
index 5eb4a14e0a0fdc53d45652a3f84b323f040aa2f1..b97359ba2648f12fca1bfa98e25be6efc719c8e3 100644 (file)
@@ -30,6 +30,7 @@
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_log.h"
+#include "xfs_pnfs.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
@@ -245,4 +246,9 @@ const struct export_operations xfs_export_operations = {
        .fh_to_parent           = xfs_fs_fh_to_parent,
        .get_parent             = xfs_fs_get_parent,
        .commit_metadata        = xfs_fs_nfs_commit_metadata,
+#ifdef CONFIG_NFSD_PNFS
+       .get_uuid               = xfs_fs_get_uuid,
+       .map_blocks             = xfs_fs_map_blocks,
+       .commit_blocks          = xfs_fs_commit_blocks,
+#endif
 };
index 1cdba95c78cb3e2475de29e0b6d88df3604e4cdf..a2e1cb8a568bf9d45e32c43539a2e6f8b56d83f4 100644 (file)
@@ -36,6 +36,7 @@
 #include "xfs_trace.h"
 #include "xfs_log.h"
 #include "xfs_icache.h"
+#include "xfs_pnfs.h"
 
 #include <linux/aio.h>
 #include <linux/dcache.h>
@@ -396,7 +397,8 @@ STATIC int                          /* error (positive) */
 xfs_zero_last_block(
        struct xfs_inode        *ip,
        xfs_fsize_t             offset,
-       xfs_fsize_t             isize)
+       xfs_fsize_t             isize,
+       bool                    *did_zeroing)
 {
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           last_fsb = XFS_B_TO_FSBT(mp, isize);
@@ -424,6 +426,7 @@ xfs_zero_last_block(
        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
+       *did_zeroing = true;
        return xfs_iozero(ip, isize, zero_len);
 }
 
@@ -442,7 +445,8 @@ int                                 /* error (positive) */
 xfs_zero_eof(
        struct xfs_inode        *ip,
        xfs_off_t               offset,         /* starting I/O offset */
-       xfs_fsize_t             isize)          /* current inode size */
+       xfs_fsize_t             isize,          /* current inode size */
+       bool                    *did_zeroing)
 {
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_zero_fsb;
@@ -464,7 +468,7 @@ xfs_zero_eof(
         * We only zero a part of that block so it is handled specially.
         */
        if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
-               error = xfs_zero_last_block(ip, offset, isize);
+               error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
                if (error)
                        return error;
        }
@@ -524,6 +528,7 @@ xfs_zero_eof(
                if (error)
                        return error;
 
+               *did_zeroing = true;
                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
        }
@@ -554,6 +559,10 @@ restart:
        if (error)
                return error;
 
+       error = xfs_break_layouts(inode, iolock);
+       if (error)
+               return error;
+
        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
@@ -562,13 +571,15 @@ restart:
         * having to redo all checks before.
         */
        if (*pos > i_size_read(inode)) {
+               bool    zero = false;
+
                if (*iolock == XFS_IOLOCK_SHARED) {
                        xfs_rw_iunlock(ip, *iolock);
                        *iolock = XFS_IOLOCK_EXCL;
                        xfs_rw_ilock(ip, *iolock);
                        goto restart;
                }
-               error = xfs_zero_eof(ip, *pos, i_size_read(inode));
+               error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
                if (error)
                        return error;
        }
@@ -822,6 +833,7 @@ xfs_file_fallocate(
        struct xfs_inode        *ip = XFS_I(inode);
        long                    error;
        enum xfs_prealloc_flags flags = 0;
+       uint                    iolock = XFS_IOLOCK_EXCL;
        loff_t                  new_size = 0;
 
        if (!S_ISREG(inode->i_mode))
@@ -830,7 +842,11 @@ xfs_file_fallocate(
                     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
                return -EOPNOTSUPP;
 
-       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+       xfs_ilock(ip, iolock);
+       error = xfs_break_layouts(inode, &iolock);
+       if (error)
+               goto out_unlock;
+
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                error = xfs_free_file_space(ip, offset, len);
                if (error)
@@ -894,7 +910,7 @@ xfs_file_fallocate(
        }
 
 out_unlock:
-       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       xfs_iunlock(ip, iolock);
        return error;
 }
 
index fba6532efba44d0d7114baa24e358716aeb3c922..74efe5b760dcc2907280db8e374afe6f5914d7b3 100644 (file)
@@ -602,6 +602,12 @@ xfs_growfs_data(
        if (!mutex_trylock(&mp->m_growlock))
                return -EWOULDBLOCK;
        error = xfs_growfs_data_private(mp, in);
+       /*
+        * Increment the generation unconditionally, the error could be from
+        * updating the secondary superblocks, in which case the new size
+        * is live already.
+        */
+       mp->m_generation++;
        mutex_unlock(&mp->m_growlock);
        return error;
 }
index daafa1f6d2607722b338c8b5da458a979558d9b3..6163767aa8562f6d611a1442ed4f299aa85bceea 100644 (file)
@@ -2867,6 +2867,10 @@ xfs_rename(
         * Handle RENAME_EXCHANGE flags
         */
        if (flags & RENAME_EXCHANGE) {
+               if (target_ip == NULL) {
+                       error = -EINVAL;
+                       goto error_return;
+               }
                error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
                                         target_dp, target_name, target_ip,
                                         &free_list, &first_block, spaceres);
index 86cd6b39bed7be1dc4bd72be82e9a40b9c8b9825..a1cd55f3f351e1361e2a3ea790f88f5f5070e7e3 100644 (file)
@@ -384,10 +384,11 @@ enum xfs_prealloc_flags {
        XFS_PREALLOC_INVISIBLE  = (1 << 4),
 };
 
-int            xfs_update_prealloc_flags(struct xfs_inode *,
-                       enum xfs_prealloc_flags);
-int            xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
-int            xfs_iozero(struct xfs_inode *, loff_t, size_t);
+int    xfs_update_prealloc_flags(struct xfs_inode *ip,
+                                 enum xfs_prealloc_flags flags);
+int    xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
+                    xfs_fsize_t isize, bool *did_zeroing);
+int    xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
 
 
 #define IHOLD(ip) \
index f7afb86c91487fc0a89c98578a28b3a1dfda83e0..ac4feae45eb308c39629f177c0b6620fae77fb69 100644 (file)
@@ -39,6 +39,7 @@
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_trans.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -286,7 +287,7 @@ xfs_readlink_by_handle(
                return PTR_ERR(dentry);
 
        /* Restrict this handle operation to symlinks only. */
-       if (!S_ISLNK(dentry->d_inode->i_mode)) {
+       if (!d_is_symlink(dentry)) {
                error = -EINVAL;
                goto out_dput;
        }
@@ -608,6 +609,7 @@ xfs_ioc_space(
 {
        struct iattr            iattr;
        enum xfs_prealloc_flags flags = 0;
+       uint                    iolock = XFS_IOLOCK_EXCL;
        int                     error;
 
        /*
@@ -636,7 +638,10 @@ xfs_ioc_space(
        if (error)
                return error;
 
-       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+       xfs_ilock(ip, iolock);
+       error = xfs_break_layouts(inode, &iolock);
+       if (error)
+               goto out_unlock;
 
        switch (bf->l_whence) {
        case 0: /*SEEK_SET*/
@@ -725,7 +730,7 @@ xfs_ioc_space(
        error = xfs_update_prealloc_flags(ip, flags);
 
 out_unlock:
-       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       xfs_iunlock(ip, iolock);
        mnt_drop_write_file(filp);
        return error;
 }
index ce80eeb8faa472fd225571de4df196dd6d689693..e53a903314225c030c45f694b4ffdaa509fa1ce8 100644 (file)
@@ -37,6 +37,7 @@
 #include "xfs_da_btree.h"
 #include "xfs_dir2.h"
 #include "xfs_trans_space.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
@@ -505,7 +506,7 @@ xfs_setattr_mode(
        inode->i_mode |= mode & ~S_IFMT;
 }
 
-static void
+void
 xfs_setattr_time(
        struct xfs_inode        *ip,
        struct iattr            *iattr)
@@ -750,6 +751,7 @@ xfs_setattr_size(
        int                     error;
        uint                    lock_flags = 0;
        uint                    commit_flags = 0;
+       bool                    did_zeroing = false;
 
        trace_xfs_setattr(ip);
 
@@ -793,20 +795,16 @@ xfs_setattr_size(
                return error;
 
        /*
-        * Now we can make the changes.  Before we join the inode to the
-        * transaction, take care of the part of the truncation that must be
-        * done without the inode lock.  This needs to be done before joining
-        * the inode to the transaction, because the inode cannot be unlocked
-        * once it is a part of the transaction.
+        * File data changes must be complete before we start the transaction to
+        * modify the inode.  This needs to be done before joining the inode to
+        * the transaction because the inode cannot be unlocked once it is a
+        * part of the transaction.
+        *
+        * Start with zeroing any data block beyond EOF that we may expose on
+        * file extension.
         */
        if (newsize > oldsize) {
-               /*
-                * Do the first part of growing a file: zero any data in the
-                * last block that is beyond the old EOF.  We need to do this
-                * before the inode is joined to the transaction to modify
-                * i_size.
-                */
-               error = xfs_zero_eof(ip, newsize, oldsize);
+               error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
                if (error)
                        return error;
        }
@@ -816,23 +814,18 @@ xfs_setattr_size(
         * any previous writes that are beyond the on disk EOF and the new
         * EOF that have not been written out need to be written here.  If we
         * do not write the data out, we expose ourselves to the null files
-        * problem.
-        *
-        * Only flush from the on disk size to the smaller of the in memory
-        * file size or the new size as that's the range we really care about
-        * here and prevents waiting for other data not within the range we
-        * care about here.
+        * problem. Note that this includes any block zeroing we did above;
+        * otherwise those blocks may not be zeroed after a crash.
         */
-       if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
+       if (newsize > ip->i_d.di_size &&
+           (oldsize != ip->i_d.di_size || did_zeroing)) {
                error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                                      ip->i_d.di_size, newsize);
                if (error)
                        return error;
        }
 
-       /*
-        * Wait for all direct I/O to complete.
-        */
+       /* Now wait for all direct I/O to complete. */
        inode_dio_wait(inode);
 
        /*
@@ -979,9 +972,13 @@ xfs_vn_setattr(
        int                     error;
 
        if (iattr->ia_valid & ATTR_SIZE) {
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               error = xfs_setattr_size(ip, iattr);
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+               uint            iolock = XFS_IOLOCK_EXCL;
+
+               xfs_ilock(ip, iolock);
+               error = xfs_break_layouts(dentry->d_inode, &iolock);
+               if (!error)
+                       error = xfs_setattr_size(ip, iattr);
+               xfs_iunlock(ip, iolock);
        } else {
                error = xfs_setattr_nonsize(ip, iattr, 0);
        }
index 1c34e4335920021d5829c5507be2f2c6c6bf60e1..ea7a98e9cb7048820bfa69c5bc294ccbe4102709 100644 (file)
@@ -32,6 +32,7 @@ extern void xfs_setup_inode(struct xfs_inode *);
  */
 #define XFS_ATTR_NOACL         0x01    /* Don't call posix_acl_chmod */
 
+extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
 extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
                               int flags);
 extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
index a5b2ff8226535d44443ce08369162f01e1d0cce2..0d8abd6364d97e613d98d4f827814c7eb7beb207 100644 (file)
@@ -174,6 +174,17 @@ typedef struct xfs_mount {
        struct workqueue_struct *m_reclaim_workqueue;
        struct workqueue_struct *m_log_workqueue;
        struct workqueue_struct *m_eofblocks_workqueue;
+
+       /*
+        * Generation of the filesystem layout.  This is incremented by each
+        * growfs, and used by the pNFS server to ensure the client updates
+        * its view of the block device once it gets a layout that might
+        * reference the newly added blocks.  Does not need to be persistent
+        * as long as we only allow file system size increments, but if we
+        * ever support shrinks it would have to be persisted in addition
+        * to various other kinds of pain inflicted on the pNFS server.
+        */
+       __uint32_t              m_generation;
 } xfs_mount_t;
 
 /*
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
new file mode 100644 (file)
index 0000000..365dd57
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include "xfs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_error.h"
+#include "xfs_iomap.h"
+#include "xfs_shared.h"
+#include "xfs_bit.h"
+#include "xfs_pnfs.h"
+
+/*
+ * Ensure that we do not have any outstanding pNFS layouts that can be used by
+ * clients to directly read from or write to this inode.  This must be called
+ * before every operation that can remove blocks from the extent map.
+ * Additionally we call it during the write operation, where we aren't concerned
+ * about exposing unallocated blocks but just want to provide basic
+ * synchronization between a local writer and pNFS clients.  mmap writes would
+ * also benefit from this sort of synchronization, but due to the tricky locking
+ * rules in the page fault path we don't bother.
+ */
+int
+xfs_break_layouts(
+	struct inode		*inode,
+	uint			*iolock)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	int			error;
+
+	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
+
+	/*
+	 * Note the parenthesization: assign the result of the non-blocking
+	 * break_layout() call to "error" first, *then* compare it against
+	 * -EWOULDBLOCK.  The previous form compared first and assigned the
+	 * boolean result to "error", which turned any real error from the
+	 * non-blocking call (e.g. -ENOMEM) into a bogus return of 0.
+	 */
+	while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
+		/*
+		 * A layout is outstanding: drop the iolock, wait for the
+		 * recall to complete, and retake the lock exclusively
+		 * before retrying.
+		 */
+		xfs_iunlock(ip, *iolock);
+		error = break_layout(inode, true);
+		*iolock = XFS_IOLOCK_EXCL;
+		xfs_ilock(ip, *iolock);
+	}
+
+	return error;
+}
+
+/*
+ * Get a unique ID including its location so that the client can identify
+ * the exported device.
+ */
+int
+xfs_fs_get_uuid(
+	struct super_block	*sb,
+	u8			*buf,
+	u32			*len,
+	u64			*offset)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+
+	/* Warn once per boot that the pNFS export path is experimental. */
+	printk_once(KERN_NOTICE
+"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
+		mp->m_fsname);
+
+	/* The caller's buffer must be able to hold a full uuid_t. */
+	if (*len < sizeof(uuid_t))
+		return -EINVAL;
+
+	/*
+	 * Hand back the superblock UUID together with its byte offset
+	 * inside the on-disk superblock, so the client can locate and
+	 * verify the exported device.
+	 */
+	memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
+	*len = sizeof(uuid_t);
+	*offset = offsetof(struct xfs_dsb, sb_uuid);
+	return 0;
+}
+
+static void
+xfs_bmbt_to_iomap(
+	struct xfs_inode	*ip,
+	struct iomap		*iomap,
+	struct xfs_bmbt_irec	*imap)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fsblock_t		startblock = imap->br_startblock;
+
+	/*
+	 * Classify the extent for the layout: holes and delalloc extents
+	 * carry no physical address, everything else is translated to a
+	 * disk address and marked written or unwritten.
+	 */
+	if (startblock == HOLESTARTBLOCK) {
+		iomap->blkno = IOMAP_NULL_BLOCK;
+		iomap->type = IOMAP_HOLE;
+	} else if (startblock == DELAYSTARTBLOCK) {
+		iomap->blkno = IOMAP_NULL_BLOCK;
+		iomap->type = IOMAP_DELALLOC;
+	} else {
+		iomap->blkno = XFS_FSB_TO_DADDR(mp, startblock);
+		iomap->type = (imap->br_state == XFS_EXT_UNWRITTEN) ?
+				IOMAP_UNWRITTEN : IOMAP_MAPPED;
+	}
+
+	/* File offset and length are reported in bytes, not fsblocks. */
+	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
+	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
+}
+
+/*
+ * Get a layout for the pNFS client.
+ */
+int
+xfs_fs_map_blocks(
+	struct inode		*inode,
+	loff_t			offset,
+	u64			length,
+	struct iomap		*iomap,
+	bool			write,
+	u32			*device_generation)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_bmbt_irec	imap;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	loff_t			limit;
+	int			bmapi_flags = XFS_BMAPI_ENTIRE;
+	int			nimaps = 1;
+	uint			lock_flags;
+	int			error = 0;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
+	/*
+	 * We can't export inodes residing on the realtime device.  The
+	 * realtime device doesn't have a UUID to identify it, so the client
+	 * has no way to find it.
+	 */
+	if (XFS_IS_REALTIME_INODE(ip))
+		return -ENXIO;
+
+	/*
+	 * Lock out any other I/O before we flush and invalidate the
+	 * pagecache, and then hand out a layout to the remote system.
+	 * This is very similar to direct I/O, except that the
+	 * synchronization is much more complicated.  See the comment near
+	 * xfs_break_layouts for a detailed explanation.
+	 */
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+	/*
+	 * Clamp the request to the valid range: writes are bounded by
+	 * s_maxbytes, reads additionally by the block-aligned EOF.
+	 */
+	error = -EINVAL;
+	limit = mp->m_super->s_maxbytes;
+	if (!write)
+		limit = max(limit, round_up(i_size_read(inode),
+				     inode->i_sb->s_blocksize));
+	if (offset > limit)
+		goto out_unlock;
+	if (offset > limit - length)
+		length = limit - offset;
+
+	error = filemap_write_and_wait(inode->i_mapping);
+	if (error)
+		goto out_unlock;
+	/*
+	 * Fix versus the original: do not return with XFS_IOLOCK_EXCL
+	 * still held when the invalidation fails - unwind through
+	 * out_unlock so the iolock is always dropped on error.
+	 */
+	error = invalidate_inode_pages2(inode->i_mapping);
+	if (WARN_ON_ONCE(error))
+		goto out_unlock;
+
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+	lock_flags = xfs_ilock_data_map_shared(ip);
+	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+				&imap, &nimaps, bmapi_flags);
+	xfs_iunlock(ip, lock_flags);
+
+	if (error)
+		goto out_unlock;
+
+	if (write) {
+		enum xfs_prealloc_flags	flags = 0;
+
+		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+
+		if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
+			/* Allocate real blocks so the client can write. */
+			error = xfs_iomap_write_direct(ip, offset, length,
+						       &imap, nimaps);
+			if (error)
+				goto out_unlock;
+
+			/*
+			 * Ensure the next transaction is committed
+			 * synchronously so that the blocks allocated and
+			 * handed out to the client are guaranteed to be
+			 * present even after a server crash.
+			 */
+			flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
+		}
+
+		error = xfs_update_prealloc_flags(ip, flags);
+		if (error)
+			goto out_unlock;
+	}
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+
+	xfs_bmbt_to_iomap(ip, iomap, &imap);
+	*device_generation = mp->m_generation;
+	return error;
+out_unlock:
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
+}
+
+/*
+ * Ensure the size update falls into a valid allocated block.
+ */
+static int
+xfs_pnfs_validate_isize(
+	struct xfs_inode	*ip,
+	xfs_off_t		isize)
+{
+	struct xfs_bmbt_irec	irec;
+	int			nimaps = 1;
+	int			error;
+
+	/* Map the single extent that covers the last byte of the new size. */
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
+				&irec, &nimaps, 0);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	if (error)
+		return error;
+
+	/*
+	 * The size update must land in a written, allocated block: a hole,
+	 * delalloc or unwritten extent here would mean the client's size
+	 * update does not match stable on-disk data, so reject it.
+	 */
+	if (irec.br_startblock == HOLESTARTBLOCK ||
+	    irec.br_startblock == DELAYSTARTBLOCK ||
+	    irec.br_state == XFS_EXT_UNWRITTEN)
+		return -EIO;
+	return 0;
+}
+
+/*
+ * Make sure the blocks described by maps are stable on disk.  This includes
+ * converting any unwritten extents, flushing the disk cache and updating the
+ * time stamps.
+ *
+ * Note that we rely on the caller to always send us a timestamp update so that
+ * we always commit a transaction here.  If that stops being true we will have
+ * to manually flush the cache here similar to what the fsync code path does
+ * for datasyncs on files that have no dirty metadata.
+ */
+int
+xfs_fs_commit_blocks(
+	struct inode		*inode,
+	struct iomap		*maps,
+	int			nr_maps,
+	struct iattr		*iattr)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	bool			update_isize = false;
+	int			error, i;
+	loff_t			size;
+
+	/* The caller must always send a timestamp update (see comment above). */
+	ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));
+
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+	/* An i_size update only applies if it grows the file. */
+	size = i_size_read(inode);
+	if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
+		update_isize = true;
+		size = iattr->ia_size;
+	}
+
+	for (i = 0; i < nr_maps; i++) {
+		u64 start, length, end;
+
+		/* Clamp each map to the (possibly updated) file size. */
+		start = maps[i].offset;
+		if (start > size)
+			continue;
+
+		end = start + maps[i].length;
+		if (end > size)
+			end = size;
+
+		length = end - start;
+		if (!length)
+			continue;
+
+		/*
+		 * Make sure reads through the pagecache see the new data.
+		 */
+		error = invalidate_inode_pages2_range(inode->i_mapping,
+					start >> PAGE_CACHE_SHIFT,
+					(end - 1) >> PAGE_CACHE_SHIFT);
+		WARN_ON_ONCE(error);
+
+		/* Convert the written range to a normal (written) extent. */
+		error = xfs_iomap_write_unwritten(ip, start, length);
+		if (error)
+			goto out_drop_iolock;
+	}
+
+	if (update_isize) {
+		error = xfs_pnfs_validate_isize(ip, size);
+		if (error)
+			goto out_drop_iolock;
+	}
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		goto out_drop_iolock;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+	xfs_setattr_time(ip, iattr);
+	if (update_isize) {
+		i_size_write(inode, iattr->ia_size);
+		ip->i_d.di_size = iattr->ia_size;
+	}
+
+	/* Commit synchronously so the client-visible data is stable on disk. */
+	xfs_trans_set_sync(tp);
+	error = xfs_trans_commit(tp, 0);
+
+out_drop_iolock:
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
+}
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
new file mode 100644 (file)
index 0000000..b7fbfce
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _XFS_PNFS_H
+#define _XFS_PNFS_H 1
+
+#ifdef CONFIG_NFSD_PNFS
+int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
+int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
+               struct iomap *iomap, bool write, u32 *device_generation);
+int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
+               struct iattr *iattr);
+
+int xfs_break_layouts(struct inode *inode, uint *iolock);
+#else
+static inline int xfs_break_layouts(struct inode *inode, uint *iolock)
+{
+       return 0;
+}
+#endif /* CONFIG_NFSD_PNFS */
+#endif /* _XFS_PNFS_H */
index 53cc2aaf8d2bdfedc12247ae5055feaa95ae31a6..fbbb9e62e274b525a03e7cb1373cae35b49ace1b 100644 (file)
@@ -836,6 +836,11 @@ xfs_qm_reset_dqcounts(
                 */
                xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
                            "xfs_quotacheck");
+               /*
+                * Reset type in case we are reusing group quota file for
+                * project quotas or vice versa
+                */
+               ddq->d_flags = type;
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;
diff --git a/include/acpi/acpi_lpat.h b/include/acpi/acpi_lpat.h
new file mode 100644 (file)
index 0000000..da37e12
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * acpi_lpat.h - LPAT table processing functions
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ACPI_LPAT_H
+#define ACPI_LPAT_H
+
+struct acpi_lpat {
+       int temp;
+       int raw;
+};
+
+struct acpi_lpat_conversion_table {
+       struct acpi_lpat *lpat;
+       int lpat_count;
+};
+
+#ifdef CONFIG_ACPI
+
+int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+                         int raw);
+int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+                         int temp);
+struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
+                                                                 handle);
+void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+                                    *lpat_table);
+
+#else
+/* Stub for !CONFIG_ACPI; "inline" avoids defined-but-unused warnings. */
+static inline int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+					int raw)
+{
+	return 0;
+}
+
+/* Stub for !CONFIG_ACPI; "inline" avoids defined-but-unused warnings. */
+static inline int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+					int temp)
+{
+	return 0;
+}
+
+/* Stub for !CONFIG_ACPI; "inline" avoids defined-but-unused warnings. */
+static inline struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(
+							acpi_handle handle)
+{
+	return NULL;
+}
+
+/* Stub for !CONFIG_ACPI; "inline" avoids defined-but-unused warnings. */
+static inline void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+						   *lpat_table)
+{
+}
+
+#endif
+#endif
index ce37349860fece8cdf991165db8c8849a677e270..7389c87116a09092cf7b8e70f6119a5efc03a03c 100644 (file)
@@ -15,6 +15,9 @@ struct pci_dev;
 #ifdef CONFIG_PCI
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+                                    unsigned long offset,
+                                    unsigned long maxlen);
 /* Create a virtual mapping cookie for a port on a given PCI device.
  * Do not call this directly, it exists to make it easier for architectures
  * to override */
@@ -30,6 +33,13 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned lon
 {
        return NULL;
 }
+
+static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+                                           unsigned long offset,
+                                           unsigned long maxlen)
+{
+       return NULL;
+}
 #endif
 
 #endif /* __ASM_GENERIC_IO_H */
index 180ad0e6de21dd3f0332335f90c122692a553838..d016dc57f0073eede1a5467b798fa35dfa779da5 100644 (file)
        INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
 
 #define _INTEL_BDW_M_IDS(gt, info) \
-       _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
+       _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
        _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
-       _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
+       _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
        _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
 
 #define _INTEL_BDW_D_IDS(gt, info) \
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
new file mode 100644 (file)
index 0000000..04e8db2
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ASM9260_H
+#define _DT_BINDINGS_CLK_ASM9260_H
+
+/* ahb gate */
+#define CLKID_AHB_ROM          0
+#define CLKID_AHB_RAM          1
+#define CLKID_AHB_GPIO         2
+#define CLKID_AHB_MAC          3
+#define CLKID_AHB_EMI          4
+#define CLKID_AHB_USB0         5
+#define CLKID_AHB_USB1         6
+#define CLKID_AHB_DMA0         7
+#define CLKID_AHB_DMA1         8
+#define CLKID_AHB_UART0                9
+#define CLKID_AHB_UART1                10
+#define CLKID_AHB_UART2                11
+#define CLKID_AHB_UART3                12
+#define CLKID_AHB_UART4                13
+#define CLKID_AHB_UART5                14
+#define CLKID_AHB_UART6                15
+#define CLKID_AHB_UART7                16
+#define CLKID_AHB_UART8                17
+#define CLKID_AHB_UART9                18
+#define CLKID_AHB_I2S0         19
+#define CLKID_AHB_I2C0         20
+#define CLKID_AHB_I2C1         21
+#define CLKID_AHB_SSP0         22
+#define CLKID_AHB_IOCONFIG     23
+#define CLKID_AHB_WDT          24
+#define CLKID_AHB_CAN0         25
+#define CLKID_AHB_CAN1         26
+#define CLKID_AHB_MPWM         27
+#define CLKID_AHB_SPI0         28
+#define CLKID_AHB_SPI1         29
+#define CLKID_AHB_QEI          30
+#define CLKID_AHB_QUADSPI0     31
+#define CLKID_AHB_CAMIF                32
+#define CLKID_AHB_LCDIF                33
+#define CLKID_AHB_TIMER0       34
+#define CLKID_AHB_TIMER1       35
+#define CLKID_AHB_TIMER2       36
+#define CLKID_AHB_TIMER3       37
+#define CLKID_AHB_IRQ          38
+#define CLKID_AHB_RTC          39
+#define CLKID_AHB_NAND         40
+#define CLKID_AHB_ADC0         41
+#define CLKID_AHB_LED          42
+#define CLKID_AHB_DAC0         43
+#define CLKID_AHB_LCD          44
+#define CLKID_AHB_I2S1         45
+#define CLKID_AHB_MAC1         46
+
+/* divider */
+#define CLKID_SYS_CPU          47
+#define CLKID_SYS_AHB          48
+#define CLKID_SYS_I2S0M                49
+#define CLKID_SYS_I2S0S                50
+#define CLKID_SYS_I2S1M                51
+#define CLKID_SYS_I2S1S                52
+#define CLKID_SYS_UART0                53
+#define CLKID_SYS_UART1                54
+#define CLKID_SYS_UART2                55
+#define CLKID_SYS_UART3                56
+#define CLKID_SYS_UART4                56 /* FIXME: duplicates CLKID_SYS_UART3 (56) - verify; DT-binding values are ABI, do not renumber silently */
+#define CLKID_SYS_UART5                57
+#define CLKID_SYS_UART6                58
+#define CLKID_SYS_UART7                59
+#define CLKID_SYS_UART8                60
+#define CLKID_SYS_UART9                61
+#define CLKID_SYS_SPI0         62
+#define CLKID_SYS_SPI1         63
+#define CLKID_SYS_QUADSPI      64
+#define CLKID_SYS_SSP0         65
+#define CLKID_SYS_NAND         66
+#define CLKID_SYS_TRACE                67
+#define CLKID_SYS_CAMM         68
+#define CLKID_SYS_WDT          69
+#define CLKID_SYS_CLKOUT       70
+#define CLKID_SYS_MAC          71
+#define CLKID_SYS_LCD          72
+#define CLKID_SYS_ADCANA       73
+
+#define MAX_CLKS               74
+#endif
index 34fe28c622d0a6c2cbfbef348be34dfefa2b59f9..c4b1676ea674abb5c6f598525b3f0ca09e39e1e7 100644 (file)
 #define CLK_DIV_MCUISP1                453 /* Exynos4x12 only */
 #define CLK_DIV_ACLK200                454 /* Exynos4x12 only */
 #define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */
+#define CLK_DIV_ACP            456
+#define CLK_DIV_DMC            457
+#define CLK_DIV_C2C            458 /* Exynos4x12 only */
+#define CLK_DIV_GDL            459
+#define CLK_DIV_GDR            460
 
 /* must be greater than maximal clock id */
-#define CLK_NR_CLKS            456
+#define CLK_NR_CLKS            461
 
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
index 8e4681b07ae79fcaaf6b9e563ac360da6ac198d0..e33c75a3c09dc7caf26575a7e3469788aa6cc234 100644 (file)
 #define DOUT_SCLK_CC_PLL               4
 #define DOUT_SCLK_MFC_PLL              5
 #define DOUT_ACLK_CCORE_133            6
-#define TOPC_NR_CLK                    7
+#define DOUT_ACLK_MSCL_532             7
+#define ACLK_MSCL_532                  8
+#define DOUT_SCLK_AUD_PLL              9
+#define FOUT_AUD_PLL                   10
+#define TOPC_NR_CLK                    11
 
 /* TOP0 */
 #define DOUT_ACLK_PERIC1               1
 #define CLK_SCLK_UART1                 4
 #define CLK_SCLK_UART2                 5
 #define CLK_SCLK_UART3                 6
-#define TOP0_NR_CLK                    7
+#define CLK_SCLK_SPI0                  7
+#define CLK_SCLK_SPI1                  8
+#define CLK_SCLK_SPI2                  9
+#define CLK_SCLK_SPI3                  10
+#define CLK_SCLK_SPI4                  11
+#define CLK_SCLK_SPDIF                 12
+#define CLK_SCLK_PCM1                  13
+#define CLK_SCLK_I2S1                  14
+#define TOP0_NR_CLK                    15
 
 /* TOP1 */
 #define DOUT_ACLK_FSYS1_200            1
 #define PCLK_HSI2C6                    9
 #define PCLK_HSI2C7                    10
 #define PCLK_HSI2C8                    11
-#define PERIC1_NR_CLK                  12
+#define PCLK_SPI0                      12
+#define PCLK_SPI1                      13
+#define PCLK_SPI2                      14
+#define PCLK_SPI3                      15
+#define PCLK_SPI4                      16
+#define SCLK_SPI0                      17
+#define SCLK_SPI1                      18
+#define SCLK_SPI2                      19
+#define SCLK_SPI3                      20
+#define SCLK_SPI4                      21
+#define PCLK_I2S1                      22
+#define PCLK_PCM1                      23
+#define PCLK_SPDIF                     24
+#define SCLK_I2S1                      25
+#define SCLK_PCM1                      26
+#define SCLK_SPDIF                     27
+#define PERIC1_NR_CLK                  28
 
 /* PERIS */
 #define PCLK_CHIPID                    1
 
 /* FSYS0 */
 #define ACLK_MMC2                      1
-#define FSYS0_NR_CLK                   2
+#define ACLK_AXIUS_USBDRD30X_FSYS0X    2
+#define ACLK_USBDRD300                 3
+#define SCLK_USBDRD300_SUSPENDCLK      4
+#define SCLK_USBDRD300_REFCLK          5
+#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER         6
+#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER            7
+#define OSCCLK_PHY_CLKOUT_USB30_PHY            8
+#define ACLK_PDMA0                     9
+#define ACLK_PDMA1                     10
+#define FSYS0_NR_CLK                   11
 
 /* FSYS1 */
 #define ACLK_MMC1                      1
 #define ACLK_MMC0                      2
 #define FSYS1_NR_CLK                   3
 
+/* MSCL */
+#define USERMUX_ACLK_MSCL_532          1
+#define DOUT_PCLK_MSCL                 2
+#define ACLK_MSCL_0                    3
+#define ACLK_MSCL_1                    4
+#define ACLK_JPEG                      5
+#define ACLK_G2D                       6
+#define ACLK_LH_ASYNC_SI_MSCL_0                7
+#define ACLK_LH_ASYNC_SI_MSCL_1                8
+#define ACLK_AXI2ACEL_BRIDGE           9
+#define ACLK_XIU_MSCLX_0               10
+#define ACLK_XIU_MSCLX_1               11
+#define ACLK_QE_MSCL_0                 12
+#define ACLK_QE_MSCL_1                 13
+#define ACLK_QE_JPEG                   14
+#define ACLK_QE_G2D                    15
+#define ACLK_PPMU_MSCL_0               16
+#define ACLK_PPMU_MSCL_1               17
+#define ACLK_MSCLNP_133                        18
+#define ACLK_AHB2APB_MSCL0P            19
+#define ACLK_AHB2APB_MSCL1P            20
+
+#define PCLK_MSCL_0                    21
+#define PCLK_MSCL_1                    22
+#define PCLK_JPEG                      23
+#define PCLK_G2D                       24
+#define PCLK_QE_MSCL_0                 25
+#define PCLK_QE_MSCL_1                 26
+#define PCLK_QE_JPEG                   27
+#define PCLK_QE_G2D                    28
+#define PCLK_PPMU_MSCL_0               29
+#define PCLK_PPMU_MSCL_1               30
+#define PCLK_AXI2ACEL_BRIDGE           31
+#define PCLK_PMU_MSCL                  32
+#define MSCL_NR_CLK                    33
+
+/* AUD */
+#define SCLK_I2S                       1
+#define SCLK_PCM                       2
+#define PCLK_I2S                       3
+#define PCLK_PCM                       4
+#define ACLK_ADMA                      5
+#define AUD_NR_CLK                     6
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
index b857cadb0bd40ef8c2843693b6fa8c9f7bb11528..04fb29ae30e69801f37713f1284d165396f8525a 100644 (file)
 #define PLL0_VOTE                              221
 #define PLL3                                   222
 #define PLL3_VOTE                              223
-#define PLL4                                   224
 #define PLL4_VOTE                              225
 #define PLL8                                   226
 #define PLL8_VOTE                              227
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
new file mode 100644 (file)
index 0000000..4e944b8
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H
+#define _DT_BINDINGS_CLK_LCC_IPQ806X_H
+
+#define PLL4                           0
+#define MI2S_OSR_SRC                   1
+#define MI2S_OSR_CLK                   2
+#define MI2S_DIV_CLK                   3
+#define MI2S_BIT_DIV_CLK               4
+#define MI2S_BIT_CLK                   5
+#define PCM_SRC                                6
+#define PCM_CLK_OUT                    7
+#define PCM_CLK                                8
+#define SPDIF_SRC                      9
+#define SPDIF_CLK                      10
+#define AHBIX_CLK                      11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h
new file mode 100644 (file)
index 0000000..4fb2aa6
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H
+#define _DT_BINDINGS_CLK_LCC_MSM8960_H
+
+#define PLL4                           0
+#define MI2S_OSR_SRC                   1
+#define MI2S_OSR_CLK                   2
+#define MI2S_DIV_CLK                   3
+#define MI2S_BIT_DIV_CLK               4
+#define MI2S_BIT_CLK                   5
+#define PCM_SRC                                6
+#define PCM_CLK_OUT                    7
+#define PCM_CLK                                8
+#define SLIMBUS_SRC                    9
+#define AUDIO_SLIMBUS_CLK              10
+#define SPS_SLIMBUS_CLK                        11
+#define CODEC_I2S_MIC_OSR_SRC          12
+#define CODEC_I2S_MIC_OSR_CLK          13
+#define CODEC_I2S_MIC_DIV_CLK          14
+#define CODEC_I2S_MIC_BIT_DIV_CLK      15
+#define CODEC_I2S_MIC_BIT_CLK          16
+#define SPARE_I2S_MIC_OSR_SRC          17
+#define SPARE_I2S_MIC_OSR_CLK          18
+#define SPARE_I2S_MIC_DIV_CLK          19
+#define SPARE_I2S_MIC_BIT_DIV_CLK      20
+#define SPARE_I2S_MIC_BIT_CLK          21
+#define CODEC_I2S_SPKR_OSR_SRC         22
+#define CODEC_I2S_SPKR_OSR_CLK         23
+#define CODEC_I2S_SPKR_DIV_CLK         24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK     25
+#define CODEC_I2S_SPKR_BIT_CLK         26
+#define SPARE_I2S_SPKR_OSR_SRC         27
+#define SPARE_I2S_SPKR_OSR_CLK         28
+#define SPARE_I2S_SPKR_DIV_CLK         29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK     30
+#define SPARE_I2S_SPKR_BIT_CLK         31
+
+#endif
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
new file mode 100644 (file)
index 0000000..ae2eb17
--- /dev/null
@@ -0,0 +1,345 @@
+/*
+ * This header provides constants for binding nvidia,tegra124-car or
+ * nvidia,tegra132-car.
+ *
+ * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
+ * registers. These IDs often match those in the CAR's RST_DEVICES registers,
+ * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
+ * this case, those clocks are assigned IDs above 185 in order to highlight
+ * this issue. Implementations that interpret these clock IDs as bit values
+ * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
+ * explicitly handle these special cases.
+ *
+ * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
+ * above.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+
+/* 0 */
+/* 1 */
+/* 2 */
+#define TEGRA124_CLK_ISPB 3
+#define TEGRA124_CLK_RTC 4
+#define TEGRA124_CLK_TIMER 5
+#define TEGRA124_CLK_UARTA 6
+/* 7 (register bit affects uartb and vfir) */
+/* 8 */
+#define TEGRA124_CLK_SDMMC2 9
+/* 10 (register bit affects spdif_in and spdif_out) */
+#define TEGRA124_CLK_I2S1 11
+#define TEGRA124_CLK_I2C1 12
+/* 13 */
+#define TEGRA124_CLK_SDMMC1 14
+#define TEGRA124_CLK_SDMMC4 15
+/* 16 */
+#define TEGRA124_CLK_PWM 17
+#define TEGRA124_CLK_I2S2 18
+/* 20 (register bit affects vi and vi_sensor) */
+/* 21 */
+#define TEGRA124_CLK_USBD 22
+#define TEGRA124_CLK_ISP 23
+/* 26 */
+/* 25 */
+#define TEGRA124_CLK_DISP2 26
+#define TEGRA124_CLK_DISP1 27
+#define TEGRA124_CLK_HOST1X 28
+#define TEGRA124_CLK_VCP 29
+#define TEGRA124_CLK_I2S0 30
+/* 31 */
+
+#define TEGRA124_CLK_MC 32
+/* 33 */
+#define TEGRA124_CLK_APBDMA 34
+/* 35 */
+#define TEGRA124_CLK_KBC 36
+/* 37 */
+/* 38 */
+/* 39 (register bit affects fuse and fuse_burn) */
+#define TEGRA124_CLK_KFUSE 40
+#define TEGRA124_CLK_SBC1 41
+#define TEGRA124_CLK_NOR 42
+/* 43 */
+#define TEGRA124_CLK_SBC2 44
+/* 45 */
+#define TEGRA124_CLK_SBC3 46
+#define TEGRA124_CLK_I2C5 47
+#define TEGRA124_CLK_DSIA 48
+/* 49 */
+#define TEGRA124_CLK_MIPI 50
+#define TEGRA124_CLK_HDMI 51
+#define TEGRA124_CLK_CSI 52
+/* 53 */
+#define TEGRA124_CLK_I2C2 54
+#define TEGRA124_CLK_UARTC 55
+#define TEGRA124_CLK_MIPI_CAL 56
+#define TEGRA124_CLK_EMC 57
+#define TEGRA124_CLK_USB2 58
+#define TEGRA124_CLK_USB3 59
+/* 60 */
+#define TEGRA124_CLK_VDE 61
+#define TEGRA124_CLK_BSEA 62
+#define TEGRA124_CLK_BSEV 63
+
+/* 64 */
+#define TEGRA124_CLK_UARTD 65
+/* 66 */
+#define TEGRA124_CLK_I2C3 67
+#define TEGRA124_CLK_SBC4 68
+#define TEGRA124_CLK_SDMMC3 69
+#define TEGRA124_CLK_PCIE 70
+#define TEGRA124_CLK_OWR 71
+#define TEGRA124_CLK_AFI 72
+#define TEGRA124_CLK_CSITE 73
+/* 74 */
+/* 75 */
+#define TEGRA124_CLK_LA 76
+#define TEGRA124_CLK_TRACE 77
+#define TEGRA124_CLK_SOC_THERM 78
+#define TEGRA124_CLK_DTV 79
+/* 80 */
+#define TEGRA124_CLK_I2CSLOW 81
+#define TEGRA124_CLK_DSIB 82
+#define TEGRA124_CLK_TSEC 83
+/* 84 */
+/* 85 */
+/* 86 */
+/* 87 */
+/* 88 */
+#define TEGRA124_CLK_XUSB_HOST 89
+/* 90 */
+#define TEGRA124_CLK_MSENC 91
+#define TEGRA124_CLK_CSUS 92
+/* 93 */
+/* 94 */
+/* 95 (bit affects xusb_dev and xusb_dev_src) */
+
+/* 96 */
+/* 97 */
+/* 98 */
+#define TEGRA124_CLK_MSELECT 99
+#define TEGRA124_CLK_TSENSOR 100
+#define TEGRA124_CLK_I2S3 101
+#define TEGRA124_CLK_I2S4 102
+#define TEGRA124_CLK_I2C4 103
+#define TEGRA124_CLK_SBC5 104
+#define TEGRA124_CLK_SBC6 105
+#define TEGRA124_CLK_D_AUDIO 106
+#define TEGRA124_CLK_APBIF 107
+#define TEGRA124_CLK_DAM0 108
+#define TEGRA124_CLK_DAM1 109
+#define TEGRA124_CLK_DAM2 110
+#define TEGRA124_CLK_HDA2CODEC_2X 111
+/* 112 */
+#define TEGRA124_CLK_AUDIO0_2X 113
+#define TEGRA124_CLK_AUDIO1_2X 114
+#define TEGRA124_CLK_AUDIO2_2X 115
+#define TEGRA124_CLK_AUDIO3_2X 116
+#define TEGRA124_CLK_AUDIO4_2X 117
+#define TEGRA124_CLK_SPDIF_2X 118
+#define TEGRA124_CLK_ACTMON 119
+#define TEGRA124_CLK_EXTERN1 120
+#define TEGRA124_CLK_EXTERN2 121
+#define TEGRA124_CLK_EXTERN3 122
+#define TEGRA124_CLK_SATA_OOB 123
+#define TEGRA124_CLK_SATA 124
+#define TEGRA124_CLK_HDA 125
+/* 126 */
+#define TEGRA124_CLK_SE 127
+
+#define TEGRA124_CLK_HDA2HDMI 128
+#define TEGRA124_CLK_SATA_COLD 129
+/* 130 */
+/* 131 */
+/* 132 */
+/* 133 */
+/* 134 */
+/* 135 */
+/* 136 */
+/* 137 */
+/* 138 */
+/* 139 */
+/* 140 */
+/* 141 */
+/* 142 */
+/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
+/*      xusb_host_src and xusb_ss_src) */
+#define TEGRA124_CLK_CILAB 144
+#define TEGRA124_CLK_CILCD 145
+#define TEGRA124_CLK_CILE 146
+#define TEGRA124_CLK_DSIALP 147
+#define TEGRA124_CLK_DSIBLP 148
+#define TEGRA124_CLK_ENTROPY 149
+#define TEGRA124_CLK_DDS 150
+/* 151 */
+#define TEGRA124_CLK_DP2 152
+#define TEGRA124_CLK_AMX 153
+#define TEGRA124_CLK_ADX 154
+/* 155 (bit affects dfll_ref and dfll_soc) */
+#define TEGRA124_CLK_XUSB_SS 156
+/* 157 */
+/* 158 */
+/* 159 */
+
+/* 160 */
+/* 161 */
+/* 162 */
+/* 163 */
+/* 164 */
+/* 165 */
+#define TEGRA124_CLK_I2C6 166
+/* 167 */
+/* 168 */
+/* 169 */
+/* 170 */
+#define TEGRA124_CLK_VIM2_CLK 171
+/* 172 */
+/* 173 */
+/* 174 */
+/* 175 */
+#define TEGRA124_CLK_HDMI_AUDIO 176
+#define TEGRA124_CLK_CLK72MHZ 177
+#define TEGRA124_CLK_VIC03 178
+/* 179 */
+#define TEGRA124_CLK_ADX1 180
+#define TEGRA124_CLK_DPAUX 181
+#define TEGRA124_CLK_SOR0 182
+/* 183 */
+#define TEGRA124_CLK_GPU 184
+#define TEGRA124_CLK_AMX1 185
+/* 186 */
+/* 187 */
+/* 188 */
+/* 189 */
+/* 190 */
+/* 191 */
+#define TEGRA124_CLK_UARTB 192
+#define TEGRA124_CLK_VFIR 193
+#define TEGRA124_CLK_SPDIF_IN 194
+#define TEGRA124_CLK_SPDIF_OUT 195
+#define TEGRA124_CLK_VI 196
+#define TEGRA124_CLK_VI_SENSOR 197
+#define TEGRA124_CLK_FUSE 198
+#define TEGRA124_CLK_FUSE_BURN 199
+#define TEGRA124_CLK_CLK_32K 200
+#define TEGRA124_CLK_CLK_M 201
+#define TEGRA124_CLK_CLK_M_DIV2 202
+#define TEGRA124_CLK_CLK_M_DIV4 203
+#define TEGRA124_CLK_PLL_REF 204
+#define TEGRA124_CLK_PLL_C 205
+#define TEGRA124_CLK_PLL_C_OUT1 206
+#define TEGRA124_CLK_PLL_C2 207
+#define TEGRA124_CLK_PLL_C3 208
+#define TEGRA124_CLK_PLL_M 209
+#define TEGRA124_CLK_PLL_M_OUT1 210
+#define TEGRA124_CLK_PLL_P 211
+#define TEGRA124_CLK_PLL_P_OUT1 212
+#define TEGRA124_CLK_PLL_P_OUT2 213
+#define TEGRA124_CLK_PLL_P_OUT3 214
+#define TEGRA124_CLK_PLL_P_OUT4 215
+#define TEGRA124_CLK_PLL_A 216
+#define TEGRA124_CLK_PLL_A_OUT0 217
+#define TEGRA124_CLK_PLL_D 218
+#define TEGRA124_CLK_PLL_D_OUT0 219
+#define TEGRA124_CLK_PLL_D2 220
+#define TEGRA124_CLK_PLL_D2_OUT0 221
+#define TEGRA124_CLK_PLL_U 222
+#define TEGRA124_CLK_PLL_U_480M 223
+
+#define TEGRA124_CLK_PLL_U_60M 224
+#define TEGRA124_CLK_PLL_U_48M 225
+#define TEGRA124_CLK_PLL_U_12M 226
+/* 227 */
+/* 228 */
+#define TEGRA124_CLK_PLL_RE_VCO 229
+#define TEGRA124_CLK_PLL_RE_OUT 230
+#define TEGRA124_CLK_PLL_E 231
+#define TEGRA124_CLK_SPDIF_IN_SYNC 232
+#define TEGRA124_CLK_I2S0_SYNC 233
+#define TEGRA124_CLK_I2S1_SYNC 234
+#define TEGRA124_CLK_I2S2_SYNC 235
+#define TEGRA124_CLK_I2S3_SYNC 236
+#define TEGRA124_CLK_I2S4_SYNC 237
+#define TEGRA124_CLK_VIMCLK_SYNC 238
+#define TEGRA124_CLK_AUDIO0 239
+#define TEGRA124_CLK_AUDIO1 240
+#define TEGRA124_CLK_AUDIO2 241
+#define TEGRA124_CLK_AUDIO3 242
+#define TEGRA124_CLK_AUDIO4 243
+#define TEGRA124_CLK_SPDIF 244
+#define TEGRA124_CLK_CLK_OUT_1 245
+#define TEGRA124_CLK_CLK_OUT_2 246
+#define TEGRA124_CLK_CLK_OUT_3 247
+#define TEGRA124_CLK_BLINK 248
+/* 249 */
+/* 250 */
+/* 251 */
+#define TEGRA124_CLK_XUSB_HOST_SRC 252
+#define TEGRA124_CLK_XUSB_FALCON_SRC 253
+#define TEGRA124_CLK_XUSB_FS_SRC 254
+#define TEGRA124_CLK_XUSB_SS_SRC 255
+
+#define TEGRA124_CLK_XUSB_DEV_SRC 256
+#define TEGRA124_CLK_XUSB_DEV 257
+#define TEGRA124_CLK_XUSB_HS_SRC 258
+#define TEGRA124_CLK_SCLK 259
+#define TEGRA124_CLK_HCLK 260
+#define TEGRA124_CLK_PCLK 261
+/* 262 */
+/* 263 */
+#define TEGRA124_CLK_DFLL_REF 264
+#define TEGRA124_CLK_DFLL_SOC 265
+#define TEGRA124_CLK_VI_SENSOR2 266
+#define TEGRA124_CLK_PLL_P_OUT5 267
+#define TEGRA124_CLK_CML0 268
+#define TEGRA124_CLK_CML1 269
+#define TEGRA124_CLK_PLL_C4 270
+#define TEGRA124_CLK_PLL_DP 271
+#define TEGRA124_CLK_PLL_E_MUX 272
+#define TEGRA124_CLK_PLLD_DSI 273
+/* 274 */
+/* 275 */
+/* 276 */
+/* 277 */
+/* 278 */
+/* 279 */
+/* 280 */
+/* 281 */
+/* 282 */
+/* 283 */
+/* 284 */
+/* 285 */
+/* 286 */
+/* 287 */
+
+/* 288 */
+/* 289 */
+/* 290 */
+/* 291 */
+/* 292 */
+/* 293 */
+/* 294 */
+/* 295 */
+/* 296 */
+/* 297 */
+/* 298 */
+/* 299 */
+#define TEGRA124_CLK_AUDIO0_MUX 300
+#define TEGRA124_CLK_AUDIO1_MUX 301
+#define TEGRA124_CLK_AUDIO2_MUX 302
+#define TEGRA124_CLK_AUDIO3_MUX 303
+#define TEGRA124_CLK_AUDIO4_MUX 304
+#define TEGRA124_CLK_SPDIF_MUX 305
+#define TEGRA124_CLK_CLK_OUT_1_MUX 306
+#define TEGRA124_CLK_CLK_OUT_2_MUX 307
+#define TEGRA124_CLK_CLK_OUT_3_MUX 308
+/* 309 */
+/* 310 */
+#define TEGRA124_CLK_SOR0_LVDS 311
+#define TEGRA124_CLK_XUSB_SS_DIV2 312
+
+#define TEGRA124_CLK_PLL_M_UD 313
+#define TEGRA124_CLK_PLL_C_UD 314
+
+#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */
index af9bc9a3ddbc561840b60f6f5416f19f846e8567..2860737f04436ba0515895b7b04b85e4bfe7885c 100644 (file)
 /*
- * This header provides constants for binding nvidia,tegra124-car.
- *
- * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
- * registers. These IDs often match those in the CAR's RST_DEVICES registers,
- * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
- * this case, those clocks are assigned IDs above 185 in order to highlight
- * this issue. Implementations that interpret these clock IDs as bit values
- * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
- * explicitly handle these special cases.
- *
- * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
- * above.
+ * This header provides Tegra124-specific constants for binding
+ * nvidia,tegra124-car.
  */
 
+#include <dt-bindings/clock/tegra124-car-common.h>
+
 #ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
 #define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
 
-/* 0 */
-/* 1 */
-/* 2 */
-#define TEGRA124_CLK_ISPB 3
-#define TEGRA124_CLK_RTC 4
-#define TEGRA124_CLK_TIMER 5
-#define TEGRA124_CLK_UARTA 6
-/* 7 (register bit affects uartb and vfir) */
-/* 8 */
-#define TEGRA124_CLK_SDMMC2 9
-/* 10 (register bit affects spdif_in and spdif_out) */
-#define TEGRA124_CLK_I2S1 11
-#define TEGRA124_CLK_I2C1 12
-/* 13 */
-#define TEGRA124_CLK_SDMMC1 14
-#define TEGRA124_CLK_SDMMC4 15
-/* 16 */
-#define TEGRA124_CLK_PWM 17
-#define TEGRA124_CLK_I2S2 18
-/* 20 (register bit affects vi and vi_sensor) */
-/* 21 */
-#define TEGRA124_CLK_USBD 22
-#define TEGRA124_CLK_ISP 23
-/* 26 */
-/* 25 */
-#define TEGRA124_CLK_DISP2 26
-#define TEGRA124_CLK_DISP1 27
-#define TEGRA124_CLK_HOST1X 28
-#define TEGRA124_CLK_VCP 29
-#define TEGRA124_CLK_I2S0 30
-/* 31 */
-
-#define TEGRA124_CLK_MC 32
-/* 33 */
-#define TEGRA124_CLK_APBDMA 34
-/* 35 */
-#define TEGRA124_CLK_KBC 36
-/* 37 */
-/* 38 */
-/* 39 (register bit affects fuse and fuse_burn) */
-#define TEGRA124_CLK_KFUSE 40
-#define TEGRA124_CLK_SBC1 41
-#define TEGRA124_CLK_NOR 42
-/* 43 */
-#define TEGRA124_CLK_SBC2 44
-/* 45 */
-#define TEGRA124_CLK_SBC3 46
-#define TEGRA124_CLK_I2C5 47
-#define TEGRA124_CLK_DSIA 48
-/* 49 */
-#define TEGRA124_CLK_MIPI 50
-#define TEGRA124_CLK_HDMI 51
-#define TEGRA124_CLK_CSI 52
-/* 53 */
-#define TEGRA124_CLK_I2C2 54
-#define TEGRA124_CLK_UARTC 55
-#define TEGRA124_CLK_MIPI_CAL 56
-#define TEGRA124_CLK_EMC 57
-#define TEGRA124_CLK_USB2 58
-#define TEGRA124_CLK_USB3 59
-/* 60 */
-#define TEGRA124_CLK_VDE 61
-#define TEGRA124_CLK_BSEA 62
-#define TEGRA124_CLK_BSEV 63
-
-/* 64 */
-#define TEGRA124_CLK_UARTD 65
-/* 66 */
-#define TEGRA124_CLK_I2C3 67
-#define TEGRA124_CLK_SBC4 68
-#define TEGRA124_CLK_SDMMC3 69
-#define TEGRA124_CLK_PCIE 70
-#define TEGRA124_CLK_OWR 71
-#define TEGRA124_CLK_AFI 72
-#define TEGRA124_CLK_CSITE 73
-/* 74 */
-/* 75 */
-#define TEGRA124_CLK_LA 76
-#define TEGRA124_CLK_TRACE 77
-#define TEGRA124_CLK_SOC_THERM 78
-#define TEGRA124_CLK_DTV 79
-/* 80 */
-#define TEGRA124_CLK_I2CSLOW 81
-#define TEGRA124_CLK_DSIB 82
-#define TEGRA124_CLK_TSEC 83
-/* 84 */
-/* 85 */
-/* 86 */
-/* 87 */
-/* 88 */
-#define TEGRA124_CLK_XUSB_HOST 89
-/* 90 */
-#define TEGRA124_CLK_MSENC 91
-#define TEGRA124_CLK_CSUS 92
-/* 93 */
-/* 94 */
-/* 95 (bit affects xusb_dev and xusb_dev_src) */
-
-/* 96 */
-/* 97 */
-/* 98 */
-#define TEGRA124_CLK_MSELECT 99
-#define TEGRA124_CLK_TSENSOR 100
-#define TEGRA124_CLK_I2S3 101
-#define TEGRA124_CLK_I2S4 102
-#define TEGRA124_CLK_I2C4 103
-#define TEGRA124_CLK_SBC5 104
-#define TEGRA124_CLK_SBC6 105
-#define TEGRA124_CLK_D_AUDIO 106
-#define TEGRA124_CLK_APBIF 107
-#define TEGRA124_CLK_DAM0 108
-#define TEGRA124_CLK_DAM1 109
-#define TEGRA124_CLK_DAM2 110
-#define TEGRA124_CLK_HDA2CODEC_2X 111
-/* 112 */
-#define TEGRA124_CLK_AUDIO0_2X 113
-#define TEGRA124_CLK_AUDIO1_2X 114
-#define TEGRA124_CLK_AUDIO2_2X 115
-#define TEGRA124_CLK_AUDIO3_2X 116
-#define TEGRA124_CLK_AUDIO4_2X 117
-#define TEGRA124_CLK_SPDIF_2X 118
-#define TEGRA124_CLK_ACTMON 119
-#define TEGRA124_CLK_EXTERN1 120
-#define TEGRA124_CLK_EXTERN2 121
-#define TEGRA124_CLK_EXTERN3 122
-#define TEGRA124_CLK_SATA_OOB 123
-#define TEGRA124_CLK_SATA 124
-#define TEGRA124_CLK_HDA 125
-/* 126 */
-#define TEGRA124_CLK_SE 127
-
-#define TEGRA124_CLK_HDA2HDMI 128
-#define TEGRA124_CLK_SATA_COLD 129
-/* 130 */
-/* 131 */
-/* 132 */
-/* 133 */
-/* 134 */
-/* 135 */
-/* 136 */
-/* 137 */
-/* 138 */
-/* 139 */
-/* 140 */
-/* 141 */
-/* 142 */
-/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
-/*      xusb_host_src and xusb_ss_src) */
-#define TEGRA124_CLK_CILAB 144
-#define TEGRA124_CLK_CILCD 145
-#define TEGRA124_CLK_CILE 146
-#define TEGRA124_CLK_DSIALP 147
-#define TEGRA124_CLK_DSIBLP 148
-#define TEGRA124_CLK_ENTROPY 149
-#define TEGRA124_CLK_DDS 150
-/* 151 */
-#define TEGRA124_CLK_DP2 152
-#define TEGRA124_CLK_AMX 153
-#define TEGRA124_CLK_ADX 154
-/* 155 (bit affects dfll_ref and dfll_soc) */
-#define TEGRA124_CLK_XUSB_SS 156
-/* 157 */
-/* 158 */
-/* 159 */
-
-/* 160 */
-/* 161 */
-/* 162 */
-/* 163 */
-/* 164 */
-/* 165 */
-#define TEGRA124_CLK_I2C6 166
-/* 167 */
-/* 168 */
-/* 169 */
-/* 170 */
-#define TEGRA124_CLK_VIM2_CLK 171
-/* 172 */
-/* 173 */
-/* 174 */
-/* 175 */
-#define TEGRA124_CLK_HDMI_AUDIO 176
-#define TEGRA124_CLK_CLK72MHZ 177
-#define TEGRA124_CLK_VIC03 178
-/* 179 */
-#define TEGRA124_CLK_ADX1 180
-#define TEGRA124_CLK_DPAUX 181
-#define TEGRA124_CLK_SOR0 182
-/* 183 */
-#define TEGRA124_CLK_GPU 184
-#define TEGRA124_CLK_AMX1 185
-/* 186 */
-/* 187 */
-/* 188 */
-/* 189 */
-/* 190 */
-/* 191 */
-#define TEGRA124_CLK_UARTB 192
-#define TEGRA124_CLK_VFIR 193
-#define TEGRA124_CLK_SPDIF_IN 194
-#define TEGRA124_CLK_SPDIF_OUT 195
-#define TEGRA124_CLK_VI 196
-#define TEGRA124_CLK_VI_SENSOR 197
-#define TEGRA124_CLK_FUSE 198
-#define TEGRA124_CLK_FUSE_BURN 199
-#define TEGRA124_CLK_CLK_32K 200
-#define TEGRA124_CLK_CLK_M 201
-#define TEGRA124_CLK_CLK_M_DIV2 202
-#define TEGRA124_CLK_CLK_M_DIV4 203
-#define TEGRA124_CLK_PLL_REF 204
-#define TEGRA124_CLK_PLL_C 205
-#define TEGRA124_CLK_PLL_C_OUT1 206
-#define TEGRA124_CLK_PLL_C2 207
-#define TEGRA124_CLK_PLL_C3 208
-#define TEGRA124_CLK_PLL_M 209
-#define TEGRA124_CLK_PLL_M_OUT1 210
-#define TEGRA124_CLK_PLL_P 211
-#define TEGRA124_CLK_PLL_P_OUT1 212
-#define TEGRA124_CLK_PLL_P_OUT2 213
-#define TEGRA124_CLK_PLL_P_OUT3 214
-#define TEGRA124_CLK_PLL_P_OUT4 215
-#define TEGRA124_CLK_PLL_A 216
-#define TEGRA124_CLK_PLL_A_OUT0 217
-#define TEGRA124_CLK_PLL_D 218
-#define TEGRA124_CLK_PLL_D_OUT0 219
-#define TEGRA124_CLK_PLL_D2 220
-#define TEGRA124_CLK_PLL_D2_OUT0 221
-#define TEGRA124_CLK_PLL_U 222
-#define TEGRA124_CLK_PLL_U_480M 223
-
-#define TEGRA124_CLK_PLL_U_60M 224
-#define TEGRA124_CLK_PLL_U_48M 225
-#define TEGRA124_CLK_PLL_U_12M 226
-#define TEGRA124_CLK_PLL_X 227
-#define TEGRA124_CLK_PLL_X_OUT0 228
-#define TEGRA124_CLK_PLL_RE_VCO 229
-#define TEGRA124_CLK_PLL_RE_OUT 230
-#define TEGRA124_CLK_PLL_E 231
-#define TEGRA124_CLK_SPDIF_IN_SYNC 232
-#define TEGRA124_CLK_I2S0_SYNC 233
-#define TEGRA124_CLK_I2S1_SYNC 234
-#define TEGRA124_CLK_I2S2_SYNC 235
-#define TEGRA124_CLK_I2S3_SYNC 236
-#define TEGRA124_CLK_I2S4_SYNC 237
-#define TEGRA124_CLK_VIMCLK_SYNC 238
-#define TEGRA124_CLK_AUDIO0 239
-#define TEGRA124_CLK_AUDIO1 240
-#define TEGRA124_CLK_AUDIO2 241
-#define TEGRA124_CLK_AUDIO3 242
-#define TEGRA124_CLK_AUDIO4 243
-#define TEGRA124_CLK_SPDIF 244
-#define TEGRA124_CLK_CLK_OUT_1 245
-#define TEGRA124_CLK_CLK_OUT_2 246
-#define TEGRA124_CLK_CLK_OUT_3 247
-#define TEGRA124_CLK_BLINK 248
-/* 249 */
-/* 250 */
-/* 251 */
-#define TEGRA124_CLK_XUSB_HOST_SRC 252
-#define TEGRA124_CLK_XUSB_FALCON_SRC 253
-#define TEGRA124_CLK_XUSB_FS_SRC 254
-#define TEGRA124_CLK_XUSB_SS_SRC 255
-
-#define TEGRA124_CLK_XUSB_DEV_SRC 256
-#define TEGRA124_CLK_XUSB_DEV 257
-#define TEGRA124_CLK_XUSB_HS_SRC 258
-#define TEGRA124_CLK_SCLK 259
-#define TEGRA124_CLK_HCLK 260
-#define TEGRA124_CLK_PCLK 261
-#define TEGRA124_CLK_CCLK_G 262
-#define TEGRA124_CLK_CCLK_LP 263
-#define TEGRA124_CLK_DFLL_REF 264
-#define TEGRA124_CLK_DFLL_SOC 265
-#define TEGRA124_CLK_VI_SENSOR2 266
-#define TEGRA124_CLK_PLL_P_OUT5 267
-#define TEGRA124_CLK_CML0 268
-#define TEGRA124_CLK_CML1 269
-#define TEGRA124_CLK_PLL_C4 270
-#define TEGRA124_CLK_PLL_DP 271
-#define TEGRA124_CLK_PLL_E_MUX 272
-/* 273 */
-/* 274 */
-/* 275 */
-/* 276 */
-/* 277 */
-/* 278 */
-/* 279 */
-/* 280 */
-/* 281 */
-/* 282 */
-/* 283 */
-/* 284 */
-/* 285 */
-/* 286 */
-/* 287 */
-
-/* 288 */
-/* 289 */
-/* 290 */
-/* 291 */
-/* 292 */
-/* 293 */
-/* 294 */
-/* 295 */
-/* 296 */
-/* 297 */
-/* 298 */
-/* 299 */
-#define TEGRA124_CLK_AUDIO0_MUX 300
-#define TEGRA124_CLK_AUDIO1_MUX 301
-#define TEGRA124_CLK_AUDIO2_MUX 302
-#define TEGRA124_CLK_AUDIO3_MUX 303
-#define TEGRA124_CLK_AUDIO4_MUX 304
-#define TEGRA124_CLK_SPDIF_MUX 305
-#define TEGRA124_CLK_CLK_OUT_1_MUX 306
-#define TEGRA124_CLK_CLK_OUT_2_MUX 307
-#define TEGRA124_CLK_CLK_OUT_3_MUX 308
-#define TEGRA124_CLK_DSIA_MUX 309
-#define TEGRA124_CLK_DSIB_MUX 310
-#define TEGRA124_CLK_SOR0_LVDS 311
-#define TEGRA124_CLK_XUSB_SS_DIV2 312
+#define TEGRA124_CLK_PLL_X             227
+#define TEGRA124_CLK_PLL_X_OUT0                228
 
-#define TEGRA124_CLK_PLL_M_UD 313
-#define TEGRA124_CLK_PLL_C_UD 314
+#define TEGRA124_CLK_CCLK_G            262
+#define TEGRA124_CLK_CCLK_LP           263
 
-#define TEGRA124_CLK_CLK_MAX 315
+#define TEGRA124_CLK_CLK_MAX           315
 
 #endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h
new file mode 100644 (file)
index 0000000..388a6f3
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * This header provides constants for the Qualcomm RPM bindings.
+ */
+
+#ifndef _DT_BINDINGS_MFD_QCOM_RPM_H
+#define _DT_BINDINGS_MFD_QCOM_RPM_H
+
+/*
+ * Constants use to identify individual resources in the RPM.
+ */
+#define QCOM_RPM_APPS_FABRIC_ARB               1
+#define QCOM_RPM_APPS_FABRIC_CLK               2
+#define QCOM_RPM_APPS_FABRIC_HALT              3
+#define QCOM_RPM_APPS_FABRIC_IOCTL             4
+#define QCOM_RPM_APPS_FABRIC_MODE              5
+#define QCOM_RPM_APPS_L2_CACHE_CTL             6
+#define QCOM_RPM_CFPB_CLK                      7
+#define QCOM_RPM_CXO_BUFFERS                   8
+#define QCOM_RPM_CXO_CLK                       9
+#define QCOM_RPM_DAYTONA_FABRIC_CLK            10
+#define QCOM_RPM_DDR_DMM                       11
+#define QCOM_RPM_EBI1_CLK                      12
+#define QCOM_RPM_HDMI_SWITCH                   13
+#define QCOM_RPM_MMFPB_CLK                     14
+#define QCOM_RPM_MM_FABRIC_ARB                 15
+#define QCOM_RPM_MM_FABRIC_CLK                 16
+#define QCOM_RPM_MM_FABRIC_HALT                        17
+#define QCOM_RPM_MM_FABRIC_IOCTL               18
+#define QCOM_RPM_MM_FABRIC_MODE                        19
+#define QCOM_RPM_PLL_4                         20
+#define QCOM_RPM_PM8058_LDO0                   21
+#define QCOM_RPM_PM8058_LDO1                   22
+#define QCOM_RPM_PM8058_LDO2                   23
+#define QCOM_RPM_PM8058_LDO3                   24
+#define QCOM_RPM_PM8058_LDO4                   25
+#define QCOM_RPM_PM8058_LDO5                   26
+#define QCOM_RPM_PM8058_LDO6                   27
+#define QCOM_RPM_PM8058_LDO7                   28
+#define QCOM_RPM_PM8058_LDO8                   29
+#define QCOM_RPM_PM8058_LDO9                   30
+#define QCOM_RPM_PM8058_LDO10                  31
+#define QCOM_RPM_PM8058_LDO11                  32
+#define QCOM_RPM_PM8058_LDO12                  33
+#define QCOM_RPM_PM8058_LDO13                  34
+#define QCOM_RPM_PM8058_LDO14                  35
+#define QCOM_RPM_PM8058_LDO15                  36
+#define QCOM_RPM_PM8058_LDO16                  37
+#define QCOM_RPM_PM8058_LDO17                  38
+#define QCOM_RPM_PM8058_LDO18                  39
+#define QCOM_RPM_PM8058_LDO19                  40
+#define QCOM_RPM_PM8058_LDO20                  41
+#define QCOM_RPM_PM8058_LDO21                  42
+#define QCOM_RPM_PM8058_LDO22                  43
+#define QCOM_RPM_PM8058_LDO23                  44
+#define QCOM_RPM_PM8058_LDO24                  45
+#define QCOM_RPM_PM8058_LDO25                  46
+#define QCOM_RPM_PM8058_LVS0                   47
+#define QCOM_RPM_PM8058_LVS1                   48
+#define QCOM_RPM_PM8058_NCP                    49
+#define QCOM_RPM_PM8058_SMPS0                  50
+#define QCOM_RPM_PM8058_SMPS1                  51
+#define QCOM_RPM_PM8058_SMPS2                  52
+#define QCOM_RPM_PM8058_SMPS3                  53
+#define QCOM_RPM_PM8058_SMPS4                  54
+#define QCOM_RPM_PM8821_LDO1                   55
+#define QCOM_RPM_PM8821_SMPS1                  56
+#define QCOM_RPM_PM8821_SMPS2                  57
+#define QCOM_RPM_PM8901_LDO0                   58
+#define QCOM_RPM_PM8901_LDO1                   59
+#define QCOM_RPM_PM8901_LDO2                   60
+#define QCOM_RPM_PM8901_LDO3                   61
+#define QCOM_RPM_PM8901_LDO4                   62
+#define QCOM_RPM_PM8901_LDO5                   63
+#define QCOM_RPM_PM8901_LDO6                   64
+#define QCOM_RPM_PM8901_LVS0                   65
+#define QCOM_RPM_PM8901_LVS1                   66
+#define QCOM_RPM_PM8901_LVS2                   67
+#define QCOM_RPM_PM8901_LVS3                   68
+#define QCOM_RPM_PM8901_MVS                    69
+#define QCOM_RPM_PM8901_SMPS0                  70
+#define QCOM_RPM_PM8901_SMPS1                  71
+#define QCOM_RPM_PM8901_SMPS2                  72
+#define QCOM_RPM_PM8901_SMPS3                  73
+#define QCOM_RPM_PM8901_SMPS4                  74
+#define QCOM_RPM_PM8921_CLK1                   75
+#define QCOM_RPM_PM8921_CLK2                   76
+#define QCOM_RPM_PM8921_LDO1                   77
+#define QCOM_RPM_PM8921_LDO2                   78
+#define QCOM_RPM_PM8921_LDO3                   79
+#define QCOM_RPM_PM8921_LDO4                   80
+#define QCOM_RPM_PM8921_LDO5                   81
+#define QCOM_RPM_PM8921_LDO6                   82
+#define QCOM_RPM_PM8921_LDO7                   83
+#define QCOM_RPM_PM8921_LDO8                   84
+#define QCOM_RPM_PM8921_LDO9                   85
+#define QCOM_RPM_PM8921_LDO10                  86
+#define QCOM_RPM_PM8921_LDO11                  87
+#define QCOM_RPM_PM8921_LDO12                  88
+#define QCOM_RPM_PM8921_LDO13                  89
+#define QCOM_RPM_PM8921_LDO14                  90
+#define QCOM_RPM_PM8921_LDO15                  91
+#define QCOM_RPM_PM8921_LDO16                  92
+#define QCOM_RPM_PM8921_LDO17                  93
+#define QCOM_RPM_PM8921_LDO18                  94
+#define QCOM_RPM_PM8921_LDO19                  95
+#define QCOM_RPM_PM8921_LDO20                  96
+#define QCOM_RPM_PM8921_LDO21                  97
+#define QCOM_RPM_PM8921_LDO22                  98
+#define QCOM_RPM_PM8921_LDO23                  99
+#define QCOM_RPM_PM8921_LDO24                  100
+#define QCOM_RPM_PM8921_LDO25                  101
+#define QCOM_RPM_PM8921_LDO26                  102
+#define QCOM_RPM_PM8921_LDO27                  103
+#define QCOM_RPM_PM8921_LDO28                  104
+#define QCOM_RPM_PM8921_LDO29                  105
+#define QCOM_RPM_PM8921_LVS1                   106
+#define QCOM_RPM_PM8921_LVS2                   107
+#define QCOM_RPM_PM8921_LVS3                   108
+#define QCOM_RPM_PM8921_LVS4                   109
+#define QCOM_RPM_PM8921_LVS5                   110
+#define QCOM_RPM_PM8921_LVS6                   111
+#define QCOM_RPM_PM8921_LVS7                   112
+#define QCOM_RPM_PM8921_MVS                    113
+#define QCOM_RPM_PM8921_NCP                    114
+#define QCOM_RPM_PM8921_SMPS1                  115
+#define QCOM_RPM_PM8921_SMPS2                  116
+#define QCOM_RPM_PM8921_SMPS3                  117
+#define QCOM_RPM_PM8921_SMPS4                  118
+#define QCOM_RPM_PM8921_SMPS5                  119
+#define QCOM_RPM_PM8921_SMPS6                  120
+#define QCOM_RPM_PM8921_SMPS7                  121
+#define QCOM_RPM_PM8921_SMPS8                  122
+#define QCOM_RPM_PXO_CLK                       123
+#define QCOM_RPM_QDSS_CLK                      124
+#define QCOM_RPM_SFPB_CLK                      125
+#define QCOM_RPM_SMI_CLK                       126
+#define QCOM_RPM_SYS_FABRIC_ARB                        127
+#define QCOM_RPM_SYS_FABRIC_CLK                        128
+#define QCOM_RPM_SYS_FABRIC_HALT               129
+#define QCOM_RPM_SYS_FABRIC_IOCTL              130
+#define QCOM_RPM_SYS_FABRIC_MODE               131
+#define QCOM_RPM_USB_OTG_SWITCH                        132
+#define QCOM_RPM_VDDMIN_GPIO                   133
+
+/*
+ * Constants used to select force mode for regulators.
+ */
+#define QCOM_RPM_FORCE_MODE_NONE               0
+#define QCOM_RPM_FORCE_MODE_LPM                        1
+#define QCOM_RPM_FORCE_MODE_HPM                        2
+#define QCOM_RPM_FORCE_MODE_AUTO               3
+#define QCOM_RPM_FORCE_MODE_BYPASS             4
+
+#endif
diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h
new file mode 100644 (file)
index 0000000..0646500
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions
+ *
+ *  Copyright (C) 2014 Samsung Electronics
+ *  Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EXYNOS_THERMAL_TMU_DT_H
+#define _EXYNOS_THERMAL_TMU_DT_H
+
+#define TYPE_ONE_POINT_TRIMMING 0
+#define TYPE_ONE_POINT_TRIMMING_25 1
+#define TYPE_ONE_POINT_TRIMMING_85 2
+#define TYPE_TWO_POINT_TRIMMING 3
+#define TYPE_NONE 4
+
+#endif /* _EXYNOS_THERMAL_TMU_DT_H */
index b708786d4cbf6d374bdff697eac61e623d427723..5582c211f594e03d188a56631446ec94016d9fc0 100644 (file)
@@ -16,6 +16,7 @@ struct bcm47xx_wdt {
 
        struct watchdog_device wdd;
        struct notifier_block notifier;
+       struct notifier_block restart_handler;
 
        struct timer_list soft_timer;
        atomic_t soft_ticks;
index 994739da827f26cb574e97411e92b82d3f23d340..44057b45ed326d4274a4cdd008465e40e1db8667 100644 (file)
@@ -434,6 +434,18 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
        return bcma_find_core_unit(bus, coreid, 0);
 }
 
+#ifdef CONFIG_BCMA_HOST_PCI
+extern void bcma_host_pci_up(struct bcma_bus *bus);
+extern void bcma_host_pci_down(struct bcma_bus *bus);
+#else
+static inline void bcma_host_pci_up(struct bcma_bus *bus)
+{
+}
+static inline void bcma_host_pci_down(struct bcma_bus *bus)
+{
+}
+#endif
+
 extern bool bcma_core_is_enabled(struct bcma_device *core);
 extern void bcma_core_disable(struct bcma_device *core, u32 flags);
 extern int bcma_core_enable(struct bcma_device *core, u32 flags);
index db6fa217f98bf3f1276ac659c61e50099da739e4..6cceedf65ca27d787f995980cf716de2d0a2be47 100644 (file)
@@ -663,14 +663,6 @@ struct bcma_drv_cc_b {
 #define bcma_cc_maskset32(cc, offset, mask, set) \
        bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
 
-extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
-extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
-
-extern void bcma_chipco_suspend(struct bcma_drv_cc *cc);
-extern void bcma_chipco_resume(struct bcma_drv_cc *cc);
-
-void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
-
 extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
 
 extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
@@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
 u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
 
 /* PMU support */
-extern void bcma_pmu_init(struct bcma_drv_cc *cc);
-extern void bcma_pmu_early_init(struct bcma_drv_cc *cc);
-
 extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
                                  u32 value);
 extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset,
index 4dd1f33e36a20accc10d1aa1f1d66cfd5203f49b..4354d4ea6713da3d1121cde7bd8a0c0325626243 100644 (file)
@@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn {
 #define gmac_cmn_write16(gc, offset, val)      bcma_write16((gc)->core, offset, val)
 #define gmac_cmn_write32(gc, offset, val)      bcma_write32((gc)->core, offset, val)
 
-#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
-extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
-#else
-static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
-#endif
-
 #endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */
index 0b3b32aeeb8af8c76b9ed23ff0017ef79e92e635..8eea7f9e33b45d1665aed004f874d9b931ad2769 100644 (file)
@@ -39,21 +39,6 @@ struct bcma_drv_mips {
        u8 early_setup_done:1;
 };
 
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
-extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
-
-extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
-#else
-static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
-static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
-
-static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
-{
-       return 0;
-}
-#endif
-
 extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
 
 #endif /* LINUX_BCMA_DRIVER_MIPS_H_ */
index 3f809ae372c4aa702cf13f350840fd80552ee1ca..8e90004fdfd7b215074d7a4a97280d6405765db4 100644 (file)
@@ -238,12 +238,8 @@ struct bcma_drv_pci {
 #define pcicore_write16(pc, offset, val)       bcma_write16((pc)->core, offset, val)
 #define pcicore_write32(pc, offset, val)       bcma_write32((pc)->core, offset, val)
 
-extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
-extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
-extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
+extern int bcma_core_pci_irq_ctl(struct bcma_bus *bus,
                                 struct bcma_device *core, bool enable);
-extern void bcma_core_pci_up(struct bcma_bus *bus);
-extern void bcma_core_pci_down(struct bcma_bus *bus);
 extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
 
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
index 5988b05781c336d21c047339d8f7027bf3eaa068..31e6d17ab7985c4800110121ceca7209a38e4838 100644 (file)
 
 struct bcma_drv_pcie2 {
        struct bcma_device *core;
+
+       u16 reqsize;
 };
 
 #define pcie2_read16(pcie2, offset)            bcma_read16((pcie2)->core, offset)
@@ -153,6 +155,4 @@ struct bcma_drv_pcie2 {
 #define pcie2_set32(pcie2, offset, set)                bcma_set32((pcie2)->core, offset, set)
 #define pcie2_mask32(pcie2, offset, mask)      bcma_mask32((pcie2)->core, offset, mask)
 
-void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
-
 #endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */
index c0dadaac26e3725b0fcee4573f817c326a0dbd7f..31eb03d0c7662dddd02fb126a24bc4198ffc65d8 100644 (file)
@@ -158,17 +158,6 @@ enum {
 };
 
 
-/* pool operations */
-enum {
-  POOL_OP_CREATE                       = 0x01,
-  POOL_OP_DELETE                       = 0x02,
-  POOL_OP_AUID_CHANGE                  = 0x03,
-  POOL_OP_CREATE_SNAP                  = 0x11,
-  POOL_OP_DELETE_SNAP                  = 0x12,
-  POOL_OP_CREATE_UNMANAGED_SNAP                = 0x21,
-  POOL_OP_DELETE_UNMANAGED_SNAP                = 0x22,
-};
-
 struct ceph_mon_request_header {
        __le64 have_version;
        __le16 session_mon;
@@ -191,31 +180,6 @@ struct ceph_mon_statfs_reply {
        struct ceph_statfs st;
 } __attribute__ ((packed));
 
-const char *ceph_pool_op_name(int op);
-
-struct ceph_mon_poolop {
-       struct ceph_mon_request_header monhdr;
-       struct ceph_fsid fsid;
-       __le32 pool;
-       __le32 op;
-       __le64 auid;
-       __le64 snapid;
-       __le32 name_len;
-} __attribute__ ((packed));
-
-struct ceph_mon_poolop_reply {
-       struct ceph_mon_request_header monhdr;
-       struct ceph_fsid fsid;
-       __le32 reply_code;
-       __le32 epoch;
-       char has_data;
-       char data[0];
-} __attribute__ ((packed));
-
-struct ceph_mon_unmanaged_snap {
-       __le64 snapid;
-} __attribute__ ((packed));
-
 struct ceph_osd_getmap {
        struct ceph_mon_request_header monhdr;
        struct ceph_fsid fsid;
@@ -307,6 +271,7 @@ enum {
        CEPH_SESSION_RECALL_STATE,
        CEPH_SESSION_FLUSHMSG,
        CEPH_SESSION_FLUSHMSG_ACK,
+       CEPH_SESSION_FORCE_RO,
 };
 
 extern const char *ceph_session_op_name(int op);
index 8b11a79ca1cbf53630d0546b8a6d50446236d3a9..16fff9608848db88dd2c9bca903bc57ca971f527 100644 (file)
@@ -30,8 +30,9 @@
 #define CEPH_OPT_MYIP             (1<<2) /* specified my ip */
 #define CEPH_OPT_NOCRC            (1<<3) /* no data crc on writes */
 #define CEPH_OPT_NOMSGAUTH       (1<<4) /* not require cephx message signature */
+#define CEPH_OPT_TCP_NODELAY     (1<<5) /* TCP_NODELAY on TCP sockets */
 
-#define CEPH_OPT_DEFAULT   (0)
+#define CEPH_OPT_DEFAULT   (CEPH_OPT_TCP_NODELAY)
 
 #define ceph_set_opt(client, opt) \
        (client)->options->flags |= CEPH_OPT_##opt;
index d9d396c165037a7d6f661a41ec0dccec12f7dd1a..e15499422fdcc7686942ce88389d686d9078cd79 100644 (file)
@@ -57,6 +57,7 @@ struct ceph_messenger {
 
        atomic_t stopping;
        bool nocrc;
+       bool tcp_nodelay;
 
        /*
         * the global_seq counts connections i (attempt to) initiate
@@ -264,7 +265,8 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr,
                        struct ceph_entity_addr *myaddr,
                        u64 supported_features,
                        u64 required_features,
-                       bool nocrc);
+                       bool nocrc,
+                       bool tcp_nodelay);
 
 extern void ceph_con_init(struct ceph_connection *con, void *private,
                        const struct ceph_connection_operations *ops,
index deb47e45ac7c29b11642842a733a351f05bedbb3..81810dc21f061ce1acf5129f5dea5e6f7f758e65 100644 (file)
@@ -40,7 +40,7 @@ struct ceph_mon_request {
 };
 
 /*
- * ceph_mon_generic_request is being used for the statfs, poolop and
+ * ceph_mon_generic_request is being used for the statfs and
  * mon_get_version requests which are being done a bit differently
  * because we need to get data back to the caller
  */
@@ -50,7 +50,6 @@ struct ceph_mon_generic_request {
        struct rb_node node;
        int result;
        void *buf;
-       int buf_len;
        struct completion completion;
        struct ceph_msg *request;  /* original request */
        struct ceph_msg *reply;    /* and reply */
@@ -117,10 +116,4 @@ extern int ceph_monc_open_session(struct ceph_mon_client *monc);
 
 extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
 
-extern int ceph_monc_create_snapid(struct ceph_mon_client *monc,
-                                  u32 pool, u64 *snapid);
-
-extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
-                                  u32 pool, u64 snapid);
-
 #endif
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
deleted file mode 100644 (file)
index 0ca5f60..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- *  linux/include/linux/clk-private.h
- *
- *  Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
- *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __LINUX_CLK_PRIVATE_H
-#define __LINUX_CLK_PRIVATE_H
-
-#include <linux/clk-provider.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-
-/*
- * WARNING: Do not include clk-private.h from any file that implements struct
- * clk_ops.  Doing so is a layering violation!
- *
- * This header exists only to allow for statically initialized clock data.  Any
- * static clock data must be defined in a separate file from the logic that
- * implements the clock operations for that same data.
- */
-
-#ifdef CONFIG_COMMON_CLK
-
-struct module;
-
-struct clk {
-       const char              *name;
-       const struct clk_ops    *ops;
-       struct clk_hw           *hw;
-       struct module           *owner;
-       struct clk              *parent;
-       const char              **parent_names;
-       struct clk              **parents;
-       u8                      num_parents;
-       u8                      new_parent_index;
-       unsigned long           rate;
-       unsigned long           new_rate;
-       struct clk              *new_parent;
-       struct clk              *new_child;
-       unsigned long           flags;
-       unsigned int            enable_count;
-       unsigned int            prepare_count;
-       unsigned long           accuracy;
-       int                     phase;
-       struct hlist_head       children;
-       struct hlist_node       child_node;
-       struct hlist_node       debug_node;
-       unsigned int            notifier_count;
-#ifdef CONFIG_DEBUG_FS
-       struct dentry           *dentry;
-#endif
-       struct kref             ref;
-};
-
-/*
- * DOC: Basic clock implementations common to many platforms
- *
- * Each basic clock hardware type is comprised of a structure describing the
- * clock hardware, implementations of the relevant callbacks in struct clk_ops,
- * unique flags for that hardware type, a registration function and an
- * alternative macro for static initialization
- */
-
-#define DEFINE_CLK(_name, _ops, _flags, _parent_names,         \
-               _parents)                                       \
-       static struct clk _name = {                             \
-               .name = #_name,                                 \
-               .ops = &_ops,                                   \
-               .hw = &_name##_hw.hw,                           \
-               .parent_names = _parent_names,                  \
-               .num_parents = ARRAY_SIZE(_parent_names),       \
-               .parents = _parents,                            \
-               .flags = _flags | CLK_IS_BASIC,                 \
-       }
-
-#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate,            \
-                               _fixed_rate_flags)              \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {};         \
-       static struct clk_fixed_rate _name##_hw = {             \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .fixed_rate = _rate,                            \
-               .flags = _fixed_rate_flags,                     \
-       };                                                      \
-       DEFINE_CLK(_name, clk_fixed_rate_ops, _flags,           \
-                       _name##_parent_names, NULL);
-
-#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr,      \
-                               _flags, _reg, _bit_idx,         \
-                               _gate_flags, _lock)             \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_gate _name##_hw = {                   \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .bit_idx = _bit_idx,                            \
-               .flags = _gate_flags,                           \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_gate_ops, _flags,                 \
-                       _name##_parent_names, _name##_parents);
-
-#define _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,  \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _table, _lock)  \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_divider _name##_hw = {                \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .shift = _shift,                                \
-               .width = _width,                                \
-               .flags = _divider_flags,                        \
-               .table = _table,                                \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_divider_ops, _flags,              \
-                       _name##_parent_names, _name##_parents);
-
-#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _lock)          \
-       _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, NULL, _lock)
-
-#define DEFINE_CLK_DIVIDER_TABLE(_name, _parent_name,          \
-                               _parent_ptr, _flags, _reg,      \
-                               _shift, _width, _divider_flags, \
-                               _table, _lock)                  \
-       _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _table, _lock)  \
-
-#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
-                               _reg, _shift, _width,           \
-                               _mux_flags, _lock)              \
-       static struct clk _name;                                \
-       static struct clk_mux _name##_hw = {                    \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .shift = _shift,                                \
-               .mask = BIT(_width) - 1,                        \
-               .flags = _mux_flags,                            \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names,   \
-                       _parents);
-
-#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name,           \
-                               _parent_ptr, _flags,            \
-                               _mult, _div)                    \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_fixed_factor _name##_hw = {           \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .mult = _mult,                                  \
-               .div = _div,                                    \
-       };                                                      \
-       DEFINE_CLK(_name, clk_fixed_factor_ops, _flags,         \
-                       _name##_parent_names, _name##_parents);
-
-/**
- * __clk_init - initialize the data structures in a struct clk
- * @dev:       device initializing this clk, placeholder for now
- * @clk:       clk being initialized
- *
- * Initializes the lists in struct clk, queries the hardware for the
- * parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- *     .name
- *     .ops
- *     .hw
- *     .parent_names
- *     .num_parents
- *     .flags
- *
- * It is not necessary to call clk_register if __clk_init is used directly with
- * statically initialized clock data.
- *
- * Returns 0 on success, otherwise an error code.
- */
-int __clk_init(struct device *dev, struct clk *clk);
-
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
-
-#endif /* CONFIG_COMMON_CLK */
-#endif /* CLK_PRIVATE_H */
index d936409520f8db609994f7ddab629a99981883dc..5591ea71a8d14054bf923dcad45dc94f71bd029a 100644 (file)
@@ -33,6 +33,7 @@
 #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
 
 struct clk_hw;
+struct clk_core;
 struct dentry;
 
 /**
@@ -174,9 +175,12 @@ struct clk_ops {
                                        unsigned long parent_rate);
        long            (*round_rate)(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *parent_rate);
-       long            (*determine_rate)(struct clk_hw *hw, unsigned long rate,
-                                       unsigned long *best_parent_rate,
-                                       struct clk_hw **best_parent_hw);
+       long            (*determine_rate)(struct clk_hw *hw,
+                                         unsigned long rate,
+                                         unsigned long min_rate,
+                                         unsigned long max_rate,
+                                         unsigned long *best_parent_rate,
+                                         struct clk_hw **best_parent_hw);
        int             (*set_parent)(struct clk_hw *hw, u8 index);
        u8              (*get_parent)(struct clk_hw *hw);
        int             (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -216,13 +220,17 @@ struct clk_init_data {
  * clk_foo and then referenced by the struct clk instance that uses struct
  * clk_foo's clk_ops
  *
- * @clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
+ * @core: pointer to the struct clk_core instance that points back to this
+ * struct clk_hw instance
+ *
+ * @clk: pointer to the per-user struct clk instance that can be used to call
+ * into the clk API
  *
  * @init: pointer to struct clk_init_data that contains the init data shared
  * with the common clock framework.
  */
 struct clk_hw {
+       struct clk_core *core;
        struct clk *clk;
        const struct clk_init_data *init;
 };
@@ -294,6 +302,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 bit_idx,
                u8 clk_gate_flags, spinlock_t *lock);
+void clk_unregister_gate(struct clk *clk);
 
 struct clk_div_table {
        unsigned int    val;
@@ -352,6 +361,17 @@ struct clk_divider {
 #define CLK_DIVIDER_READ_ONLY          BIT(5)
 
 extern const struct clk_ops clk_divider_ops;
+
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+               unsigned int val, const struct clk_div_table *table,
+               unsigned long flags);
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *prate, const struct clk_div_table *table,
+               u8 width, unsigned long flags);
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+               const struct clk_div_table *table, u8 width,
+               unsigned long flags);
+
 struct clk *clk_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
@@ -361,6 +381,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
                void __iomem *reg, u8 shift, u8 width,
                u8 clk_divider_flags, const struct clk_div_table *table,
                spinlock_t *lock);
+void clk_unregister_divider(struct clk *clk);
 
 /**
  * struct clk_mux - multiplexer clock
@@ -382,6 +403,8 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
  *     register, and mask of mux bits are in higher 16-bit of this register.
  *     While setting the mux bits, higher 16-bit should also be updated to
  *     indicate changing mux bits.
+ * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
+ *     frequency.
  */
 struct clk_mux {
        struct clk_hw   hw;
@@ -396,7 +419,8 @@ struct clk_mux {
 #define CLK_MUX_INDEX_ONE              BIT(0)
 #define CLK_MUX_INDEX_BIT              BIT(1)
 #define CLK_MUX_HIWORD_MASK            BIT(2)
-#define CLK_MUX_READ_ONLY      BIT(3) /* mux setting cannot be changed */
+#define CLK_MUX_READ_ONLY              BIT(3) /* mux can't be changed */
+#define CLK_MUX_ROUND_CLOSEST          BIT(4)
 
 extern const struct clk_ops clk_mux_ops;
 extern const struct clk_ops clk_mux_ro_ops;
@@ -411,6 +435,8 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
                void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock);
 
+void clk_unregister_mux(struct clk *clk);
+
 void of_fixed_factor_clk_setup(struct device_node *node);
 
 /**
@@ -550,15 +576,29 @@ bool __clk_is_prepared(struct clk *clk);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
                              unsigned long *best_parent_rate,
                              struct clk_hw **best_parent_p);
+unsigned long __clk_determine_rate(struct clk_hw *core,
+                                  unsigned long rate,
+                                  unsigned long min_rate,
+                                  unsigned long max_rate);
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p);
+
+static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
+{
+       dst->clk = src->clk;
+       dst->core = src->core;
+}
 
 /*
  * FIXME clock api without lock protection
  */
-int __clk_prepare(struct clk *clk);
-void __clk_unprepare(struct clk *clk);
-void __clk_reparent(struct clk *clk, struct clk *new_parent);
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
 
 struct of_device_id;
index c7f258a81761d22b1b204654d3582af55044915a..8381bbfbc3085bcde157c02ea7234826f757a1e8 100644 (file)
@@ -301,6 +301,46 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
  */
 int clk_set_rate(struct clk *clk, unsigned long rate);
 
+/**
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
+ *
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
+ */
+bool clk_has_parent(struct clk *clk, struct clk *parent);
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
 /**
  * clk_set_parent - set the parent clock source for this clock
  * @clk: clock source
@@ -374,6 +414,11 @@ static inline long clk_round_rate(struct clk *clk, unsigned long rate)
        return 0;
 }
 
+static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+       return true;
+}
+
 static inline int clk_set_parent(struct clk *clk, struct clk *parent)
 {
        return 0;
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
deleted file mode 100644 (file)
index aed28c4..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2013 - Hans de Goede <hdegoede@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_CLK_SUNXI_H_
-#define __LINUX_CLK_SUNXI_H_
-
-#include <linux/clk.h>
-
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output);
-
-#endif
index 3ca9fca827a2f1299ed7eea5ca9ec908e57f2fc8..19c4208f4752fd6957712b8a9e900a6baa0b4945 100644 (file)
@@ -120,6 +120,4 @@ static inline void tegra_cpu_clock_resume(void)
 }
 #endif
 
-void tegra_clocks_apply_init_table(void);
-
 #endif /* __LINUX_CLK_TEGRA_H_ */
index 55ef529a0dbf905995781bd051bf926396b837c5..67844003493de5936809dc1acac72cd88b5bc521 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __LINUX_CLK_TI_H__
 #define __LINUX_CLK_TI_H__
 
+#include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 
 /**
@@ -217,6 +218,13 @@ struct ti_dt_clk {
 /* Maximum number of clock memmaps */
 #define CLK_MAX_MEMMAPS                        4
 
+/* Static memmap indices */
+enum {
+       TI_CLKM_CM = 0,
+       TI_CLKM_PRM,
+       TI_CLKM_SCRM,
+};
+
 typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
 
 /**
@@ -263,6 +271,8 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
                                           u8 index);
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
                                       unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_clk);
 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
@@ -272,6 +282,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
                                    unsigned long *parent_rate);
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
                                        unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk);
 u8 omap2_init_dpll_parent(struct clk_hw *hw);
@@ -348,4 +360,17 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
 
+#ifdef CONFIG_ATAGS
+int omap3430_clk_legacy_init(void);
+int omap3430es1_clk_legacy_init(void);
+int omap36xx_clk_legacy_init(void);
+int am35xx_clk_legacy_init(void);
+#else
+static inline int omap3430_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; }
+static inline int am35xx_clk_legacy_init(void) { return -ENXIO; }
+#endif
+
+
 #endif
index d1ec10a940ffffb01a94bcedf9d113e1940619c7..1b45e4a0519b2c34033db91e37fd1f1f0b367f8c 100644 (file)
@@ -202,7 +202,7 @@ static __always_inline void data_access_exceeds_word_size(void)
 {
 }
 
-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
        switch (size) {
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
@@ -259,10 +259,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  */
 
 #define READ_ONCE(x) \
-       ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+       ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-       ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
+       ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
 
index 92c08cf7670e2afa128a1a15cd454b7f2398fbbf..d8358799c59411f5b715e9dfbb139b94b8dbca8f 100644 (file)
@@ -215,13 +215,16 @@ struct dentry_operations {
 #define DCACHE_LRU_LIST                        0x00080000
 
 #define DCACHE_ENTRY_TYPE              0x00700000
-#define DCACHE_MISS_TYPE               0x00000000 /* Negative dentry */
-#define DCACHE_DIRECTORY_TYPE          0x00100000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE            0x00200000 /* Lookupless directory (presumed automount) */
-#define DCACHE_SYMLINK_TYPE            0x00300000 /* Symlink */
-#define DCACHE_FILE_TYPE               0x00400000 /* Other file type */
+#define DCACHE_MISS_TYPE               0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
+#define DCACHE_WHITEOUT_TYPE           0x00100000 /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE          0x00200000 /* Normal directory */
+#define DCACHE_AUTODIR_TYPE            0x00300000 /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE            0x00400000 /* Regular file type (or fallthru to such) */
+#define DCACHE_SPECIAL_TYPE            0x00500000 /* Other file type (or fallthru to such) */
+#define DCACHE_SYMLINK_TYPE            0x00600000 /* Symlink (or fallthru to such) */
 
 #define DCACHE_MAY_FREE                        0x00800000
+#define DCACHE_FALLTHRU                        0x01000000 /* Fall through to lower layer */
 
 extern seqlock_t rename_lock;
 
@@ -423,6 +426,16 @@ static inline unsigned __d_entry_type(const struct dentry *dentry)
        return dentry->d_flags & DCACHE_ENTRY_TYPE;
 }
 
+static inline bool d_is_miss(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+}
+
+static inline bool d_is_whiteout(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
+}
+
 static inline bool d_can_lookup(const struct dentry *dentry)
 {
        return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
@@ -443,14 +456,25 @@ static inline bool d_is_symlink(const struct dentry *dentry)
        return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
 }
 
+static inline bool d_is_reg(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
+}
+
+static inline bool d_is_special(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
+}
+
 static inline bool d_is_file(const struct dentry *dentry)
 {
-       return __d_entry_type(dentry) == DCACHE_FILE_TYPE;
+       return d_is_reg(dentry) || d_is_special(dentry);
 }
 
 static inline bool d_is_negative(const struct dentry *dentry)
 {
-       return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+       // TODO: check d_is_whiteout(dentry) also.
+       return d_is_miss(dentry);
 }
 
 static inline bool d_is_positive(const struct dentry *dentry)
@@ -458,10 +482,75 @@ static inline bool d_is_positive(const struct dentry *dentry)
        return !d_is_negative(dentry);
 }
 
+extern void d_set_fallthru(struct dentry *dentry);
+
+static inline bool d_is_fallthru(const struct dentry *dentry)
+{
+       return dentry->d_flags & DCACHE_FALLTHRU;
+}
+
+
 extern int sysctl_vfs_cache_pressure;
 
 static inline unsigned long vfs_pressure_ratio(unsigned long val)
 {
        return mult_frac(val, sysctl_vfs_cache_pressure, 100);
 }
+
+/**
+ * d_inode - Get the actual inode of this dentry
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+       return dentry->d_inode;
+}
+
+/**
+ * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode_rcu(const struct dentry *dentry)
+{
+       return ACCESS_ONCE(dentry->d_inode);
+}
+
+/**
+ * d_backing_inode - Get upper or lower inode we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get at the inode that will be used
+ * if this dentry were to be opened as a file.  The inode may be on the upper
+ * dentry or it may be on a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own inodes.
+ */
+static inline struct inode *d_backing_inode(const struct dentry *upper)
+{
+       struct inode *inode = upper->d_inode;
+
+       return inode;
+}
+
+/**
+ * d_backing_dentry - Get upper or lower dentry we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get the dentry of the inode that
+ * will be used if this dentry were opened as a file.  It may be the upper
+ * dentry or it may be a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own dentries.
+ */
+static inline struct dentry *d_backing_dentry(struct dentry *upper)
+{
+       return upper;
+}
+
 #endif /* __LINUX_DCACHE_H */
index 40cd75e21ea2145fc535a255bfbd292f2a5e8a07..b6997a0cb5284759ee8ff107e9bb78ae8839f0c0 100644 (file)
@@ -188,25 +188,6 @@ enum dma_ctrl_flags {
        DMA_PREP_FENCE = (1 << 5),
 };
 
-/**
- * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
- * on a running channel.
- * @DMA_TERMINATE_ALL: terminate all ongoing transfers
- * @DMA_PAUSE: pause ongoing transfers
- * @DMA_RESUME: resume paused transfer
- * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
- * that need to runtime reconfigure the slave channels (as opposed to passing
- * configuration data in statically from the platform). An additional
- * argument of struct dma_slave_config must be passed in with this
- * command.
- */
-enum dma_ctrl_cmd {
-       DMA_TERMINATE_ALL,
-       DMA_PAUSE,
-       DMA_RESUME,
-       DMA_SLAVE_CONFIG,
-};
-
 /**
  * enum sum_check_bits - bit position of pq_check_flags
  */
@@ -298,6 +279,9 @@ enum dma_slave_buswidth {
        DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
        DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
        DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+       DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+       DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+       DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
 };
 
 /**
@@ -336,9 +320,8 @@ enum dma_slave_buswidth {
  * This struct is passed in as configuration data to a DMA engine
  * in order to set up a certain channel for DMA transport at runtime.
  * The DMA device/engine has to provide support for an additional
- * command in the channel config interface, DMA_SLAVE_CONFIG
- * and this struct will then be passed in as an argument to the
- * DMA engine device_control() function.
+ * callback in the dma_device structure, device_config and this struct
+ * will then be passed in as an argument to the function.
  *
  * The rationale for adding configuration information to this struct is as
  * follows: if it is likely that more than one DMA slave controllers in
@@ -387,7 +370,7 @@ enum dma_residue_granularity {
 /* struct dma_slave_caps - expose capabilities of a slave channel only
  *
  * @src_addr_widths: bit mask of src addr widths the channel supports
- * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
  * @directions: bit mask of slave direction the channel supported
  *     since the enum dma_transfer_direction is not defined as bits for each
  *     type of direction, the dma controller should fill (1 << <TYPE>) and same
@@ -398,7 +381,7 @@ enum dma_residue_granularity {
  */
 struct dma_slave_caps {
        u32 src_addr_widths;
-       u32 dstn_addr_widths;
+       u32 dst_addr_widths;
        u32 directions;
        bool cmd_pause;
        bool cmd_terminate;
@@ -594,6 +577,14 @@ struct dma_tx_state {
  * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave direction the device supports since
+ *     the enum dma_transfer_direction is not defined as bits for
+ *     each type of direction, the dma controller should fill (1 <<
+ *     <TYPE>) and same should be checked by controller as well
+ * @residue_granularity: granularity of the transfer residue reported
+ *     by tx_status
  * @device_alloc_chan_resources: allocate resources and return the
  *     number of allocated descriptors
  * @device_free_chan_resources: release DMA channel's resources
@@ -608,14 +599,19 @@ struct dma_tx_state {
  *     The function takes a buffer of size buf_len. The callback function will
  *     be called after period_len bytes have been transferred.
  * @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_control: manipulate all pending operations on a channel, returns
- *     zero or error code
+ * @device_config: Pushes a new configuration to a channel, return 0 or an error
+ *     code
+ * @device_pause: Pauses any transfer happening on a channel. Returns
+ *     0 or an error code
+ * @device_resume: Resumes any transfer on a channel previously
+ *     paused. Returns 0 or an error code
+ * @device_terminate_all: Aborts all transfers on a channel. Returns 0
+ *     or an error code
  * @device_tx_status: poll for transaction completion, the optional
  *     txstate parameter can be supplied with a pointer to get a
  *     struct with auxiliary transfer status information, otherwise the call
  *     will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
- * @device_slave_caps: return the slave channel capabilities
  */
 struct dma_device {
 
@@ -635,14 +631,19 @@ struct dma_device {
        int dev_id;
        struct device *dev;
 
+       u32 src_addr_widths;
+       u32 dst_addr_widths;
+       u32 directions;
+       enum dma_residue_granularity residue_granularity;
+
        int (*device_alloc_chan_resources)(struct dma_chan *chan);
        void (*device_free_chan_resources)(struct dma_chan *chan);
 
        struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
-               struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+               struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                size_t len, unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
-               struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+               struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
                unsigned int src_cnt, size_t len, unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
                struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
@@ -674,31 +675,26 @@ struct dma_device {
        struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
                struct dma_chan *chan, struct dma_interleaved_template *xt,
                unsigned long flags);
-       int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-               unsigned long arg);
+
+       int (*device_config)(struct dma_chan *chan,
+                            struct dma_slave_config *config);
+       int (*device_pause)(struct dma_chan *chan);
+       int (*device_resume)(struct dma_chan *chan);
+       int (*device_terminate_all)(struct dma_chan *chan);
 
        enum dma_status (*device_tx_status)(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate);
        void (*device_issue_pending)(struct dma_chan *chan);
-       int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
 };
 
-static inline int dmaengine_device_control(struct dma_chan *chan,
-                                          enum dma_ctrl_cmd cmd,
-                                          unsigned long arg)
-{
-       if (chan->device->device_control)
-               return chan->device->device_control(chan, cmd, arg);
-
-       return -ENOSYS;
-}
-
 static inline int dmaengine_slave_config(struct dma_chan *chan,
                                          struct dma_slave_config *config)
 {
-       return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
-                       (unsigned long)config);
+       if (chan->device->device_config)
+               return chan->device->device_config(chan, config);
+
+       return -ENOSYS;
 }
 
 static inline bool is_slave_direction(enum dma_transfer_direction direction)
@@ -765,34 +761,28 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
                        src_sg, src_nents, flags);
 }
 
-static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
-{
-       if (!chan || !caps)
-               return -EINVAL;
-
-       /* check if the channel supports slave transactions */
-       if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
-               return -ENXIO;
-
-       if (chan->device->device_slave_caps)
-               return chan->device->device_slave_caps(chan, caps);
-
-       return -ENXIO;
-}
-
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
-       return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+       if (chan->device->device_terminate_all)
+               return chan->device->device_terminate_all(chan);
+
+       return -ENOSYS;
 }
 
 static inline int dmaengine_pause(struct dma_chan *chan)
 {
-       return dmaengine_device_control(chan, DMA_PAUSE, 0);
+       if (chan->device->device_pause)
+               return chan->device->device_pause(chan);
+
+       return -ENOSYS;
 }
 
 static inline int dmaengine_resume(struct dma_chan *chan)
 {
-       return dmaengine_device_control(chan, DMA_RESUME, 0);
+       if (chan->device->device_resume)
+               return chan->device->device_resume(chan);
+
+       return -ENOSYS;
 }
 
 static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
@@ -1059,6 +1049,7 @@ struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
                                                  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
 static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
@@ -1093,6 +1084,11 @@ static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
+static inline int dma_get_slave_caps(struct dma_chan *chan,
+                                    struct dma_slave_caps *caps)
+{
+       return -ENXIO;
+}
 #endif
 
 /* --- DMA device --- */
index 447932aed1e194d7e258bd632bd69eee5d040c8e..b4d71b5e1ff23a2d3f3ec5c481937edd571ead4d 100644 (file)
@@ -968,9 +968,6 @@ struct file_lock_context {
        struct list_head        flc_flock;
        struct list_head        flc_posix;
        struct list_head        flc_lease;
-       int                     flc_flock_cnt;
-       int                     flc_posix_cnt;
-       int                     flc_lease_cnt;
 };
 
 /* The following constant reflects the upper bound of the file/locking space */
index 51f7ccadf923c337ddb5627491a958c5e74fa610..4173a8fdad9efd052870b8738547ac1fa1962526 100644 (file)
@@ -33,6 +33,8 @@
  * @units:             Measurement unit for this attribute.
  * @unit_expo:         Exponent used in the data.
  * @size:              Size in bytes for data size.
+ * @logical_minimum:   Logical minimum value for this attribute.
+ * @logical_maximum:   Logical maximum value for this attribute.
  */
 struct hid_sensor_hub_attribute_info {
        u32 usage_id;
@@ -146,6 +148,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
 
 /**
 * sensor_hub_input_attr_get_raw_value() - Synchronous read request
+* @hsdev:      Hub device instance.
 * @usage_id:   Attribute usage id of parent physical device as per spec
 * @attr_usage_id:      Attribute usage id as per spec
 * @report_id:  Report id to look for
@@ -160,6 +163,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
                        u32 attr_usage_id, u32 report_id);
 /**
 * sensor_hub_set_feature() - Feature set request
+* @hsdev:      Hub device instance.
 * @report_id:  Report id to look for
 * @field_index:        Field index inside a report
 * @value:      Value to set
@@ -172,6 +176,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
 
 /**
 * sensor_hub_get_feature() - Feature get request
+* @hsdev:      Hub device instance.
 * @report_id:  Report id to look for
 * @field_index:        Field index inside a report
 * @value:      Place holder for return value
index 7c7695940dddeae9d3d22129ce4a14eaf70e1a5e..f17da50402a4dad6bf4d4aa907a32e8df1d7dea7 100644 (file)
@@ -130,8 +130,6 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
  * @probe: Callback for device binding
  * @remove: Callback for device unbinding
  * @shutdown: Callback for device shutdown
- * @suspend: Callback for device suspend
- * @resume: Callback for device resume
  * @alert: Alert callback, for example for the SMBus alert protocol
  * @command: Callback for bus-wide signaling (optional)
  * @driver: Device driver model driver
@@ -174,8 +172,6 @@ struct i2c_driver {
 
        /* driver model interfaces that don't relate to enumeration  */
        void (*shutdown)(struct i2c_client *);
-       int (*suspend)(struct i2c_client *, pm_message_t mesg);
-       int (*resume)(struct i2c_client *);
 
        /* Alert callback, for example for the SMBus alert protocol.
         * The format and meaning of the data value depends on the protocol.
index a57bca2ea97e51058283dfa9305aff9f9244b12e..dad8b00beed27220856985c984e620d88cafd736 100644 (file)
@@ -44,6 +44,7 @@ struct br_ip_list {
 #define BR_PROMISC             BIT(7)
 #define BR_PROXYARP            BIT(8)
 #define BR_LEARNING_SYNC       BIT(9)
+#define BR_PROXYARP_WIFI       BIT(10)
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
index 420f77b34d02639192bae47ae2fcc95462eaf8e6..e6a6aac451db4614df9cc4e58911f9ff6629620d 100644 (file)
@@ -243,7 +243,6 @@ extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
-extern unsigned int gic_get_timer_pending(void);
 extern int gic_get_c0_compare_int(void);
 extern int gic_get_c0_perfcount_int(void);
 #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
index 75ae2e2631fceaa27915f3d100b1f03244b17500..a19bcf9e762e1d4886e8bd5ed202e4485ba8cf3e 100644 (file)
@@ -156,8 +156,14 @@ typedef enum {
        KDB_REASON_SYSTEM_NMI,  /* In NMI due to SYSTEM cmd; regs valid */
 } kdb_reason_t;
 
+enum kdb_msgsrc {
+       KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */
+       KDB_MSGSRC_PRINTK, /* trapped from printk() */
+};
+
 extern int kdb_trap_printk;
-extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
+extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
+                                     va_list args);
 extern __printf(1, 2) int kdb_printf(const char *, ...);
 typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
 
index 495203ff221c3e4039557fffafa56b22d38882c3..acd5b12565cc2725724ed8a65692df9142634fae 100644 (file)
@@ -8,52 +8,13 @@
  *
  * The Guest needs devices to do anything useful.  Since we don't let it touch
  * real devices (think of the damage it could do!) we provide virtual devices.
- * We could emulate a PCI bus with various devices on it, but that is a fairly
- * complex burden for the Host and suboptimal for the Guest, so we have our own
- * simple lguest bus and we use "virtio" drivers.  These drivers need a set of
- * routines from us which will actually do the virtual I/O, but they handle all
- * the net/block/console stuff themselves.  This means that if we want to add
- * a new device, we simply need to write a new virtio driver and create support
- * for it in the Launcher: this code won't need to change.
+ * We emulate a PCI bus with virtio devices on it; we used to have our own
+ * lguest bus which was far simpler, but this tests the virtio 1.0 standard.
  *
  * Virtio devices are also used by kvm, so we can simply reuse their optimized
  * device drivers.  And one day when everyone uses virtio, my plan will be
  * complete.  Bwahahahah!
- *
- * Devices are described by a simplified ID, a status byte, and some "config"
- * bytes which describe this device's configuration.  This is placed by the
- * Launcher just above the top of physical memory:
- */
-struct lguest_device_desc {
-       /* The device type: console, network, disk etc.  Type 0 terminates. */
-       __u8 type;
-       /* The number of virtqueues (first in config array) */
-       __u8 num_vq;
-       /*
-        * The number of bytes of feature bits.  Multiply by 2: one for host
-        * features and one for Guest acknowledgements.
-        */
-       __u8 feature_len;
-       /* The number of bytes of the config array after virtqueues. */
-       __u8 config_len;
-       /* A status byte, written by the Guest. */
-       __u8 status;
-       __u8 config[0];
-};
-
-/*D:135
- * This is how we expect the device configuration field for a virtqueue
- * to be laid out in config space.
  */
-struct lguest_vqconfig {
-       /* The number of entries in the virtio_ring */
-       __u16 num;
-       /* The interrupt we get when something happens. */
-       __u16 irq;
-       /* The page number of the virtio ring for this device. */
-       __u32 pfn;
-};
-/*:*/
 
 /* Write command first word is a request. */
 enum lguest_req
@@ -62,12 +23,22 @@ enum lguest_req
        LHREQ_GETDMA, /* No longer used */
        LHREQ_IRQ, /* + irq */
        LHREQ_BREAK, /* No longer used */
-       LHREQ_EVENTFD, /* + address, fd. */
+       LHREQ_EVENTFD, /* No longer used. */
+       LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */
+       LHREQ_SETREG, /* + offset within struct pt_regs, value. */
+       LHREQ_TRAP, /* + trap number to deliver to guest. */
 };
 
 /*
- * The alignment to use between consumer and producer parts of vring.
- * x86 pagesize for historical reasons.
+ * This is what read() of the lguest fd populates.  trap ==
+ * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the
+ * argument), 14 for a page fault in the MMIO region (addr is
+ * the trap address, insn is the instruction), or 13 for a GPF
+ * (insn is the instruction).
  */
-#define LGUEST_VRING_ALIGN     4096
+struct lguest_pending {
+       __u8 trap;
+       __u8 insn[7];
+       __u32 addr;
+};
 #endif /* _LINUX_LGUEST_LAUNCHER */
index 81589d176ae873c3dbd9cec1c27bdfcadb3679fb..dfabd6db7ddf7c1cba523a022aa5dcf008671652 100644 (file)
@@ -124,10 +124,27 @@ enum {
 #define AXP288_PMIC_ADC_H               0x56
 #define AXP288_PMIC_ADC_L               0x57
 #define AXP288_ADC_TS_PIN_CTRL          0x84
-
 #define AXP288_PMIC_ADC_EN              0x84
-#define AXP288_FG_TUNE5                        0xed
 
+/* Fuel Gauge */
+#define AXP288_FG_RDC1_REG          0xba
+#define AXP288_FG_RDC0_REG          0xbb
+#define AXP288_FG_OCVH_REG          0xbc
+#define AXP288_FG_OCVL_REG          0xbd
+#define AXP288_FG_OCV_CURVE_REG     0xc0
+#define AXP288_FG_DES_CAP1_REG      0xe0
+#define AXP288_FG_DES_CAP0_REG      0xe1
+#define AXP288_FG_CC_MTR1_REG       0xe2
+#define AXP288_FG_CC_MTR0_REG       0xe3
+#define AXP288_FG_OCV_CAP_REG       0xe4
+#define AXP288_FG_CC_CAP_REG        0xe5
+#define AXP288_FG_LOW_CAP_REG       0xe6
+#define AXP288_FG_TUNE0             0xe8
+#define AXP288_FG_TUNE1             0xe9
+#define AXP288_FG_TUNE2             0xea
+#define AXP288_FG_TUNE3             0xeb
+#define AXP288_FG_TUNE4             0xec
+#define AXP288_FG_TUNE5             0xed
 
 /* Regulators IDs */
 enum {
@@ -236,4 +253,26 @@ struct axp20x_dev {
        const struct regmap_irq_chip    *regmap_irq_chip;
 };
 
+#define BATTID_LEN                             64
+#define OCV_CURVE_SIZE                 32
+#define MAX_THERM_CURVE_SIZE   25
+#define PD_DEF_MIN_TEMP                        0
+#define PD_DEF_MAX_TEMP                        55
+
+struct axp20x_fg_pdata {
+       char battid[BATTID_LEN + 1];
+       int design_cap;
+       int min_volt;
+       int max_volt;
+       int max_temp;
+       int min_temp;
+       int cap1;
+       int cap0;
+       int rdc1;
+       int rdc0;
+       int ocv_curve[OCV_CURVE_SIZE];
+       int tcsz;
+       int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
+};
+
 #endif /* __LINUX_MFD_AXP20X_H */
index b92a3262f8f6cd19130cafaaa80f7cf455f39c0c..79f4d822ba133a5b52a4d39868b52c04ff554fd0 100644 (file)
@@ -36,6 +36,7 @@ enum da9063_models {
 enum da9063_variant_codes {
        PMIC_DA9063_AD = 0x3,
        PMIC_DA9063_BB = 0x5,
+       PMIC_DA9063_CA = 0x6,
 };
 
 /* Interrupts */
diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h
new file mode 100644 (file)
index 0000000..76e6689
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * DA9150 MFD Driver - Core Data
+ *
+ * Copyright (c) 2014 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __DA9150_CORE_H
+#define __DA9150_CORE_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+
+/* I2C address paging */
+#define DA9150_REG_PAGE_SHIFT  8
+#define DA9150_REG_PAGE_MASK   0xFF
+
+/* IRQs */
+#define DA9150_NUM_IRQ_REGS    4
+#define DA9150_IRQ_VBUS                0
+#define DA9150_IRQ_CHG         1
+#define DA9150_IRQ_TCLASS      2
+#define DA9150_IRQ_TJUNC       3
+#define DA9150_IRQ_VFAULT      4
+#define DA9150_IRQ_CONF                5
+#define DA9150_IRQ_DAT         6
+#define DA9150_IRQ_DTYPE       7
+#define DA9150_IRQ_ID          8
+#define DA9150_IRQ_ADP         9
+#define DA9150_IRQ_SESS_END    10
+#define DA9150_IRQ_SESS_VLD    11
+#define DA9150_IRQ_FG          12
+#define DA9150_IRQ_GP          13
+#define DA9150_IRQ_TBAT                14
+#define DA9150_IRQ_GPIOA       15
+#define DA9150_IRQ_GPIOB       16
+#define DA9150_IRQ_GPIOC       17
+#define DA9150_IRQ_GPIOD       18
+#define DA9150_IRQ_GPADC       19
+#define DA9150_IRQ_WKUP                20
+
+struct da9150_pdata {
+       int irq_base;
+};
+
+struct da9150 {
+       struct device *dev;
+       struct regmap *regmap;
+       struct regmap_irq_chip_data *regmap_irq_data;
+       int irq;
+       int irq_base;
+};
+
+/* Device I/O */
+u8 da9150_reg_read(struct da9150 *da9150, u16 reg);
+void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val);
+void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val);
+
+void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf);
+void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf);
+#endif /* __DA9150_CORE_H */
diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h
new file mode 100644 (file)
index 0000000..27ca6ee
--- /dev/null
@@ -0,0 +1,1155 @@
+/*
+ * DA9150 MFD Driver - Registers
+ *
+ * Copyright (c) 2014 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __DA9150_REGISTERS_H
+#define __DA9150_REGISTERS_H
+
+#include <linux/bitops.h>
+
+/* Registers */
+#define DA9150_PAGE_CON                        0x000
+#define DA9150_STATUS_A                        0x068
+#define DA9150_STATUS_B                        0x069
+#define DA9150_STATUS_C                        0x06A
+#define DA9150_STATUS_D                        0x06B
+#define DA9150_STATUS_E                        0x06C
+#define DA9150_STATUS_F                        0x06D
+#define DA9150_STATUS_G                        0x06E
+#define DA9150_STATUS_H                        0x06F
+#define DA9150_STATUS_I                        0x070
+#define DA9150_STATUS_J                        0x071
+#define DA9150_STATUS_K                        0x072
+#define DA9150_STATUS_L                        0x073
+#define DA9150_STATUS_N                        0x074
+#define DA9150_FAULT_LOG_A             0x076
+#define DA9150_FAULT_LOG_B             0x077
+#define DA9150_EVENT_E                 0x078
+#define DA9150_EVENT_F                 0x079
+#define DA9150_EVENT_G                 0x07A
+#define DA9150_EVENT_H                 0x07B
+#define DA9150_IRQ_MASK_E              0x07C
+#define DA9150_IRQ_MASK_F              0x07D
+#define DA9150_IRQ_MASK_G              0x07E
+#define DA9150_IRQ_MASK_H              0x07F
+#define DA9150_PAGE_CON_1              0x080
+#define DA9150_CONFIG_A                        0x0E0
+#define DA9150_CONFIG_B                        0x0E1
+#define DA9150_CONFIG_C                        0x0E2
+#define DA9150_CONFIG_D                        0x0E3
+#define DA9150_CONFIG_E                        0x0E4
+#define DA9150_CONTROL_A               0x0E5
+#define DA9150_CONTROL_B               0x0E6
+#define DA9150_CONTROL_C               0x0E7
+#define DA9150_GPIO_A_B                        0x0E8
+#define DA9150_GPIO_C_D                        0x0E9
+#define DA9150_GPIO_MODE_CONT          0x0EA
+#define DA9150_GPIO_CTRL_B             0x0EB
+#define DA9150_GPIO_CTRL_A             0x0EC
+#define DA9150_GPIO_CTRL_C             0x0ED
+#define DA9150_GPIO_CFG_A              0x0EE
+#define DA9150_GPIO_CFG_B              0x0EF
+#define DA9150_GPIO_CFG_C              0x0F0
+#define DA9150_GPADC_MAN               0x0F2
+#define DA9150_GPADC_RES_A             0x0F4
+#define DA9150_GPADC_RES_B             0x0F5
+#define DA9150_PAGE_CON_2              0x100
+#define DA9150_OTP_CONT_SHARED         0x101
+#define DA9150_INTERFACE_SHARED                0x105
+#define DA9150_CONFIG_A_SHARED         0x106
+#define DA9150_CONFIG_D_SHARED         0x109
+#define DA9150_ADETVB_CFG_C            0x150
+#define DA9150_ADETD_STAT              0x151
+#define DA9150_ADET_CMPSTAT            0x152
+#define DA9150_ADET_CTRL_A             0x153
+#define DA9150_ADETVB_CFG_B            0x154
+#define DA9150_ADETVB_CFG_A            0x155
+#define DA9150_ADETAC_CFG_A            0x156
+#define DA9150_ADDETAC_CFG_B           0x157
+#define DA9150_ADETAC_CFG_C            0x158
+#define DA9150_ADETAC_CFG_D            0x159
+#define DA9150_ADETVB_CFG_D            0x15A
+#define DA9150_ADETID_CFG_A            0x15B
+#define DA9150_ADET_RID_PT_CHG_H       0x15C
+#define DA9150_ADET_RID_PT_CHG_L       0x15D
+#define DA9150_PPR_TCTR_B              0x160
+#define DA9150_PPR_BKCTRL_A            0x163
+#define DA9150_PPR_BKCFG_A             0x164
+#define DA9150_PPR_BKCFG_B             0x165
+#define DA9150_PPR_CHGCTRL_A           0x166
+#define DA9150_PPR_CHGCTRL_B           0x167
+#define DA9150_PPR_CHGCTRL_C           0x168
+#define DA9150_PPR_TCTR_A              0x169
+#define DA9150_PPR_CHGCTRL_D           0x16A
+#define DA9150_PPR_CHGCTRL_E           0x16B
+#define DA9150_PPR_CHGCTRL_F           0x16C
+#define DA9150_PPR_CHGCTRL_G           0x16D
+#define DA9150_PPR_CHGCTRL_H           0x16E
+#define DA9150_PPR_CHGCTRL_I           0x16F
+#define DA9150_PPR_CHGCTRL_J           0x170
+#define DA9150_PPR_CHGCTRL_K           0x171
+#define DA9150_PPR_CHGCTRL_L           0x172
+#define DA9150_PPR_CHGCTRL_M           0x173
+#define DA9150_PPR_THYST_A             0x174
+#define DA9150_PPR_THYST_B             0x175
+#define DA9150_PPR_THYST_C             0x176
+#define DA9150_PPR_THYST_D             0x177
+#define DA9150_PPR_THYST_E             0x178
+#define DA9150_PPR_THYST_F             0x179
+#define DA9150_PPR_THYST_G             0x17A
+#define DA9150_PAGE_CON_3              0x180
+#define DA9150_PAGE_CON_4              0x200
+#define DA9150_PAGE_CON_5              0x280
+#define DA9150_PAGE_CON_6              0x300
+#define DA9150_COREBTLD_STAT_A         0x302
+#define DA9150_COREBTLD_CTRL_A         0x303
+#define DA9150_CORE_CONFIG_A           0x304
+#define DA9150_CORE_CONFIG_C           0x305
+#define DA9150_CORE_CONFIG_B           0x306
+#define DA9150_CORE_CFG_DATA_A         0x307
+#define DA9150_CORE_CFG_DATA_B         0x308
+#define DA9150_CORE_CMD_A              0x309
+#define DA9150_CORE_DATA_A             0x30A
+#define DA9150_CORE_DATA_B             0x30B
+#define DA9150_CORE_DATA_C             0x30C
+#define DA9150_CORE_DATA_D             0x30D
+#define DA9150_CORE2WIRE_STAT_A                0x310
+#define DA9150_CORE2WIRE_CTRL_A                0x311
+#define DA9150_FW_CTRL_A               0x312
+#define DA9150_FW_CTRL_C               0x313
+#define DA9150_FW_CTRL_D               0x314
+#define DA9150_FG_CTRL_A               0x315
+#define DA9150_FG_CTRL_B               0x316
+#define DA9150_FW_CTRL_E               0x317
+#define DA9150_FW_CTRL_B               0x318
+#define DA9150_GPADC_CMAN              0x320
+#define DA9150_GPADC_CRES_A            0x322
+#define DA9150_GPADC_CRES_B            0x323
+#define DA9150_CC_CFG_A                        0x328
+#define DA9150_CC_CFG_B                        0x329
+#define DA9150_CC_ICHG_RES_A           0x32A
+#define DA9150_CC_ICHG_RES_B           0x32B
+#define DA9150_CC_IAVG_RES_A           0x32C
+#define DA9150_CC_IAVG_RES_B           0x32D
+#define DA9150_TAUX_CTRL_A             0x330
+#define DA9150_TAUX_RELOAD_H           0x332
+#define DA9150_TAUX_RELOAD_L           0x333
+#define DA9150_TAUX_VALUE_H            0x334
+#define DA9150_TAUX_VALUE_L            0x335
+#define DA9150_AUX_DATA_0              0x338
+#define DA9150_AUX_DATA_1              0x339
+#define DA9150_AUX_DATA_2              0x33A
+#define DA9150_AUX_DATA_3              0x33B
+#define DA9150_BIF_CTRL                        0x340
+#define DA9150_TBAT_CTRL_A             0x342
+#define DA9150_TBAT_CTRL_B             0x343
+#define DA9150_TBAT_RES_A              0x344
+#define DA9150_TBAT_RES_B              0x345
+
+/* DA9150_PAGE_CON = 0x000 */
+#define DA9150_PAGE_SHIFT                      0
+#define DA9150_PAGE_MASK                       (0x3f << 0)
+#define DA9150_I2C_PAGE_SHIFT                  1
+#define DA9150_I2C_PAGE_MASK                   (0x1f << 1)
+#define DA9150_WRITE_MODE_SHIFT                        6
+#define DA9150_WRITE_MODE_MASK                 BIT(6)
+#define DA9150_REVERT_SHIFT                    7
+#define DA9150_REVERT_MASK                     BIT(7)
+
+/* DA9150_STATUS_A = 0x068 */
+#define DA9150_WKUP_STAT_SHIFT                 2
+#define DA9150_WKUP_STAT_MASK                  (0x0f << 2)
+#define DA9150_SLEEP_STAT_SHIFT                        6
+#define DA9150_SLEEP_STAT_MASK                 (0x03 << 6)
+
+/* DA9150_STATUS_B = 0x069 */
+#define DA9150_VFAULT_STAT_SHIFT               0
+#define DA9150_VFAULT_STAT_MASK                        BIT(0)
+#define DA9150_TFAULT_STAT_SHIFT               1
+#define DA9150_TFAULT_STAT_MASK                        BIT(1)
+
+/* DA9150_STATUS_C = 0x06A */
+#define DA9150_VDD33_STAT_SHIFT                        0
+#define DA9150_VDD33_STAT_MASK                 BIT(0)
+#define DA9150_VDD33_SLEEP_SHIFT               1
+#define DA9150_VDD33_SLEEP_MASK                        BIT(1)
+#define DA9150_LFOSC_STAT_SHIFT                        7
+#define DA9150_LFOSC_STAT_MASK                 BIT(7)
+
+/* DA9150_STATUS_D = 0x06B */
+#define DA9150_GPIOA_STAT_SHIFT                        0
+#define DA9150_GPIOA_STAT_MASK                 BIT(0)
+#define DA9150_GPIOB_STAT_SHIFT                        1
+#define DA9150_GPIOB_STAT_MASK                 BIT(1)
+#define DA9150_GPIOC_STAT_SHIFT                        2
+#define DA9150_GPIOC_STAT_MASK                 BIT(2)
+#define DA9150_GPIOD_STAT_SHIFT                        3
+#define DA9150_GPIOD_STAT_MASK                 BIT(3)
+
+/* DA9150_STATUS_E = 0x06C */
+#define DA9150_DTYPE_SHIFT                     0
+#define DA9150_DTYPE_MASK                      (0x1f << 0)
+#define DA9150_DTYPE_DT_NIL                    (0x00 << 0)
+#define DA9150_DTYPE_DT_USB_OTG                        BIT(0)
+#define DA9150_DTYPE_DT_USB_STD                        (0x02 << 0)
+#define DA9150_DTYPE_DT_USB_CHG                        (0x03 << 0)
+#define DA9150_DTYPE_DT_ACA_CHG                        (0x04 << 0)
+#define DA9150_DTYPE_DT_ACA_OTG                        (0x05 << 0)
+#define DA9150_DTYPE_DT_ACA_DOC                        (0x06 << 0)
+#define DA9150_DTYPE_DT_DED_CHG                        (0x07 << 0)
+#define DA9150_DTYPE_DT_CR5_CHG                        (0x08 << 0)
+#define DA9150_DTYPE_DT_CR4_CHG                        (0x0c << 0)
+#define DA9150_DTYPE_DT_PT_CHG                 (0x11 << 0)
+#define DA9150_DTYPE_DT_NN_ACC                 (0x16 << 0)
+#define DA9150_DTYPE_DT_NN_CHG                 (0x17 << 0)
+
+/* DA9150_STATUS_F = 0x06D */
+#define DA9150_SESS_VLD_SHIFT                  0
+#define DA9150_SESS_VLD_MASK                   BIT(0)
+#define DA9150_ID_ERR_SHIFT                    1
+#define DA9150_ID_ERR_MASK                     BIT(1)
+#define DA9150_PT_CHG_SHIFT                    2
+#define DA9150_PT_CHG_MASK                     BIT(2)
+
+/* DA9150_STATUS_G = 0x06E */
+#define DA9150_RID_SHIFT                       0
+#define DA9150_RID_MASK                                (0xff << 0)
+
+/* DA9150_STATUS_H = 0x06F */
+#define DA9150_VBUS_STAT_SHIFT                 0
+#define DA9150_VBUS_STAT_MASK                  (0x07 << 0)
+#define DA9150_VBUS_STAT_OFF                   (0x00 << 0)
+#define DA9150_VBUS_STAT_WAIT                  BIT(0)
+#define DA9150_VBUS_STAT_CHG                   (0x02 << 0)
+#define DA9150_VBUS_TRED_SHIFT                 3
+#define DA9150_VBUS_TRED_MASK                  BIT(3)
+#define DA9150_VBUS_DROP_STAT_SHIFT            4
+#define DA9150_VBUS_DROP_STAT_MASK             (0x0f << 4)
+
+/* DA9150_STATUS_I = 0x070 */
+#define DA9150_VBUS_ISET_STAT_SHIFT            0
+#define DA9150_VBUS_ISET_STAT_MASK             (0x1f << 0)
+#define DA9150_VBUS_OT_SHIFT                   7
+#define DA9150_VBUS_OT_MASK                    BIT(7)
+
+/* DA9150_STATUS_J = 0x071 */
+#define DA9150_CHG_STAT_SHIFT                  0
+#define DA9150_CHG_STAT_MASK                   (0x0f << 0)
+#define DA9150_CHG_STAT_OFF                    (0x00 << 0)
+#define DA9150_CHG_STAT_SUSP                   BIT(0)
+#define DA9150_CHG_STAT_ACT                    (0x02 << 0)
+#define DA9150_CHG_STAT_PRE                    (0x03 << 0)
+#define DA9150_CHG_STAT_CC                     (0x04 << 0)
+#define DA9150_CHG_STAT_CV                     (0x05 << 0)
+#define DA9150_CHG_STAT_FULL                   (0x06 << 0)
+#define DA9150_CHG_STAT_TEMP                   (0x07 << 0)
+#define DA9150_CHG_STAT_TIME                   (0x08 << 0)
+#define DA9150_CHG_STAT_BAT                    (0x09 << 0)
+#define DA9150_CHG_TEMP_SHIFT                  4
+#define DA9150_CHG_TEMP_MASK                   (0x07 << 4)
+#define DA9150_CHG_TEMP_UNDER                  (0x06 << 4)
+#define DA9150_CHG_TEMP_OVER                   (0x07 << 4)
+#define DA9150_CHG_IEND_STAT_SHIFT             7
+#define DA9150_CHG_IEND_STAT_MASK              BIT(7)
+
+/* DA9150_STATUS_K = 0x072 */
+#define DA9150_CHG_IAV_H_SHIFT                 0
+#define DA9150_CHG_IAV_H_MASK                  (0xff << 0)
+
+/* DA9150_STATUS_L = 0x073 */
+#define DA9150_CHG_IAV_L_SHIFT                 5
+#define DA9150_CHG_IAV_L_MASK                  (0x07 << 5)
+
+/* DA9150_STATUS_N = 0x074 */
+#define DA9150_CHG_TIME_SHIFT                  1
+#define DA9150_CHG_TIME_MASK                   BIT(1)
+#define DA9150_CHG_TRED_SHIFT                  2
+#define DA9150_CHG_TRED_MASK                   BIT(2)
+#define DA9150_CHG_TJUNC_CLASS_SHIFT           3
+#define DA9150_CHG_TJUNC_CLASS_MASK            (0x07 << 3)
+#define DA9150_CHG_TJUNC_CLASS_6               (0x06 << 3)
+#define DA9150_EBS_STAT_SHIFT                  6
+#define DA9150_EBS_STAT_MASK                   BIT(6)
+#define DA9150_CHG_BAT_REMOVED_SHIFT           7
+#define DA9150_CHG_BAT_REMOVED_MASK            BIT(7)
+
+/* DA9150_FAULT_LOG_A = 0x076 */
+#define DA9150_TEMP_FAULT_SHIFT                        0
+#define DA9150_TEMP_FAULT_MASK                 BIT(0)
+#define DA9150_VSYS_FAULT_SHIFT                        1
+#define DA9150_VSYS_FAULT_MASK                 BIT(1)
+#define DA9150_START_FAULT_SHIFT               2
+#define DA9150_START_FAULT_MASK                        BIT(2)
+#define DA9150_EXT_FAULT_SHIFT                 3
+#define DA9150_EXT_FAULT_MASK                  BIT(3)
+#define DA9150_POR_FAULT_SHIFT                 4
+#define DA9150_POR_FAULT_MASK                  BIT(4)
+
+/* DA9150_FAULT_LOG_B = 0x077 */
+#define DA9150_VBUS_FAULT_SHIFT                        0
+#define DA9150_VBUS_FAULT_MASK                 BIT(0)
+#define DA9150_OTG_FAULT_SHIFT                 1
+#define DA9150_OTG_FAULT_MASK                  BIT(1)
+
+/* DA9150_EVENT_E = 0x078 */
+#define DA9150_E_VBUS_SHIFT                    0
+#define DA9150_E_VBUS_MASK                     BIT(0)
+#define DA9150_E_CHG_SHIFT                     1
+#define DA9150_E_CHG_MASK                      BIT(1)
+#define DA9150_E_TCLASS_SHIFT                  2
+#define DA9150_E_TCLASS_MASK                   BIT(2)
+#define DA9150_E_TJUNC_SHIFT                   3
+#define DA9150_E_TJUNC_MASK                    BIT(3)
+#define DA9150_E_VFAULT_SHIFT                  4
+#define DA9150_E_VFAULT_MASK                   BIT(4)
+#define DA9150_EVENTS_H_SHIFT                  5
+#define DA9150_EVENTS_H_MASK                   BIT(5)
+#define DA9150_EVENTS_G_SHIFT                  6
+#define DA9150_EVENTS_G_MASK                   BIT(6)
+#define DA9150_EVENTS_F_SHIFT                  7
+#define DA9150_EVENTS_F_MASK                   BIT(7)
+
+/* DA9150_EVENT_F = 0x079 */
+#define DA9150_E_CONF_SHIFT                    0
+#define DA9150_E_CONF_MASK                     BIT(0)
+#define DA9150_E_DAT_SHIFT                     1
+#define DA9150_E_DAT_MASK                      BIT(1)
+#define DA9150_E_DTYPE_SHIFT                   3
+#define DA9150_E_DTYPE_MASK                    BIT(3)
+#define DA9150_E_ID_SHIFT                      4
+#define DA9150_E_ID_MASK                       BIT(4)
+#define DA9150_E_ADP_SHIFT                     5
+#define DA9150_E_ADP_MASK                      BIT(5)
+#define DA9150_E_SESS_END_SHIFT                        6
+#define DA9150_E_SESS_END_MASK                 BIT(6)
+#define DA9150_E_SESS_VLD_SHIFT                        7
+#define DA9150_E_SESS_VLD_MASK                 BIT(7)
+
+/* DA9150_EVENT_G = 0x07A */
+#define DA9150_E_FG_SHIFT                      0
+#define DA9150_E_FG_MASK                       BIT(0)
+#define DA9150_E_GP_SHIFT                      1
+#define DA9150_E_GP_MASK                       BIT(1)
+#define DA9150_E_TBAT_SHIFT                    2
+#define DA9150_E_TBAT_MASK                     BIT(2)
+#define DA9150_E_GPIOA_SHIFT                   3
+#define DA9150_E_GPIOA_MASK                    BIT(3)
+#define DA9150_E_GPIOB_SHIFT                   4
+#define DA9150_E_GPIOB_MASK                    BIT(4)
+#define DA9150_E_GPIOC_SHIFT                   5
+#define DA9150_E_GPIOC_MASK                    BIT(5)
+#define DA9150_E_GPIOD_SHIFT                   6
+#define DA9150_E_GPIOD_MASK                    BIT(6)
+#define DA9150_E_GPADC_SHIFT                   7
+#define DA9150_E_GPADC_MASK                    BIT(7)
+
+/* DA9150_EVENT_H = 0x07B */
+#define DA9150_E_WKUP_SHIFT                    0
+#define DA9150_E_WKUP_MASK                     BIT(0)
+
+/* DA9150_IRQ_MASK_E = 0x07C */
+#define DA9150_M_VBUS_SHIFT                    0
+#define DA9150_M_VBUS_MASK                     BIT(0)
+#define DA9150_M_CHG_SHIFT                     1
+#define DA9150_M_CHG_MASK                      BIT(1)
+#define DA9150_M_TJUNC_SHIFT                   3
+#define DA9150_M_TJUNC_MASK                    BIT(3)
+#define DA9150_M_VFAULT_SHIFT                  4
+#define DA9150_M_VFAULT_MASK                   BIT(4)
+
+/* DA9150_IRQ_MASK_F = 0x07D */
+#define DA9150_M_CONF_SHIFT                    0
+#define DA9150_M_CONF_MASK                     BIT(0)
+#define DA9150_M_DAT_SHIFT                     1
+#define DA9150_M_DAT_MASK                      BIT(1)
+#define DA9150_M_DTYPE_SHIFT                   3
+#define DA9150_M_DTYPE_MASK                    BIT(3)
+#define DA9150_M_ID_SHIFT                      4
+#define DA9150_M_ID_MASK                       BIT(4)
+#define DA9150_M_ADP_SHIFT                     5
+#define DA9150_M_ADP_MASK                      BIT(5)
+#define DA9150_M_SESS_END_SHIFT                        6
+#define DA9150_M_SESS_END_MASK                 BIT(6)
+#define DA9150_M_SESS_VLD_SHIFT                        7
+#define DA9150_M_SESS_VLD_MASK                 BIT(7)
+
+/* DA9150_IRQ_MASK_G = 0x07E */
+#define DA9150_M_FG_SHIFT                      0
+#define DA9150_M_FG_MASK                       BIT(0)
+#define DA9150_M_GP_SHIFT                      1
+#define DA9150_M_GP_MASK                       BIT(1)
+#define DA9150_M_TBAT_SHIFT                    2
+#define DA9150_M_TBAT_MASK                     BIT(2)
+#define DA9150_M_GPIOA_SHIFT                   3
+#define DA9150_M_GPIOA_MASK                    BIT(3)
+#define DA9150_M_GPIOB_SHIFT                   4
+#define DA9150_M_GPIOB_MASK                    BIT(4)
+#define DA9150_M_GPIOC_SHIFT                   5
+#define DA9150_M_GPIOC_MASK                    BIT(5)
+#define DA9150_M_GPIOD_SHIFT                   6
+#define DA9150_M_GPIOD_MASK                    BIT(6)
+#define DA9150_M_GPADC_SHIFT                   7
+#define DA9150_M_GPADC_MASK                    BIT(7)
+
+/* DA9150_IRQ_MASK_H = 0x07F */
+#define DA9150_M_WKUP_SHIFT                    0
+#define DA9150_M_WKUP_MASK                     BIT(0)
+
+/* DA9150_PAGE_CON_1 = 0x080 */
+#define DA9150_PAGE_SHIFT                      0
+#define DA9150_PAGE_MASK                       (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT                        6
+#define DA9150_WRITE_MODE_MASK                 BIT(6)
+#define DA9150_REVERT_SHIFT                    7
+#define DA9150_REVERT_MASK                     BIT(7)
+
+/* DA9150_CONFIG_A = 0x0E0 */
+#define DA9150_RESET_DUR_SHIFT                 0
+#define DA9150_RESET_DUR_MASK                  (0x03 << 0)
+#define DA9150_RESET_EXT_SHIFT                 2
+#define DA9150_RESET_EXT_MASK                  (0x03 << 2)
+#define DA9150_START_MAX_SHIFT                 4
+#define DA9150_START_MAX_MASK                  (0x03 << 4)
+#define DA9150_PS_WAIT_EN_SHIFT                        6
+#define DA9150_PS_WAIT_EN_MASK                 BIT(6)
+#define DA9150_PS_DISABLE_DIRECT_SHIFT         7
+#define DA9150_PS_DISABLE_DIRECT_MASK          BIT(7)
+
+/* DA9150_CONFIG_B = 0x0E1 */
+#define DA9150_VFAULT_ADJ_SHIFT                        0
+#define DA9150_VFAULT_ADJ_MASK                 (0x0f << 0)
+#define DA9150_VFAULT_HYST_SHIFT               4
+#define DA9150_VFAULT_HYST_MASK                        (0x07 << 4)
+#define DA9150_VFAULT_EN_SHIFT                 7
+#define DA9150_VFAULT_EN_MASK                  BIT(7)
+
+/* DA9150_CONFIG_C = 0x0E2 */
+#define DA9150_VSYS_MIN_SHIFT                  3
+#define DA9150_VSYS_MIN_MASK                   (0x1f << 3)
+
+/* DA9150_CONFIG_D = 0x0E3 */
+#define DA9150_LFOSC_EXT_SHIFT                 0
+#define DA9150_LFOSC_EXT_MASK                  BIT(0)
+#define DA9150_VDD33_DWN_SHIFT                 1
+#define DA9150_VDD33_DWN_MASK                  BIT(1)
+#define DA9150_WKUP_PM_EN_SHIFT                        2
+#define DA9150_WKUP_PM_EN_MASK                 BIT(2)
+#define DA9150_WKUP_CE_SEL_SHIFT               3
+#define DA9150_WKUP_CE_SEL_MASK                        (0x03 << 3)
+#define DA9150_WKUP_CLK32K_EN_SHIFT            5
+#define DA9150_WKUP_CLK32K_EN_MASK             BIT(5)
+#define DA9150_DISABLE_DEL_SHIFT               7
+#define DA9150_DISABLE_DEL_MASK                        BIT(7)
+
+/* DA9150_CONFIG_E = 0x0E4 */
+#define DA9150_PM_SPKSUP_DIS_SHIFT             0
+#define DA9150_PM_SPKSUP_DIS_MASK              BIT(0)
+#define DA9150_PM_MERGE_SHIFT                  1
+#define DA9150_PM_MERGE_MASK                   BIT(1)
+#define DA9150_PM_SR_OFF_SHIFT                 2
+#define DA9150_PM_SR_OFF_MASK                  BIT(2)
+#define DA9150_PM_TIMEOUT_EN_SHIFT             3
+#define DA9150_PM_TIMEOUT_EN_MASK              BIT(3)
+#define DA9150_PM_DLY_SEL_SHIFT                        4
+#define DA9150_PM_DLY_SEL_MASK                 (0x07 << 4)
+#define DA9150_PM_OUT_DLY_SEL_SHIFT            7
+#define DA9150_PM_OUT_DLY_SEL_MASK             BIT(7)
+
+/* DA9150_CONTROL_A = 0x0E5 */
+#define DA9150_VDD33_SL_SHIFT                  0
+#define DA9150_VDD33_SL_MASK                   BIT(0)
+#define DA9150_VDD33_LPM_SHIFT                 1
+#define DA9150_VDD33_LPM_MASK                  (0x03 << 1)
+#define DA9150_VDD33_EN_SHIFT                  3
+#define DA9150_VDD33_EN_MASK                   BIT(3)
+#define DA9150_GPI_LPM_SHIFT                   6
+#define DA9150_GPI_LPM_MASK                    BIT(6)
+#define DA9150_PM_IF_LPM_SHIFT                 7
+#define DA9150_PM_IF_LPM_MASK                  BIT(7)
+
+/* DA9150_CONTROL_B = 0x0E6 */
+#define DA9150_LPM_SHIFT                       0
+#define DA9150_LPM_MASK                                BIT(0)
+#define DA9150_RESET_SHIFT                     1
+#define DA9150_RESET_MASK                      BIT(1)
+#define DA9150_RESET_USRCONF_EN_SHIFT          2
+#define DA9150_RESET_USRCONF_EN_MASK           BIT(2)
+
+/* DA9150_CONTROL_C = 0x0E7 */
+#define DA9150_DISABLE_SHIFT                   0
+#define DA9150_DISABLE_MASK                    BIT(0)
+
+/* DA9150_GPIO_A_B = 0x0E8 */
+#define DA9150_GPIOA_PIN_SHIFT                 0
+#define DA9150_GPIOA_PIN_MASK                  (0x07 << 0)
+#define DA9150_GPIOA_PIN_GPI                   (0x00 << 0)
+#define DA9150_GPIOA_PIN_GPO_OD                        BIT(0)
+#define DA9150_GPIOA_TYPE_SHIFT                        3
+#define DA9150_GPIOA_TYPE_MASK                 BIT(3)
+#define DA9150_GPIOB_PIN_SHIFT                 4
+#define DA9150_GPIOB_PIN_MASK                  (0x07 << 4)
+#define DA9150_GPIOB_PIN_GPI                   (0x00 << 4)
+#define DA9150_GPIOB_PIN_GPO_OD                        BIT(4)
+#define DA9150_GPIOB_TYPE_SHIFT                        7
+#define DA9150_GPIOB_TYPE_MASK                 BIT(7)
+
+/* DA9150_GPIO_C_D = 0x0E9 */
+#define DA9150_GPIOC_PIN_SHIFT                 0
+#define DA9150_GPIOC_PIN_MASK                  (0x07 << 0)
+#define DA9150_GPIOC_PIN_GPI                   (0x00 << 0)
+#define DA9150_GPIOC_PIN_GPO_OD                        BIT(0)
+#define DA9150_GPIOC_TYPE_SHIFT                        3
+#define DA9150_GPIOC_TYPE_MASK                 BIT(3)
+#define DA9150_GPIOD_PIN_SHIFT                 4
+#define DA9150_GPIOD_PIN_MASK                  (0x07 << 4)
+#define DA9150_GPIOD_PIN_GPI                   (0x00 << 4)
+#define DA9150_GPIOD_PIN_GPO_OD                        BIT(4)
+#define DA9150_GPIOD_TYPE_SHIFT                        7
+#define DA9150_GPIOD_TYPE_MASK                 BIT(7)
+
+/* DA9150_GPIO_MODE_CONT = 0x0EA */
+#define DA9150_GPIOA_MODE_SHIFT                        0
+#define DA9150_GPIOA_MODE_MASK                 BIT(0)
+#define DA9150_GPIOB_MODE_SHIFT                        1
+#define DA9150_GPIOB_MODE_MASK                 BIT(1)
+#define DA9150_GPIOC_MODE_SHIFT                        2
+#define DA9150_GPIOC_MODE_MASK                 BIT(2)
+#define DA9150_GPIOD_MODE_SHIFT                        3
+#define DA9150_GPIOD_MODE_MASK                 BIT(3)
+#define DA9150_GPIOA_CONT_SHIFT                        4
+#define DA9150_GPIOA_CONT_MASK                 BIT(4)
+#define DA9150_GPIOB_CONT_SHIFT                        5
+#define DA9150_GPIOB_CONT_MASK                 BIT(5)
+#define DA9150_GPIOC_CONT_SHIFT                        6
+#define DA9150_GPIOC_CONT_MASK                 BIT(6)
+#define DA9150_GPIOD_CONT_SHIFT                        7
+#define DA9150_GPIOD_CONT_MASK                 BIT(7)
+
+/* DA9150_GPIO_CTRL_B = 0x0EB */
+#define DA9150_WAKE_PIN_SHIFT                  0
+#define DA9150_WAKE_PIN_MASK                   (0x03 << 0)
+#define DA9150_WAKE_MODE_SHIFT                 2
+#define DA9150_WAKE_MODE_MASK                  BIT(2)
+#define DA9150_WAKE_CONT_SHIFT                 3
+#define DA9150_WAKE_CONT_MASK                  BIT(3)
+#define DA9150_WAKE_DLY_SHIFT                  4
+#define DA9150_WAKE_DLY_MASK                   BIT(4)
+
+/* DA9150_GPIO_CTRL_A = 0x0EC */
+#define DA9150_GPIOA_ANAEN_SHIFT               0
+#define DA9150_GPIOA_ANAEN_MASK                        BIT(0)
+#define DA9150_GPIOB_ANAEN_SHIFT               1
+#define DA9150_GPIOB_ANAEN_MASK                        BIT(1)
+#define DA9150_GPIOC_ANAEN_SHIFT               2
+#define DA9150_GPIOC_ANAEN_MASK                        BIT(2)
+#define DA9150_GPIOD_ANAEN_SHIFT               3
+#define DA9150_GPIOD_ANAEN_MASK                        BIT(3)
+#define DA9150_GPIO_ANAEN                      0x01
+#define DA9150_GPIO_ANAEN_MASK                 0x0F
+#define DA9150_CHGLED_PIN_SHIFT                        5
+#define DA9150_CHGLED_PIN_MASK                 (0x07 << 5)
+
+/* DA9150_GPIO_CTRL_C = 0x0ED */
+#define DA9150_CHGBL_DUR_SHIFT                 0
+#define DA9150_CHGBL_DUR_MASK                  (0x03 << 0)
+#define DA9150_CHGBL_DBL_SHIFT                 2
+#define DA9150_CHGBL_DBL_MASK                  BIT(2)
+#define DA9150_CHGBL_FRQ_SHIFT                 3
+#define DA9150_CHGBL_FRQ_MASK                  (0x03 << 3)
+#define DA9150_CHGBL_FLKR_SHIFT                        5
+#define DA9150_CHGBL_FLKR_MASK                 BIT(5)
+
+/* DA9150_GPIO_CFG_A = 0x0EE */
+#define DA9150_CE_LPM_DEB_SHIFT                        0
+#define DA9150_CE_LPM_DEB_MASK                 (0x07 << 0)
+
+/* DA9150_GPIO_CFG_B = 0x0EF */
+#define DA9150_GPIOA_PUPD_SHIFT                        0
+#define DA9150_GPIOA_PUPD_MASK                 BIT(0)
+#define DA9150_GPIOB_PUPD_SHIFT                        1
+#define DA9150_GPIOB_PUPD_MASK                 BIT(1)
+#define DA9150_GPIOC_PUPD_SHIFT                        2
+#define DA9150_GPIOC_PUPD_MASK                 BIT(2)
+#define DA9150_GPIOD_PUPD_SHIFT                        3
+#define DA9150_GPIOD_PUPD_MASK                 BIT(3)
+#define DA9150_GPIO_PUPD_MASK                  (0xF << 0)
+#define DA9150_GPI_DEB_SHIFT                   4
+#define DA9150_GPI_DEB_MASK                    (0x07 << 4)
+#define DA9150_LPM_EN_SHIFT                    7
+#define DA9150_LPM_EN_MASK                     BIT(7)
+
+/* DA9150_GPIO_CFG_C = 0x0F0 */
+#define DA9150_GPI_V_SHIFT                     0
+#define DA9150_GPI_V_MASK                      BIT(0)
+#define DA9150_VDDIO_INT_SHIFT                 1
+#define DA9150_VDDIO_INT_MASK                  BIT(1)
+#define DA9150_FAULT_PIN_SHIFT                 3
+#define DA9150_FAULT_PIN_MASK                  (0x07 << 3)
+#define DA9150_FAULT_TYPE_SHIFT                        6
+#define DA9150_FAULT_TYPE_MASK                 BIT(6)
+#define DA9150_NIRQ_PUPD_SHIFT                 7
+#define DA9150_NIRQ_PUPD_MASK                  BIT(7)
+
+/* DA9150_GPADC_MAN = 0x0F2 */
+#define DA9150_GPADC_EN_SHIFT                  0
+#define DA9150_GPADC_EN_MASK                   BIT(0)
+#define DA9150_GPADC_MUX_SHIFT                 1
+#define DA9150_GPADC_MUX_MASK                  (0x1f << 1)
+
+/* DA9150_GPADC_RES_A = 0x0F4 */
+#define DA9150_GPADC_RES_H_SHIFT               0
+#define DA9150_GPADC_RES_H_MASK                        (0xff << 0)
+
+/* DA9150_GPADC_RES_B = 0x0F5 */
+#define DA9150_GPADC_RUN_SHIFT                 0
+#define DA9150_GPADC_RUN_MASK                  BIT(0)
+#define DA9150_GPADC_RES_L_SHIFT               6
+#define DA9150_GPADC_RES_L_MASK                        (0x03 << 6)
+#define DA9150_GPADC_RES_L_BITS                        2
+
+/* DA9150_PAGE_CON_2 = 0x100 */
+#define DA9150_PAGE_SHIFT                      0
+#define DA9150_PAGE_MASK                       (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT                        6
+#define DA9150_WRITE_MODE_MASK                 BIT(6)
+#define DA9150_REVERT_SHIFT                    7
+#define DA9150_REVERT_MASK                     BIT(7)
+
+/* DA9150_OTP_CONT_SHARED = 0x101 */
+#define DA9150_PC_DONE_SHIFT                   3
+#define DA9150_PC_DONE_MASK                    BIT(3)
+
+/* DA9150_INTERFACE_SHARED = 0x105 */
+#define DA9150_IF_BASE_ADDR_SHIFT              4
+#define DA9150_IF_BASE_ADDR_MASK               (0x0f << 4)
+
+/* DA9150_CONFIG_A_SHARED = 0x106 */
+#define DA9150_NIRQ_VDD_SHIFT                  1
+#define DA9150_NIRQ_VDD_MASK                   BIT(1)
+#define DA9150_NIRQ_PIN_SHIFT                  2
+#define DA9150_NIRQ_PIN_MASK                   BIT(2)
+#define DA9150_NIRQ_TYPE_SHIFT                 3
+#define DA9150_NIRQ_TYPE_MASK                  BIT(3)
+#define DA9150_PM_IF_V_SHIFT                   4
+#define DA9150_PM_IF_V_MASK                    BIT(4)
+#define DA9150_PM_IF_FMP_SHIFT                 5
+#define DA9150_PM_IF_FMP_MASK                  BIT(5)
+#define DA9150_PM_IF_HSM_SHIFT                 6
+#define DA9150_PM_IF_HSM_MASK                  BIT(6)
+
+/* DA9150_CONFIG_D_SHARED = 0x109 */
+#define DA9150_NIRQ_MODE_SHIFT                 1
+#define DA9150_NIRQ_MODE_MASK                  BIT(1)
+
+/* DA9150_ADETVB_CFG_C = 0x150 */
+#define DA9150_TADP_RISE_SHIFT                 0
+#define DA9150_TADP_RISE_MASK                  (0xff << 0)
+
+/* DA9150_ADETD_STAT = 0x151 */
+#define DA9150_DCD_STAT_SHIFT                  0
+#define DA9150_DCD_STAT_MASK                   BIT(0)
+#define DA9150_PCD_STAT_SHIFT                  1
+#define DA9150_PCD_STAT_MASK                   (0x03 << 1)
+#define DA9150_SCD_STAT_SHIFT                  3
+#define DA9150_SCD_STAT_MASK                   (0x03 << 3)
+#define DA9150_DP_STAT_SHIFT                   5
+#define DA9150_DP_STAT_MASK                    BIT(5)
+#define DA9150_DM_STAT_SHIFT                   6
+#define DA9150_DM_STAT_MASK                    BIT(6)
+
+/* DA9150_ADET_CMPSTAT = 0x152 */
+#define DA9150_DP_COMP_SHIFT                   1
+#define DA9150_DP_COMP_MASK                    BIT(1)
+#define DA9150_DM_COMP_SHIFT                   2
+#define DA9150_DM_COMP_MASK                    BIT(2)
+#define DA9150_ADP_SNS_COMP_SHIFT              3
+#define DA9150_ADP_SNS_COMP_MASK               BIT(3)
+#define DA9150_ADP_PRB_COMP_SHIFT              4
+#define DA9150_ADP_PRB_COMP_MASK               BIT(4)
+#define DA9150_ID_COMP_SHIFT                   5
+#define DA9150_ID_COMP_MASK                    BIT(5)
+
+/* DA9150_ADET_CTRL_A = 0x153 */
+#define DA9150_AID_DAT_SHIFT                   0
+#define DA9150_AID_DAT_MASK                    BIT(0)
+#define DA9150_AID_ID_SHIFT                    1
+#define DA9150_AID_ID_MASK                     BIT(1)
+#define DA9150_AID_TRIG_SHIFT                  2
+#define DA9150_AID_TRIG_MASK                   BIT(2)
+
+/* DA9150_ADETVB_CFG_B = 0x154 */
+#define DA9150_VB_MODE_SHIFT                   0
+#define DA9150_VB_MODE_MASK                    (0x03 << 0)
+#define DA9150_VB_MODE_VB_SESS                 BIT(0)
+
+#define DA9150_TADP_PRB_SHIFT                  2
+#define DA9150_TADP_PRB_MASK                   BIT(2)
+#define DA9150_DAT_RPD_EXT_SHIFT               5
+#define DA9150_DAT_RPD_EXT_MASK                        BIT(5)
+#define DA9150_CONF_RPD_SHIFT                  6
+#define DA9150_CONF_RPD_MASK                   BIT(6)
+#define DA9150_CONF_SRP_SHIFT                  7
+#define DA9150_CONF_SRP_MASK                   BIT(7)
+
+/* DA9150_ADETVB_CFG_A = 0x155 */
+#define DA9150_AID_MODE_SHIFT                  0
+#define DA9150_AID_MODE_MASK                   (0x03 << 0)
+#define DA9150_AID_EXT_POL_SHIFT               2
+#define DA9150_AID_EXT_POL_MASK                        BIT(2)
+
+/* DA9150_ADETAC_CFG_A = 0x156 */
+#define DA9150_ISET_CDP_SHIFT                  0
+#define DA9150_ISET_CDP_MASK                   (0x1f << 0)
+#define DA9150_CONF_DBP_SHIFT                  5
+#define DA9150_CONF_DBP_MASK                   BIT(5)
+
+/* DA9150_ADDETAC_CFG_B = 0x157 */
+#define DA9150_ISET_DCHG_SHIFT                 0
+#define DA9150_ISET_DCHG_MASK                  (0x1f << 0)
+#define DA9150_CONF_GPIOA_SHIFT                        5
+#define DA9150_CONF_GPIOA_MASK                 BIT(5)
+#define DA9150_CONF_GPIOB_SHIFT                        6
+#define DA9150_CONF_GPIOB_MASK                 BIT(6)
+#define DA9150_AID_VB_SHIFT                    7
+#define DA9150_AID_VB_MASK                     BIT(7)
+
+/* DA9150_ADETAC_CFG_C = 0x158 */
+#define DA9150_ISET_DEF_SHIFT                  0
+#define DA9150_ISET_DEF_MASK                   (0x1f << 0)
+#define DA9150_CONF_MODE_SHIFT                 5
+#define DA9150_CONF_MODE_MASK                  (0x03 << 5)
+#define DA9150_AID_CR_DIS_SHIFT                        7
+#define DA9150_AID_CR_DIS_MASK                 BIT(7)
+
+/* DA9150_ADETAC_CFG_D = 0x159 */
+#define DA9150_ISET_UNIT_SHIFT                 0
+#define DA9150_ISET_UNIT_MASK                  (0x1f << 0)
+#define DA9150_AID_UNCLAMP_SHIFT               5
+#define DA9150_AID_UNCLAMP_MASK                        BIT(5)
+
+/* DA9150_ADETVB_CFG_D = 0x15A */
+#define DA9150_ID_MODE_SHIFT                   0
+#define DA9150_ID_MODE_MASK                    (0x03 << 0)
+#define DA9150_DAT_MODE_SHIFT                  2
+#define DA9150_DAT_MODE_MASK                   (0x0f << 2)
+#define DA9150_DAT_SWP_SHIFT                   6
+#define DA9150_DAT_SWP_MASK                    BIT(6)
+#define DA9150_DAT_CLAMP_EXT_SHIFT             7
+#define DA9150_DAT_CLAMP_EXT_MASK              BIT(7)
+
+/* DA9150_ADETID_CFG_A = 0x15B */
+#define DA9150_TID_POLL_SHIFT                  0
+#define DA9150_TID_POLL_MASK                   (0x07 << 0)
+#define DA9150_RID_CONV_SHIFT                  3
+#define DA9150_RID_CONV_MASK                   BIT(3)
+
+/* DA9150_ADET_RID_PT_CHG_H = 0x15C */
+#define DA9150_RID_PT_CHG_H_SHIFT              0
+#define DA9150_RID_PT_CHG_H_MASK               (0xff << 0)
+
+/* DA9150_ADET_RID_PT_CHG_L = 0x15D */
+#define DA9150_RID_PT_CHG_L_SHIFT              6
+#define DA9150_RID_PT_CHG_L_MASK               (0x03 << 6)
+
+/* DA9150_PPR_TCTR_B = 0x160 */
+#define DA9150_CHG_TCTR_VAL_SHIFT              0
+#define DA9150_CHG_TCTR_VAL_MASK               (0xff << 0)
+
+/* DA9150_PPR_BKCTRL_A = 0x163 */
+#define DA9150_VBUS_MODE_SHIFT                 0
+#define DA9150_VBUS_MODE_MASK                  (0x03 << 0)
+#define DA9150_VBUS_MODE_CHG                   BIT(0)
+#define DA9150_VBUS_MODE_OTG                   (0x02 << 0)
+#define DA9150_VBUS_LPM_SHIFT                  2
+#define DA9150_VBUS_LPM_MASK                   (0x03 << 2)
+#define DA9150_VBUS_SUSP_SHIFT                 4
+#define DA9150_VBUS_SUSP_MASK                  BIT(4)
+#define DA9150_VBUS_PWM_SHIFT                  5
+#define DA9150_VBUS_PWM_MASK                   BIT(5)
+#define DA9150_VBUS_ISO_SHIFT                  6
+#define DA9150_VBUS_ISO_MASK                   BIT(6)
+#define DA9150_VBUS_LDO_SHIFT                  7
+#define DA9150_VBUS_LDO_MASK                   BIT(7)
+
+/* DA9150_PPR_BKCFG_A = 0x164 */
+#define DA9150_VBUS_ISET_SHIFT                 0
+#define DA9150_VBUS_ISET_MASK                  (0x1f << 0)
+#define DA9150_VBUS_IMAX_SHIFT                 5
+#define DA9150_VBUS_IMAX_MASK                  BIT(5)
+#define DA9150_VBUS_IOTG_SHIFT                 6
+#define DA9150_VBUS_IOTG_MASK                  (0x03 << 6)
+
+/* DA9150_PPR_BKCFG_B = 0x165 */
+#define DA9150_VBUS_DROP_SHIFT                 0
+#define DA9150_VBUS_DROP_MASK                  (0x0f << 0)
+#define DA9150_VBUS_FAULT_DIS_SHIFT            6
+#define DA9150_VBUS_FAULT_DIS_MASK             BIT(6)
+#define DA9150_OTG_FAULT_DIS_SHIFT             7
+#define DA9150_OTG_FAULT_DIS_MASK              BIT(7)
+
+/* DA9150_PPR_CHGCTRL_A = 0x166 */
+#define DA9150_CHG_EN_SHIFT                    0
+#define DA9150_CHG_EN_MASK                     BIT(0)
+
+/* DA9150_PPR_CHGCTRL_B = 0x167 */
+#define DA9150_CHG_VBAT_SHIFT                  0
+#define DA9150_CHG_VBAT_MASK                   (0x1f << 0)
+#define DA9150_CHG_VDROP_SHIFT                 6
+#define DA9150_CHG_VDROP_MASK                  (0x03 << 6)
+
+/* DA9150_PPR_CHGCTRL_C = 0x168 */
+#define DA9150_CHG_VFAULT_SHIFT                        0
+#define DA9150_CHG_VFAULT_MASK                 (0x0f << 0)
+#define DA9150_CHG_IPRE_SHIFT                  4
+#define DA9150_CHG_IPRE_MASK                   (0x03 << 4)
+
+/* DA9150_PPR_TCTR_A = 0x169 */
+#define DA9150_CHG_TCTR_SHIFT                  0
+#define DA9150_CHG_TCTR_MASK                   (0x07 << 0)
+#define DA9150_CHG_TCTR_MODE_SHIFT             4
+#define DA9150_CHG_TCTR_MODE_MASK              BIT(4)
+
+/* DA9150_PPR_CHGCTRL_D = 0x16A */
+#define DA9150_CHG_IBAT_SHIFT                  0
+#define DA9150_CHG_IBAT_MASK                   (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_E = 0x16B */
+#define DA9150_CHG_IEND_SHIFT                  0
+#define DA9150_CHG_IEND_MASK                   (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_F = 0x16C */
+#define DA9150_CHG_VCOLD_SHIFT                 0
+#define DA9150_CHG_VCOLD_MASK                  (0x1f << 0)
+#define DA9150_TBAT_TQA_EN_SHIFT               6
+#define DA9150_TBAT_TQA_EN_MASK                        BIT(6)
+#define DA9150_TBAT_TDP_EN_SHIFT               7
+#define DA9150_TBAT_TDP_EN_MASK                        BIT(7)
+
+/* DA9150_PPR_CHGCTRL_G = 0x16D */
+#define DA9150_CHG_VWARM_SHIFT                 0
+#define DA9150_CHG_VWARM_MASK                  (0x1f << 0)
+
+/* DA9150_PPR_CHGCTRL_H = 0x16E */
+#define DA9150_CHG_VHOT_SHIFT                  0
+#define DA9150_CHG_VHOT_MASK                   (0x1f << 0)
+
+/* DA9150_PPR_CHGCTRL_I = 0x16F */
+#define DA9150_CHG_ICOLD_SHIFT                 0
+#define DA9150_CHG_ICOLD_MASK                  (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_J = 0x170 */
+#define DA9150_CHG_IWARM_SHIFT                 0
+#define DA9150_CHG_IWARM_MASK                  (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_K = 0x171 */
+#define DA9150_CHG_IHOT_SHIFT                  0
+#define DA9150_CHG_IHOT_MASK                   (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_L = 0x172 */
+#define DA9150_CHG_IBAT_TRED_SHIFT             0
+#define DA9150_CHG_IBAT_TRED_MASK              (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_M = 0x173 */
+#define DA9150_CHG_VFLOAT_SHIFT                        0
+#define DA9150_CHG_VFLOAT_MASK                 (0x0f << 0)
+#define DA9150_CHG_LPM_SHIFT                   5
+#define DA9150_CHG_LPM_MASK                    BIT(5)
+#define DA9150_CHG_NBLO_SHIFT                  6
+#define DA9150_CHG_NBLO_MASK                   BIT(6)
+#define DA9150_EBS_EN_SHIFT                    7
+#define DA9150_EBS_EN_MASK                     BIT(7)
+
+/* DA9150_PPR_THYST_A = 0x174 */
+#define DA9150_TBAT_T1_SHIFT                   0
+#define DA9150_TBAT_T1_MASK                    (0xff << 0)
+
+/* DA9150_PPR_THYST_B = 0x175 */
+#define DA9150_TBAT_T2_SHIFT                   0
+#define DA9150_TBAT_T2_MASK                    (0xff << 0)
+
+/* DA9150_PPR_THYST_C = 0x176 */
+#define DA9150_TBAT_T3_SHIFT                   0
+#define DA9150_TBAT_T3_MASK                    (0xff << 0)
+
+/* DA9150_PPR_THYST_D = 0x177 */
+#define DA9150_TBAT_T4_SHIFT                   0
+#define DA9150_TBAT_T4_MASK                    (0xff << 0)
+
+/* DA9150_PPR_THYST_E = 0x178 */
+#define DA9150_TBAT_T5_SHIFT                   0
+#define DA9150_TBAT_T5_MASK                    (0xff << 0)
+
+/* DA9150_PPR_THYST_F = 0x179 */
+#define DA9150_TBAT_H1_SHIFT                   0
+#define DA9150_TBAT_H1_MASK                    (0xff << 0)
+
+/* DA9150_PPR_THYST_G = 0x17A */
+#define DA9150_TBAT_H5_SHIFT                   0
+#define DA9150_TBAT_H5_MASK                    (0xff << 0)
+
+/* DA9150_PAGE_CON_3 = 0x180 */
+#define DA9150_PAGE_SHIFT                      0
+#define DA9150_PAGE_MASK                       (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT                        6
+#define DA9150_WRITE_MODE_MASK                 BIT(6)
+#define DA9150_REVERT_SHIFT                    7
+#define DA9150_REVERT_MASK                     BIT(7)
+
+/* DA9150_PAGE_CON_4 = 0x200 */
+#define DA9150_PAGE_SHIFT                      0
+#define DA9150_PAGE_MASK                       (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT                        6
+#define DA9150_WRITE_MODE_MASK                 BIT(6)
+#define DA9150_REVERT_SHIFT                    7
+#define DA9150_REVERT_MASK                     BIT(7)
+
+/* DA9150_PAGE_CON_5 = 0x280 */
+#define DA9150_PAGE_SHIFT                      0
+#define DA9150_PAGE_MASK                       (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT                        6
+#define DA9150_WRITE_MODE_MASK                 BIT(6)
+#define DA9150_REVERT_SHIFT                    7
+#define DA9150_REVERT_MASK                     BIT(7)
+
+/* DA9150_PAGE_CON_6 = 0x300 */
+#define DA9150_PAGE_SHIFT                      0
+#define DA9150_PAGE_MASK                       (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT                        6
+#define DA9150_WRITE_MODE_MASK                 BIT(6)
+#define DA9150_REVERT_SHIFT                    7
+#define DA9150_REVERT_MASK                     BIT(7)
+
+/* DA9150_COREBTLD_STAT_A = 0x302 */
+#define DA9150_BOOTLD_STAT_SHIFT               0
+#define DA9150_BOOTLD_STAT_MASK                        (0x03 << 0)
+#define DA9150_CORE_LOCKUP_SHIFT               2
+#define DA9150_CORE_LOCKUP_MASK                        BIT(2)
+
+/* DA9150_COREBTLD_CTRL_A = 0x303 */
+#define DA9150_CORE_RESET_SHIFT                        0
+#define DA9150_CORE_RESET_MASK                 BIT(0)
+#define DA9150_CORE_STOP_SHIFT                 1
+#define DA9150_CORE_STOP_MASK                  BIT(1)
+
+/* DA9150_CORE_CONFIG_A = 0x304 */
+#define DA9150_CORE_MEMMUX_SHIFT               0
+#define DA9150_CORE_MEMMUX_MASK                        (0x03 << 0)
+#define DA9150_WDT_AUTO_START_SHIFT            2
+#define DA9150_WDT_AUTO_START_MASK             BIT(2)
+#define DA9150_WDT_AUTO_LOCK_SHIFT             3
+#define DA9150_WDT_AUTO_LOCK_MASK              BIT(3)
+#define DA9150_WDT_HLT_NO_CLK_SHIFT            4
+#define DA9150_WDT_HLT_NO_CLK_MASK             BIT(4)
+
+/* DA9150_CORE_CONFIG_C = 0x305 */
+#define DA9150_CORE_SW_SIZE_SHIFT              0
+#define DA9150_CORE_SW_SIZE_MASK               (0xff << 0)
+
+/* DA9150_CORE_CONFIG_B = 0x306 */
+#define DA9150_BOOTLD_EN_SHIFT                 0
+#define DA9150_BOOTLD_EN_MASK                  BIT(0)
+#define DA9150_CORE_EN_SHIFT                   2
+#define DA9150_CORE_EN_MASK                    BIT(2)
+#define DA9150_CORE_SW_SRC_SHIFT               3
+#define DA9150_CORE_SW_SRC_MASK                        (0x07 << 3)
+#define DA9150_DEEP_SLEEP_EN_SHIFT             7
+#define DA9150_DEEP_SLEEP_EN_MASK              BIT(7)
+
+/* DA9150_CORE_CFG_DATA_A = 0x307 */
+#define DA9150_CORE_CFG_DT_A_SHIFT             0
+#define DA9150_CORE_CFG_DT_A_MASK              (0xff << 0)
+
+/* DA9150_CORE_CFG_DATA_B = 0x308 */
+#define DA9150_CORE_CFG_DT_B_SHIFT             0
+#define DA9150_CORE_CFG_DT_B_MASK              (0xff << 0)
+
+/* DA9150_CORE_CMD_A = 0x309 */
+#define DA9150_CORE_CMD_SHIFT                  0
+#define DA9150_CORE_CMD_MASK                   (0xff << 0)
+
+/* DA9150_CORE_DATA_A = 0x30A */
+#define DA9150_CORE_DATA_0_SHIFT               0
+#define DA9150_CORE_DATA_0_MASK                        (0xff << 0)
+
+/* DA9150_CORE_DATA_B = 0x30B */
+#define DA9150_CORE_DATA_1_SHIFT               0
+#define DA9150_CORE_DATA_1_MASK                        (0xff << 0)
+
+/* DA9150_CORE_DATA_C = 0x30C */
+#define DA9150_CORE_DATA_2_SHIFT               0
+#define DA9150_CORE_DATA_2_MASK                        (0xff << 0)
+
+/* DA9150_CORE_DATA_D = 0x30D */
+#define DA9150_CORE_DATA_3_SHIFT               0
+#define DA9150_CORE_DATA_3_MASK                        (0xff << 0)
+
+/* DA9150_CORE2WIRE_STAT_A = 0x310 */
+#define DA9150_FW_FWDL_ERR_SHIFT               7
+#define DA9150_FW_FWDL_ERR_MASK                        BIT(7)
+
+/* DA9150_CORE2WIRE_CTRL_A = 0x311 */
+#define DA9150_FW_FWDL_EN_SHIFT                        0
+#define DA9150_FW_FWDL_EN_MASK                 BIT(0)
+#define DA9150_FG_QIF_EN_SHIFT                 1
+#define DA9150_FG_QIF_EN_MASK                  BIT(1)
+#define DA9150_CORE_BASE_ADDR_SHIFT            4
+#define DA9150_CORE_BASE_ADDR_MASK             (0x0f << 4)
+
+/* DA9150_FW_CTRL_A = 0x312 */
+#define DA9150_FW_SEAL_SHIFT                   0
+#define DA9150_FW_SEAL_MASK                    (0xff << 0)
+
+/* DA9150_FW_CTRL_C = 0x313 */
+#define DA9150_FW_FWDL_CRC_SHIFT               0
+#define DA9150_FW_FWDL_CRC_MASK                        (0xff << 0)
+
+/* DA9150_FW_CTRL_D = 0x314 */
+#define DA9150_FW_FWDL_BASE_SHIFT              0
+#define DA9150_FW_FWDL_BASE_MASK               (0x0f << 0)
+
+/* DA9150_FG_CTRL_A = 0x315 */
+#define DA9150_FG_QIF_CODE_SHIFT               0
+#define DA9150_FG_QIF_CODE_MASK                        (0xff << 0)
+
+/* DA9150_FG_CTRL_B = 0x316 */
+#define DA9150_FG_QIF_VALUE_SHIFT              0
+#define DA9150_FG_QIF_VALUE_MASK               (0xff << 0)
+
+/* DA9150_FW_CTRL_E = 0x317 */
+#define DA9150_FW_FWDL_SEG_SHIFT               0
+#define DA9150_FW_FWDL_SEG_MASK                        (0xff << 0)
+
+/* DA9150_FW_CTRL_B = 0x318 */
+#define DA9150_FW_FWDL_VALUE_SHIFT             0
+#define DA9150_FW_FWDL_VALUE_MASK              (0xff << 0)
+
+/* DA9150_GPADC_CMAN = 0x320 */
+#define DA9150_GPADC_CEN_SHIFT                 0
+#define DA9150_GPADC_CEN_MASK                  BIT(0)
+#define DA9150_GPADC_CMUX_SHIFT                        1
+#define DA9150_GPADC_CMUX_MASK                 (0x1f << 1)
+
+/* DA9150_GPADC_CRES_A = 0x322 */
+#define DA9150_GPADC_CRES_H_SHIFT              0
+#define DA9150_GPADC_CRES_H_MASK               (0xff << 0)
+
+/* DA9150_GPADC_CRES_B = 0x323 */
+#define DA9150_GPADC_CRUN_SHIFT                        0
+#define DA9150_GPADC_CRUN_MASK                 BIT(0)
+#define DA9150_GPADC_CRES_L_SHIFT              6
+#define DA9150_GPADC_CRES_L_MASK               (0x03 << 6)
+
+/* DA9150_CC_CFG_A = 0x328 */
+#define DA9150_CC_EN_SHIFT                     0
+#define DA9150_CC_EN_MASK                      BIT(0)
+#define DA9150_CC_TIMEBASE_SHIFT               1
+#define DA9150_CC_TIMEBASE_MASK                        (0x03 << 1)
+#define DA9150_CC_CFG_SHIFT                    5
+#define DA9150_CC_CFG_MASK                     (0x03 << 5)
+#define DA9150_CC_ENDLESS_MODE_SHIFT           7
+#define DA9150_CC_ENDLESS_MODE_MASK            BIT(7)
+
+/* DA9150_CC_CFG_B = 0x329 */
+#define DA9150_CC_OPT_SHIFT                    0
+#define DA9150_CC_OPT_MASK                     (0x03 << 0)
+#define DA9150_CC_PREAMP_SHIFT                 2
+#define DA9150_CC_PREAMP_MASK                  (0x03 << 2)
+
+/* DA9150_CC_ICHG_RES_A = 0x32A */
+#define DA9150_CC_ICHG_RES_H_SHIFT             0
+#define DA9150_CC_ICHG_RES_H_MASK              (0xff << 0)
+
+/* DA9150_CC_ICHG_RES_B = 0x32B */
+#define DA9150_CC_ICHG_RES_L_SHIFT             3
+#define DA9150_CC_ICHG_RES_L_MASK              (0x1f << 3)
+
+/* DA9150_CC_IAVG_RES_A = 0x32C */
+#define DA9150_CC_IAVG_RES_H_SHIFT             0
+#define DA9150_CC_IAVG_RES_H_MASK              (0xff << 0)
+
+/* DA9150_CC_IAVG_RES_B = 0x32D */
+#define DA9150_CC_IAVG_RES_L_SHIFT             0
+#define DA9150_CC_IAVG_RES_L_MASK              (0xff << 0)
+
+/* DA9150_TAUX_CTRL_A = 0x330 */
+#define DA9150_TAUX_EN_SHIFT                   0
+#define DA9150_TAUX_EN_MASK                    BIT(0)
+#define DA9150_TAUX_MOD_SHIFT                  1
+#define DA9150_TAUX_MOD_MASK                   BIT(1)
+#define DA9150_TAUX_UPDATE_SHIFT               2
+#define DA9150_TAUX_UPDATE_MASK                        BIT(2)
+
+/* DA9150_TAUX_RELOAD_H = 0x332 */
+#define DA9150_TAUX_RLD_H_SHIFT                        0
+#define DA9150_TAUX_RLD_H_MASK                 (0xff << 0)
+
+/* DA9150_TAUX_RELOAD_L = 0x333 */
+#define DA9150_TAUX_RLD_L_SHIFT                        3
+#define DA9150_TAUX_RLD_L_MASK                 (0x1f << 3)
+
+/* DA9150_TAUX_VALUE_H = 0x334 */
+#define DA9150_TAUX_VAL_H_SHIFT                        0
+#define DA9150_TAUX_VAL_H_MASK                 (0xff << 0)
+
+/* DA9150_TAUX_VALUE_L = 0x335 */
+#define DA9150_TAUX_VAL_L_SHIFT                        3
+#define DA9150_TAUX_VAL_L_MASK                 (0x1f << 3)
+
+/* DA9150_AUX_DATA_0 = 0x338 */
+#define DA9150_AUX_DAT_0_SHIFT                 0
+#define DA9150_AUX_DAT_0_MASK                  (0xff << 0)
+
+/* DA9150_AUX_DATA_1 = 0x339 */
+#define DA9150_AUX_DAT_1_SHIFT                 0
+#define DA9150_AUX_DAT_1_MASK                  (0xff << 0)
+
+/* DA9150_AUX_DATA_2 = 0x33A */
+#define DA9150_AUX_DAT_2_SHIFT                 0
+#define DA9150_AUX_DAT_2_MASK                  (0xff << 0)
+
+/* DA9150_AUX_DATA_3 = 0x33B */
+#define DA9150_AUX_DAT_3_SHIFT                 0
+#define DA9150_AUX_DAT_3_MASK                  (0xff << 0)
+
+/* DA9150_BIF_CTRL = 0x340 */
+#define DA9150_BIF_ISRC_EN_SHIFT               0
+#define DA9150_BIF_ISRC_EN_MASK                        BIT(0)
+
+/* DA9150_TBAT_CTRL_A = 0x342 */
+#define DA9150_TBAT_EN_SHIFT                   0
+#define DA9150_TBAT_EN_MASK                    BIT(0)
+#define DA9150_TBAT_SW1_SHIFT                  1
+#define DA9150_TBAT_SW1_MASK                   BIT(1)
+#define DA9150_TBAT_SW2_SHIFT                  2
+#define DA9150_TBAT_SW2_MASK                   BIT(2)
+
+/* DA9150_TBAT_CTRL_B = 0x343 */
+#define DA9150_TBAT_SW_FRC_SHIFT               0
+#define DA9150_TBAT_SW_FRC_MASK                        BIT(0)
+#define DA9150_TBAT_STAT_SW1_SHIFT             1
+#define DA9150_TBAT_STAT_SW1_MASK              BIT(1)
+#define DA9150_TBAT_STAT_SW2_SHIFT             2
+#define DA9150_TBAT_STAT_SW2_MASK              BIT(2)
+#define DA9150_TBAT_HIGH_CURR_SHIFT            3
+#define DA9150_TBAT_HIGH_CURR_MASK             BIT(3)
+
+/* DA9150_TBAT_RES_A = 0x344 */
+#define DA9150_TBAT_RES_H_SHIFT                        0
+#define DA9150_TBAT_RES_H_MASK                 (0xff << 0)
+
+/* DA9150_TBAT_RES_B = 0x345 */
+#define DA9150_TBAT_RES_DIS_SHIFT              0
+#define DA9150_TBAT_RES_DIS_MASK               BIT(0)
+#define DA9150_TBAT_RES_L_SHIFT                        6
+#define DA9150_TBAT_RES_L_MASK                 (0x03 << 6)
+
+#endif /* __DA9150_REGISTERS_H */
index 960b92ad450d5a642bc8ff168f165134b590566b..f5043490d67c95956cf57247386d83fe71b76000 100644 (file)
@@ -447,7 +447,6 @@ struct max77686_dev {
        struct regmap_irq_chip_data *rtc_irq_data;
 
        int irq;
-       bool wakeup;
        struct mutex irqlock;
        int irq_masks_cur[MAX77686_IRQ_GROUP_NR];
        int irq_masks_cache[MAX77686_IRQ_GROUP_NR];
index 553f7d09258acc9348fef7ad989c3d97db600e1d..bb995ab9a57564be1ae635ac455688e6f2f1173b 100644 (file)
@@ -119,12 +119,6 @@ enum max77802_regulators {
        MAX77802_REG_MAX,
 };
 
-struct max77686_regulator_data {
-       int id;
-       struct regulator_init_data *initdata;
-       struct device_node *of_node;
-};
-
 enum max77686_opmode {
        MAX77686_OPMODE_NORMAL,
        MAX77686_OPMODE_LP,
@@ -136,26 +130,4 @@ struct max77686_opmode_data {
        int mode;
 };
 
-struct max77686_platform_data {
-       int ono;
-       int wakeup;
-
-       /* ---- PMIC ---- */
-       struct max77686_regulator_data *regulators;
-       int num_regulators;
-
-       struct max77686_opmode_data *opmode_data;
-
-       /*
-        * GPIO-DVS feature is not enabled with the current version of
-        * MAX77686 driver. Buck2/3/4_voltages[0] is used as the default
-        * voltage at probe. DVS/SELB gpios are set as OUTPUT-LOW.
-        */
-       int buck234_gpio_dvs[3]; /* GPIO of [0]DVS1, [1]DVS2, [2]DVS3 */
-       int buck234_gpio_selb[3]; /* [0]SELB2, [1]SELB3, [2]SELB4 */
-       unsigned int buck2_voltage[8]; /* buckx_voltage in uV */
-       unsigned int buck3_voltage[8];
-       unsigned int buck4_voltage[8];
-};
-
 #endif /* __LINUX_MFD_MAX77686_H */
diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h
new file mode 100644 (file)
index 0000000..742ebf1
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __QCOM_RPM_H__
+#define __QCOM_RPM_H__
+
+#include <linux/types.h>
+
+struct qcom_rpm;
+
+#define QCOM_RPM_ACTIVE_STATE  0
+#define QCOM_RPM_SLEEP_STATE   1
+
+int qcom_rpm_write(struct qcom_rpm *rpm, int state, int resource, u32 *buf, size_t count);
+
+#endif
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
new file mode 100644 (file)
index 0000000..1b63fc2
--- /dev/null
@@ -0,0 +1,260 @@
+/*
+ * MFD core driver for Richtek RT5033
+ *
+ * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5033_PRIVATE_H__
+#define __RT5033_PRIVATE_H__
+
+enum rt5033_reg {
+       RT5033_REG_CHG_STAT             = 0x00,
+       RT5033_REG_CHG_CTRL1            = 0x01,
+       RT5033_REG_CHG_CTRL2            = 0x02,
+       RT5033_REG_DEVICE_ID            = 0x03,
+       RT5033_REG_CHG_CTRL3            = 0x04,
+       RT5033_REG_CHG_CTRL4            = 0x05,
+       RT5033_REG_CHG_CTRL5            = 0x06,
+       RT5033_REG_RT_CTRL0             = 0x07,
+       RT5033_REG_CHG_RESET            = 0x08,
+       /* Reserved 0x09~0x18 */
+       RT5033_REG_RT_CTRL1             = 0x19,
+       /* Reserved 0x1A~0x20 */
+       RT5033_REG_FLED_FUNCTION1       = 0x21,
+       RT5033_REG_FLED_FUNCTION2       = 0x22,
+       RT5033_REG_FLED_STROBE_CTRL1    = 0x23,
+       RT5033_REG_FLED_STROBE_CTRL2    = 0x24,
+       RT5033_REG_FLED_CTRL1           = 0x25,
+       RT5033_REG_FLED_CTRL2           = 0x26,
+       RT5033_REG_FLED_CTRL3           = 0x27,
+       RT5033_REG_FLED_CTRL4           = 0x28,
+       RT5033_REG_FLED_CTRL5           = 0x29,
+       /* Reserved 0x2A~0x40 */
+       RT5033_REG_CTRL                 = 0x41,
+       RT5033_REG_BUCK_CTRL            = 0x42,
+       RT5033_REG_LDO_CTRL             = 0x43,
+       /* Reserved 0x44~0x46 */
+       RT5033_REG_MANUAL_RESET_CTRL    = 0x47,
+       /* Reserved 0x48~0x5F */
+       RT5033_REG_CHG_IRQ1             = 0x60,
+       RT5033_REG_CHG_IRQ2             = 0x61,
+       RT5033_REG_CHG_IRQ3             = 0x62,
+       RT5033_REG_CHG_IRQ1_CTRL        = 0x63,
+       RT5033_REG_CHG_IRQ2_CTRL        = 0x64,
+       RT5033_REG_CHG_IRQ3_CTRL        = 0x65,
+       RT5033_REG_LED_IRQ_STAT         = 0x66,
+       RT5033_REG_LED_IRQ_CTRL         = 0x67,
+       RT5033_REG_PMIC_IRQ_STAT        = 0x68,
+       RT5033_REG_PMIC_IRQ_CTRL        = 0x69,
+       RT5033_REG_SHDN_CTRL            = 0x6A,
+       RT5033_REG_OFF_EVENT            = 0x6B,
+
+       RT5033_REG_END,
+};
+
+/* RT5033 Charger state register */
+#define RT5033_CHG_STAT_MASK           0x20
+#define RT5033_CHG_STAT_DISCHARGING    0x00
+#define RT5033_CHG_STAT_FULL           0x10
+#define RT5033_CHG_STAT_CHARGING       0x20
+#define RT5033_CHG_STAT_NOT_CHARGING   0x30
+#define RT5033_CHG_STAT_TYPE_MASK      0x60
+#define RT5033_CHG_STAT_TYPE_PRE       0x20
+#define RT5033_CHG_STAT_TYPE_FAST      0x60
+
+/* RT5033 CHGCTRL1 register */
+#define RT5033_CHGCTRL1_IAICR_MASK     0xe0
+#define RT5033_CHGCTRL1_MODE_MASK      0x01
+
+/* RT5033 CHGCTRL2 register */
+#define RT5033_CHGCTRL2_CV_MASK                0xfc
+
+/* RT5033 CHGCTRL3 register */
+#define RT5033_CHGCTRL3_CFO_EN_MASK    0x40
+#define RT5033_CHGCTRL3_TIMER_MASK     0x38
+#define RT5033_CHGCTRL3_TIMER_EN_MASK  0x01
+
+/* RT5033 CHGCTRL4 register */
+#define RT5033_CHGCTRL4_EOC_MASK       0x07
+#define RT5033_CHGCTRL4_IPREC_MASK     0x18
+
+/* RT5033 CHGCTRL5 register */
+#define RT5033_CHGCTRL5_VPREC_MASK     0x0f
+#define RT5033_CHGCTRL5_ICHG_MASK      0xf0
+#define RT5033_CHGCTRL5_ICHG_SHIFT     0x04
+#define RT5033_CHG_MAX_CURRENT         0x0d
+
+/* RT5033 RT CTRL1 register */
+#define RT5033_RT_CTRL1_UUG_MASK       0x02
+#define RT5033_RT_HZ_MASK              0x01
+
+/* RT5033 control register */
+#define RT5033_CTRL_FCCM_BUCK_MASK             0x00
+#define RT5033_CTRL_BUCKOMS_MASK               0x01
+#define RT5033_CTRL_LDOOMS_MASK                        0x02
+#define RT5033_CTRL_SLDOOMS_MASK               0x03
+#define RT5033_CTRL_EN_BUCK_MASK               0x04
+#define RT5033_CTRL_EN_LDO_MASK                        0x05
+#define RT5033_CTRL_EN_SAFE_LDO_MASK           0x06
+#define RT5033_CTRL_LDO_SLEEP_MASK             0x07
+
+/* RT5033 BUCK control register */
+#define RT5033_BUCK_CTRL_MASK                  0x1f
+
+/* RT5033 LDO control register */
+#define RT5033_LDO_CTRL_MASK                   0x1f
+
+/* RT5033 charger property - model, manufacturer */
+
+#define RT5033_CHARGER_MODEL   "RT5033WSC Charger"
+#define RT5033_MANUFACTURER    "Richtek Technology Corporation"
+
+/*
+ * RT5033 charger fast-charge current limits (as in CHGCTRL1 register).
+ * AICR mode limits the input current; for example,
+ * the AICR 100 mode limits the input current to 100 mA.
+ */
+#define RT5033_AICR_100_MODE                   0x20
+#define RT5033_AICR_500_MODE                   0x40
+#define RT5033_AICR_700_MODE                   0x60
+#define RT5033_AICR_900_MODE                   0x80
+#define RT5033_AICR_1500_MODE                  0xc0
+#define RT5033_AICR_2000_MODE                  0xe0
+#define RT5033_AICR_MODE_MASK                  0xe0
+
+/* RT5033 internal charge timer; when used, the charge time must be set */
+#define RT5033_FAST_CHARGE_TIMER4              0x00
+#define RT5033_FAST_CHARGE_TIMER6              0x01
+#define RT5033_FAST_CHARGE_TIMER8              0x02
+#define RT5033_FAST_CHARGE_TIMER9              0x03
+#define RT5033_FAST_CHARGE_TIMER12             0x04
+#define RT5033_FAST_CHARGE_TIMER14             0x05
+#define RT5033_FAST_CHARGE_TIMER16             0x06
+
+#define RT5033_INT_TIMER_ENABLE                        0x01
+
+/* RT5033 charger termination enable mask */
+#define RT5033_TE_ENABLE_MASK                  0x08
+
+/*
+ * RT5033 charger opa mode. The RT5033 has two opa modes: charger mode
+ * and boost mode for OTG.
+ */
+
+#define RT5033_CHARGER_MODE                    0x00
+#define RT5033_BOOST_MODE                      0x01
+
+/* RT5033 charger termination enable */
+#define RT5033_TE_ENABLE                       0x08
+
+/* RT5033 charger CFO enable */
+#define RT5033_CFO_ENABLE                      0x40
+
+/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
+#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
+#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM   25000U
+#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
+
+/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
+#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN   350000U
+#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM    100000U
+#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX   650000U
+
+/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
+#define RT5033_CHARGER_FAST_CURRENT_MIN                700000U
+#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM   100000U
+#define RT5033_CHARGER_FAST_CURRENT_MAX                2000000U
+
+/*
+ * RT5033 charger const-charge end-of-charge current
+ * (as in CHGCTRL4 register), uA
+ */
+#define RT5033_CHARGER_EOC_MIN                 150000U
+#define RT5033_CHARGER_EOC_REF                 300000U
+#define RT5033_CHARGER_EOC_STEP_NUM1           50000U
+#define RT5033_CHARGER_EOC_STEP_NUM2           100000U
+#define RT5033_CHARGER_EOC_MAX                 600000U
+
+/*
+ * RT5033 charger pre-charge threshold volt limits
+ * (as in CHGCTRL5 register), uV
+ */
+
+#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U
+#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM  100000U
+#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U
+
+/*
+ * RT5033 charger UUG enable. If UUG is enabled, the MOS is automatically
+ * controlled by the H/W charger circuit.
+ */
+#define RT5033_CHARGER_UUG_ENABLE              0x02
+
+/* RT5033 charger High impedance mode */
+#define RT5033_CHARGER_HZ_DISABLE              0x00
+#define RT5033_CHARGER_HZ_ENABLE               0x01
+
+/* RT5033 regulator BUCK output voltage uV */
+#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN              1000000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_MAX              3000000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP             100000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         32
+
+/* RT5033 regulator LDO output voltage uV */
+#define RT5033_REGULATOR_LDO_VOLTAGE_MIN               1200000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_MAX               3000000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP              100000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          32
+
+/* RT5033 regulator SAFE LDO output voltage uV */
+#define RT5033_REGULATOR_SAFE_LDO_VOLTAGE              4900000U
+
+enum rt5033_fuel_reg {
+       RT5033_FUEL_REG_OCV_H           = 0x00,
+       RT5033_FUEL_REG_OCV_L           = 0x01,
+       RT5033_FUEL_REG_VBAT_H          = 0x02,
+       RT5033_FUEL_REG_VBAT_L          = 0x03,
+       RT5033_FUEL_REG_SOC_H           = 0x04,
+       RT5033_FUEL_REG_SOC_L           = 0x05,
+       RT5033_FUEL_REG_CTRL_H          = 0x06,
+       RT5033_FUEL_REG_CTRL_L          = 0x07,
+       RT5033_FUEL_REG_CRATE           = 0x08,
+       RT5033_FUEL_REG_DEVICE_ID       = 0x09,
+       RT5033_FUEL_REG_AVG_VOLT_H      = 0x0A,
+       RT5033_FUEL_REG_AVG_VOLT_L      = 0x0B,
+       RT5033_FUEL_REG_CONFIG_H        = 0x0C,
+       RT5033_FUEL_REG_CONFIG_L        = 0x0D,
+       /* Reserved 0x0E~0x0F */
+       RT5033_FUEL_REG_IRQ_CTRL        = 0x10,
+       RT5033_FUEL_REG_IRQ_FLAG        = 0x11,
+       RT5033_FUEL_VMIN                = 0x12,
+       RT5033_FUEL_SMIN                = 0x13,
+       /* Reserved 0x14~0x1F */
+       RT5033_FUEL_VGCOMP1             = 0x20,
+       RT5033_FUEL_VGCOMP2             = 0x21,
+       RT5033_FUEL_VGCOMP3             = 0x22,
+       RT5033_FUEL_VGCOMP4             = 0x23,
+       /* Reserved 0x24~0xFD */
+       RT5033_FUEL_MFA_H               = 0xFE,
+       RT5033_FUEL_MFA_L               = 0xFF,
+
+       RT5033_FUEL_REG_END,
+};
+
+/* RT5033 fuel gauge battery present property */
+#define RT5033_FUEL_BAT_PRESENT                0x02
+
+/* RT5033 PMIC interrupts */
+#define RT5033_PMIC_IRQ_BUCKOCP                2
+#define RT5033_PMIC_IRQ_BUCKLV         3
+#define RT5033_PMIC_IRQ_SAFELDOLV      4
+#define RT5033_PMIC_IRQ_LDOLV          5
+#define RT5033_PMIC_IRQ_OT             6
+#define RT5033_PMIC_IRQ_VDDA_UV                7
+
+#endif /* __RT5033_PRIVATE_H__ */
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
new file mode 100644 (file)
index 0000000..010cff4
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * MFD core driver for the RT5033
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5033_H__
+#define __RT5033_H__
+
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+
+/* RT5033 regulator IDs */
+enum rt5033_regulators {
+       RT5033_BUCK = 0,
+       RT5033_LDO,
+       RT5033_SAFE_LDO,
+
+       RT5033_REGULATOR_NUM,
+};
+
+struct rt5033_dev {
+       struct device *dev;
+
+       struct regmap *regmap;
+       struct regmap_irq_chip_data *irq_data;
+       int irq;
+       bool wakeup;
+};
+
+struct rt5033_battery {
+       struct i2c_client       *client;
+       struct rt5033_dev       *rt5033;
+       struct regmap           *regmap;
+       struct power_supply     psy;
+};
+
+/* RT5033 charger platform data */
+struct rt5033_charger_data {
+       unsigned int pre_uamp;
+       unsigned int pre_uvolt;
+       unsigned int const_uvolt;
+       unsigned int eoc_uamp;
+       unsigned int fast_uamp;
+};
+
+struct rt5033_charger {
+       struct device           *dev;
+       struct rt5033_dev       *rt5033;
+       struct power_supply     psy;
+
+       struct rt5033_charger_data      *chg;
+};
+
+#endif /* __RT5033_H__ */
index 7b6d4e9ff603828181239f5f64cbdf3a6d2cd282..7299e9548906ea4219ee0d6897a217a4d022b981 100644 (file)
@@ -163,6 +163,9 @@ enum {
        MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
        MLX4_QP_FLOW_STEERING_DETACH = 0x66,
        MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
+
+       /* Update and read QCN parameters */
+       MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68,
 };
 
 enum {
@@ -233,6 +236,16 @@ struct mlx4_config_dev_params {
        u8      rx_csum_flags_port_2;
 };
 
+enum mlx4_en_congestion_control_algorithm {
+       MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0,
+};
+
+enum mlx4_en_congestion_control_opmod {
+       MLX4_CONGESTION_CONTROL_GET_PARAMS,
+       MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+       MLX4_CONGESTION_CONTROL_SET_PARAMS = 4,
+};
+
 struct mlx4_dev;
 
 struct mlx4_cmd_mailbox {
index e4ebff7e9d02fba498e8d5ae9792666e45c3fa3b..1cc54822b931e7e89f2388371a55a4d7f4a9441c 100644 (file)
@@ -203,7 +203,8 @@ enum {
        MLX4_DEV_CAP_FLAG2_80_VFS               = 1LL <<  18,
        MLX4_DEV_CAP_FLAG2_FS_A0                = 1LL <<  19,
        MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
-       MLX4_DEV_CAP_FLAG2_PORT_REMAP           = 1LL <<  21
+       MLX4_DEV_CAP_FLAG2_PORT_REMAP           = 1LL <<  21,
+       MLX4_DEV_CAP_FLAG2_QCN                  = 1LL <<  22,
 };
 
 enum {
index 2bbc62aa818a374d1c488f2eecf4232230bd3f4e..551f85456c11574a144bf64d1c38ec8031313b3e 100644 (file)
@@ -427,7 +427,7 @@ struct mlx4_wqe_inline_seg {
 
 enum mlx4_update_qp_attr {
        MLX4_UPDATE_QP_SMAC             = 1 << 0,
-       MLX4_UPDATE_QP_VSD              = 1 << 2,
+       MLX4_UPDATE_QP_VSD              = 1 << 1,
        MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 2) - 1
 };
 
index 3301c4c289d625987efb33e6471a70ce4987ff2e..f17fa75809aa1d05a6164fabb4f02b87106c4657 100644 (file)
@@ -227,6 +227,7 @@ struct mtd_info {
        int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
        int (*_suspend) (struct mtd_info *mtd);
        void (*_resume) (struct mtd_info *mtd);
+       void (*_reboot) (struct mtd_info *mtd);
        /*
         * If the driver is something smart, like UBI, it may need to maintain
         * its own reference counting. The below functions are only for driver.
index 63aeccf9ddc8cbae4b40d06705e5ebb3658d510d..4720b86ee73dce54e39516f3d3371409d1254d3a 100644 (file)
 /* Used for Spansion flashes only. */
 #define SPINOR_OP_BRWR         0x17    /* Bank register write */
 
+/* Used for Micron flashes only. */
+#define SPINOR_OP_RD_EVCR      0x65    /* Read EVCR register */
+#define SPINOR_OP_WD_EVCR      0x61    /* Write EVCR register */
+
 /* Status Register bits. */
 #define SR_WIP                 1       /* Write in progress */
 #define SR_WEL                 2       /* Write enable latch */
@@ -67,6 +71,9 @@
 
 #define SR_QUAD_EN_MX          0x40    /* Macronix Quad I/O */
 
+/* Enhanced Volatile Configuration Register bits */
+#define EVCR_QUAD_EN_MICRON    0x80    /* Micron Quad I/O */
+
 /* Flag Status Register bits */
 #define FSR_READY              0x80
 
index 2007f3b44d05ac86d70dde6522db8d673228d73e..45413784a3b172f3f2a2262258c34e57034179a5 100644 (file)
@@ -768,6 +768,8 @@ struct netdev_phys_item_id {
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
                                       struct sk_buff *skb);
 
+struct fib_info;
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -1031,6 +1033,14 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
  *     Called to notify switch device port of bridge port STP
  *     state change.
+ * int (*ndo_sw_parent_fib_ipv4_add)(struct net_device *dev, __be32 dst,
+ *                                  int dst_len, struct fib_info *fi,
+ *                                  u8 tos, u8 type, u32 tb_id);
+ *     Called to add/modify IPv4 route to switch device.
+ * int (*ndo_sw_parent_fib_ipv4_del)(struct net_device *dev, __be32 dst,
+ *                                  int dst_len, struct fib_info *fi,
+ *                                  u8 tos, u8 type, u32 tb_id);
+ *     Called to delete IPv4 route from switch device.
  */
 struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
@@ -1192,6 +1202,18 @@ struct net_device_ops {
                                                            struct netdev_phys_item_id *psid);
        int                     (*ndo_switch_port_stp_update)(struct net_device *dev,
                                                              u8 state);
+       int                     (*ndo_switch_fib_ipv4_add)(struct net_device *dev,
+                                                          __be32 dst,
+                                                          int dst_len,
+                                                          struct fib_info *fi,
+                                                          u8 tos, u8 type,
+                                                          u32 tb_id);
+       int                     (*ndo_switch_fib_ipv4_del)(struct net_device *dev,
+                                                          __be32 dst,
+                                                          int dst_len,
+                                                          struct fib_info *fi,
+                                                          u8 tos, u8 type,
+                                                          u32 tb_id);
 #endif
 };
 
@@ -2341,6 +2363,7 @@ struct gro_remcsum {
 
 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
 {
+       grc->offset = 0;
        grc->delta = 0;
 }
 
index 6d627b92df537ada3886af5850c0fabbac53b66a..2f77e0c651c89874a641c8a04a723aaf60dd2837 100644 (file)
@@ -180,7 +180,6 @@ struct nfs_inode {
         /* NFSv4 state */
        struct list_head        open_states;
        struct nfs_delegation __rcu *delegation;
-       fmode_t                  delegation_state;
        struct rw_semaphore     rwsem;
 
        /* pNFS layout information */
index 38d96ba935c2d7fd91aaf444a598b832b1c0ee4e..4cb3eaa89cf708a57038049db0df75144155d920 100644 (file)
@@ -1167,8 +1167,15 @@ struct nfs41_impl_id {
        struct nfstime4                 date;
 };
 
+struct nfs41_bind_conn_to_session_args {
+       struct nfs_client               *client;
+       struct nfs4_sessionid           sessionid;
+       u32                             dir;
+       bool                            use_conn_in_rdma_mode;
+};
+
 struct nfs41_bind_conn_to_session_res {
-       struct nfs4_session             *session;
+       struct nfs4_sessionid           sessionid;
        u32                             dir;
        bool                            use_conn_in_rdma_mode;
 };
@@ -1185,6 +1192,8 @@ struct nfs41_exchange_id_res {
 
 struct nfs41_create_session_args {
        struct nfs_client              *client;
+       u64                             clientid;
+       uint32_t                        seqid;
        uint32_t                        flags;
        uint32_t                        cb_program;
        struct nfs4_channel_attrs       fc_attrs;       /* Fore Channel */
@@ -1192,7 +1201,11 @@ struct nfs41_create_session_args {
 };
 
 struct nfs41_create_session_res {
-       struct nfs_client              *client;
+       struct nfs4_sessionid           sessionid;
+       uint32_t                        seqid;
+       uint32_t                        flags;
+       struct nfs4_channel_attrs       fc_attrs;       /* Fore Channel */
+       struct nfs4_channel_attrs       bc_attrs;       /* Back Channel */
 };
 
 struct nfs41_reclaim_complete_args {
@@ -1351,7 +1364,7 @@ struct nfs_commit_completion_ops {
 };
 
 struct nfs_commit_info {
-       spinlock_t                      *lock;
+       spinlock_t                      *lock;  /* inode->i_lock */
        struct nfs_mds_commit_info      *mds;
        struct pnfs_ds_commit_info      *ds;
        struct nfs_direct_req           *dreq;  /* O_DIRECT request */
index 19a5d4b23209302bc55cce74c12f69cbd91f260d..0adad4a5419b7cbd7560422de51f9abc97a755a0 100644 (file)
@@ -17,7 +17,6 @@
 
 #include <uapi/linux/nvme.h>
 #include <linux/pci.h>
-#include <linux/miscdevice.h>
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
 
@@ -62,8 +61,6 @@ enum {
        NVME_CSTS_SHST_MASK     = 3 << 2,
 };
 
-#define NVME_VS(major, minor)  (major << 16 | minor)
-
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT        (nvme_io_timeout * HZ)
 
@@ -91,9 +88,10 @@ struct nvme_dev {
        struct nvme_bar __iomem *bar;
        struct list_head namespaces;
        struct kref kref;
-       struct miscdevice miscdev;
+       struct device *device;
        work_func_t reset_workfn;
        struct work_struct reset_work;
+       struct work_struct probe_work;
        char name[12];
        char serial[20];
        char model[40];
@@ -105,7 +103,6 @@ struct nvme_dev {
        u16 abort_limit;
        u8 event_limit;
        u8 vwc;
-       u8 initialized;
 };
 
 /*
@@ -121,6 +118,7 @@ struct nvme_ns {
        unsigned ns_id;
        int lba_shift;
        int ms;
+       int pi_type;
        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
 };
@@ -138,6 +136,7 @@ struct nvme_iod {
        int nents;              /* Used in scatterlist */
        int length;             /* Of data, in bytes */
        dma_addr_t first_dma;
+       struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
        struct scatterlist sg[0];
 };
 
diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
new file mode 100644 (file)
index 0000000..9882937
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * board initialization should put one of these structures into platform_data
+ * and place the bfin-rotary onto platform_bus named "bfin-rotary".
+ *
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _BFIN_ROTARY_H
+#define _BFIN_ROTARY_H
+
+/* mode bitmasks */
+#define ROT_QUAD_ENC   CNTMODE_QUADENC /* quadrature/grey code encoder mode */
+#define ROT_BIN_ENC    CNTMODE_BINENC  /* binary encoder mode */
+#define ROT_UD_CNT     CNTMODE_UDCNT   /* rotary counter mode */
+#define ROT_DIR_CNT    CNTMODE_DIRCNT  /* direction counter mode */
+
+#define ROT_DEBE       DEBE            /* Debounce Enable */
+
+#define ROT_CDGINV     CDGINV          /* CDG Pin Polarity Invert */
+#define ROT_CUDINV     CUDINV          /* CUD Pin Polarity Invert */
+#define ROT_CZMINV     CZMINV          /* CZM Pin Polarity Invert */
+
+struct bfin_rotary_platform_data {
+       /* set rotary UP KEY_### or BTN_### in case you prefer
+        * bfin-rotary to send EV_KEY otherwise set 0
+        */
+       unsigned int rotary_up_key;
+       /* set rotary DOWN KEY_### or BTN_### in case you prefer
+        * bfin-rotary to send EV_KEY otherwise set 0
+        */
+       unsigned int rotary_down_key;
+       /* set rotary BUTTON KEY_### or BTN_### */
+       unsigned int rotary_button_key;
+       /* set rotary Relative Axis REL_### in case you prefer
+        * bfin-rotary to send EV_REL otherwise set 0
+        */
+       unsigned int rotary_rel_code;
+       unsigned short debounce;        /* 0..17 */
+       unsigned short mode;
+       unsigned short pm_wakeup;
+       unsigned short *pin_list;
+};
+
+/* CNT_CONFIG bitmasks */
+#define CNTE           (1 << 0)        /* Counter Enable */
+#define DEBE           (1 << 1)        /* Debounce Enable */
+#define CDGINV         (1 << 4)        /* CDG Pin Polarity Invert */
+#define CUDINV         (1 << 5)        /* CUD Pin Polarity Invert */
+#define CZMINV         (1 << 6)        /* CZM Pin Polarity Invert */
+#define CNTMODE_SHIFT  8
+#define CNTMODE                (0x7 << CNTMODE_SHIFT)  /* Counter Operating Mode */
+#define ZMZC           (1 << 1)        /* CZM Zeroes Counter Enable */
+#define BNDMODE_SHIFT  12
+#define BNDMODE                (0x3 << BNDMODE_SHIFT)  /* Boundary register Mode */
+#define INPDIS         (1 << 15)       /* CUG and CDG Input Disable */
+
+#define CNTMODE_QUADENC        (0 << CNTMODE_SHIFT)    /* quadrature encoder mode */
+#define CNTMODE_BINENC (1 << CNTMODE_SHIFT)    /* binary encoder mode */
+#define CNTMODE_UDCNT  (2 << CNTMODE_SHIFT)    /* up/down counter mode */
+#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT)    /* direction counter mode */
+#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT)    /* direction timer mode */
+
+#define BNDMODE_COMP   (0 << BNDMODE_SHIFT)    /* boundary compare mode */
+#define BNDMODE_ZERO   (1 << BNDMODE_SHIFT)    /* boundary compare and zero mode */
+#define BNDMODE_CAPT   (2 << BNDMODE_SHIFT)    /* boundary capture mode */
+#define BNDMODE_AEXT   (3 << BNDMODE_SHIFT)    /* boundary auto-extend mode */
+
+/* CNT_IMASK bitmasks */
+#define ICIE           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Enable */
+#define UCIE           (1 << 1)        /* Up count Interrupt Enable */
+#define DCIE           (1 << 2)        /* Down count Interrupt Enable */
+#define MINCIE         (1 << 3)        /* Min Count Interrupt Enable */
+#define MAXCIE         (1 << 4)        /* Max Count Interrupt Enable */
+#define COV31IE                (1 << 5)        /* Bit 31 Overflow Interrupt Enable */
+#define COV15IE                (1 << 6)        /* Bit 15 Overflow Interrupt Enable */
+#define CZEROIE                (1 << 7)        /* Count to Zero Interrupt Enable */
+#define CZMIE          (1 << 8)        /* CZM Pin Interrupt Enable */
+#define CZMEIE         (1 << 9)        /* CZM Error Interrupt Enable */
+#define CZMZIE         (1 << 10)       /* CZM Zeroes Counter Interrupt Enable */
+
+/* CNT_STATUS bitmasks */
+#define ICII           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Identifier */
+#define UCII           (1 << 1)        /* Up count Interrupt Identifier */
+#define DCII           (1 << 2)        /* Down count Interrupt Identifier */
+#define MINCII         (1 << 3)        /* Min Count Interrupt Identifier */
+#define MAXCII         (1 << 4)        /* Max Count Interrupt Identifier */
+#define COV31II                (1 << 5)        /* Bit 31 Overflow Interrupt Identifier */
+#define COV15II                (1 << 6)        /* Bit 15 Overflow Interrupt Identifier */
+#define CZEROII                (1 << 7)        /* Count to Zero Interrupt Identifier */
+#define CZMII          (1 << 8)        /* CZM Pin Interrupt Identifier */
+#define CZMEII         (1 << 9)        /* CZM Error Interrupt Identifier */
+#define CZMZII         (1 << 10)       /* CZM Zeroes Counter Interrupt Identifier */
+
+/* CNT_COMMAND bitmasks */
+#define W1LCNT         0xf             /* Load Counter Register */
+#define W1LMIN         0xf0            /* Load Min Register */
+#define W1LMAX         0xf00           /* Load Max Register */
+#define W1ZMONCE       (1 << 12)       /* Enable CZM Clear Counter Once */
+
+#define W1LCNT_ZERO    (1 << 0)        /* write 1 to load CNT_COUNTER with zero */
+#define W1LCNT_MIN     (1 << 2)        /* write 1 to load CNT_COUNTER from CNT_MIN */
+#define W1LCNT_MAX     (1 << 3)        /* write 1 to load CNT_COUNTER from CNT_MAX */
+
+#define W1LMIN_ZERO    (1 << 4)        /* write 1 to load CNT_MIN with zero */
+#define W1LMIN_CNT     (1 << 5)        /* write 1 to load CNT_MIN from CNT_COUNTER */
+#define W1LMIN_MAX     (1 << 7)        /* write 1 to load CNT_MIN from CNT_MAX */
+
+#define W1LMAX_ZERO    (1 << 8)        /* write 1 to load CNT_MAX with zero */
+#define W1LMAX_CNT     (1 << 9)        /* write 1 to load CNT_MAX from CNT_COUNTER */
+#define W1LMAX_MIN     (1 << 10)       /* write 1 to load CNT_MAX from CNT_MIN */
+
+/* CNT_DEBOUNCE bitmasks */
+#define DPRESCALE      0xf             /* Debounce prescale (copy-paste said "Load Counter Register" — likely wrong) */
+
+#endif
index d8155c005242e7ca50a9630ab392dde7eef4d29e..87ac14c584f2cddb11f173aed3e8c92000813119 100644 (file)
 
 #include <linux/device.h>
 
+#define DW_DMA_MAX_NR_MASTERS  4
+
 /**
  * struct dw_dma_slave - Controller-specific information about a slave
  *
- * @dma_dev: required DMA master device. Depricated.
+ * @dma_dev:   required DMA master device
  * @src_id:    src request line
  * @dst_id:    dst request line
  * @src_master: src master for transfers on allocated channel.
@@ -53,7 +55,7 @@ struct dw_dma_platform_data {
        unsigned char   chan_priority;
        unsigned short  block_size;
        unsigned char   nr_masters;
-       unsigned char   data_width[4];
+       unsigned char   data_width[DW_DMA_MAX_NR_MASTERS];
 };
 
 #endif /* _PLATFORM_DATA_DMA_DW_H */
index 66574ea39f97dc8b22e49d6625e0a5c16292b821..0c72886030ef0f3555a2fe78a488b1e0109fae81 100644 (file)
@@ -28,6 +28,13 @@ struct sram_platdata {
        int granularity;
 };
 
+#ifdef CONFIG_ARM
 extern struct gen_pool *sram_get_gpool(char *pool_name);
+#else
+static inline struct gen_pool *sram_get_gpool(char *pool_name)
+{
+       return NULL;
+}
+#endif
 
 #endif /* __DMA_MMP_TDMA_H */
index 58851275fed98c352fdd4995e95f1ebe806649e7..d438eeb08bff407043b32d5f52f58d08fac8838f 100644 (file)
@@ -54,10 +54,11 @@ struct rhash_head {
  * @buckets: size * hash buckets
  */
 struct bucket_table {
-       size_t                          size;
-       unsigned int                    locks_mask;
-       spinlock_t                      *locks;
-       struct rhash_head __rcu         *buckets[];
+       size_t                  size;
+       unsigned int            locks_mask;
+       spinlock_t              *locks;
+
+       struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
@@ -78,12 +79,6 @@ struct rhashtable;
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
  * @obj_hashfn: Function to hash object
- * @grow_decision: If defined, may return true if table should expand
- * @shrink_decision: If defined, may return true if table should shrink
- *
- * Note: when implementing the grow and shrink decision function, min/max
- * shift must be enforced, otherwise, resizing watermarks they set may be
- * useless.
  */
 struct rhashtable_params {
        size_t                  nelem_hint;
@@ -97,10 +92,6 @@ struct rhashtable_params {
        size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
-       bool                    (*grow_decision)(const struct rhashtable *ht,
-                                                size_t new_size);
-       bool                    (*shrink_decision)(const struct rhashtable *ht,
-                                                  size_t new_size);
 };
 
 /**
@@ -192,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
 
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
-
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
 
index 41c60e5302d703525c1d99f259de9297ea1b36fd..6d77432e14ff971bffd4ca211dccb917768b2c8c 100644 (file)
@@ -363,9 +363,6 @@ extern void show_regs(struct pt_regs *);
  */
 extern void show_stack(struct task_struct *task, unsigned long *sp);
 
-void io_schedule(void);
-long io_schedule_timeout(long timeout);
-
 extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
@@ -422,6 +419,13 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
+extern long io_schedule_timeout(long timeout);
+
+static inline void io_schedule(void)
+{
+       io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+}
+
 struct nsproxy;
 struct user_namespace;
 
index 5c19cba34dce023a49c45d776751190834b88889..fab4d0ddf4eda67ff416bb10a1e7b545639462fd 100644 (file)
@@ -181,6 +181,7 @@ struct ucred {
 #define AF_WANPIPE     25      /* Wanpipe API Sockets */
 #define AF_LLC         26      /* Linux LLC                    */
 #define AF_IB          27      /* Native InfiniBand address    */
+#define AF_MPLS                28      /* MPLS */
 #define AF_CAN         29      /* Controller Area Network      */
 #define AF_TIPC                30      /* TIPC sockets                 */
 #define AF_BLUETOOTH   31      /* Bluetooth sockets            */
@@ -226,6 +227,7 @@ struct ucred {
 #define PF_WANPIPE     AF_WANPIPE
 #define PF_LLC         AF_LLC
 #define PF_IB          AF_IB
+#define PF_MPLS                AF_MPLS
 #define PF_CAN         AF_CAN
 #define PF_TIPC                AF_TIPC
 #define PF_BLUETOOTH   AF_BLUETOOTH
index 7e61a17030a4843bf0f80fda4dc2ae9f379409ef..694eecb2f1b5dffd8fe7f43c2979528ae2639042 100644 (file)
@@ -89,8 +89,11 @@ void                 rpc_free_iostats(struct rpc_iostats *);
 static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
 static inline void rpc_count_iostats(const struct rpc_task *task,
                                     struct rpc_iostats *stats) {}
-static inline void rpc_count_iostats_metrics(const struct rpc_task *,
-                                            struct rpc_iostats *) {}
+static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
+                                            struct rpc_iostats *stats)
+{
+}
+
 static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
 static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
 
index fc52e307efab8768effbb7880702986653e0a07c..5eac316490eab9ca0ad1a864be2b6fedb823f332 100644 (file)
@@ -314,6 +314,8 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
 }
 
 #endif
+
+#if IS_ENABLED(CONFIG_THERMAL)
 struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
                void *, struct thermal_zone_device_ops *,
                const struct thermal_zone_params *, int, int);
@@ -340,8 +342,58 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
                struct thermal_cooling_device *, int);
 void thermal_cdev_update(struct thermal_cooling_device *);
 void thermal_notify_framework(struct thermal_zone_device *, int);
-
-#ifdef CONFIG_NET
+#else
+static inline struct thermal_zone_device *thermal_zone_device_register(
+       const char *type, int trips, int mask, void *devdata,
+       struct thermal_zone_device_ops *ops,
+       const struct thermal_zone_params *tzp,
+       int passive_delay, int polling_delay)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_zone_device_unregister(
+       struct thermal_zone_device *tz)
+{ }
+static inline int thermal_zone_bind_cooling_device(
+       struct thermal_zone_device *tz, int trip,
+       struct thermal_cooling_device *cdev,
+       unsigned long upper, unsigned long lower)
+{ return -ENODEV; }
+static inline int thermal_zone_unbind_cooling_device(
+       struct thermal_zone_device *tz, int trip,
+       struct thermal_cooling_device *cdev)
+{ return -ENODEV; }
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz)
+{ }
+static inline struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+       const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline struct thermal_cooling_device *
+thermal_of_cooling_device_register(struct device_node *np,
+       char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cooling_device_unregister(
+       struct thermal_cooling_device *cdev)
+{ }
+static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
+               const char *name)
+{ return ERR_PTR(-ENODEV); }
+static inline int thermal_zone_get_temp(
+               struct thermal_zone_device *tz, unsigned long *temp)
+{ return -ENODEV; }
+static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
+{ return -ENODEV; }
+static inline struct thermal_instance *
+get_thermal_instance(struct thermal_zone_device *tz,
+       struct thermal_cooling_device *cdev, int trip)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
+{ }
+static inline void thermal_notify_framework(struct thermal_zone_device *tz,
+       int trip)
+{ }
+#endif /* CONFIG_THERMAL */
+
+#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL)
 extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
                                                enum events event);
 #else
index d3204115f15d21dd7ef3d879df2393884795b037..2d67b8998fd8b49d877d65b0b94a022be47d4e28 100644 (file)
@@ -26,6 +26,7 @@
  * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
  *         operations documented below
  * @mmap: Perform mmap(2) on a region of the device file descriptor
+ * @request: Request for the bus driver to release the device
  */
 struct vfio_device_ops {
        char    *name;
@@ -38,6 +39,7 @@ struct vfio_device_ops {
        long    (*ioctl)(void *device_data, unsigned int cmd,
                         unsigned long arg);
        int     (*mmap)(void *device_data, struct vm_area_struct *vma);
+       void    (*request)(void *device_data, unsigned int count);
 };
 
 extern int vfio_add_group_dev(struct device *dev,
index 5c7b6f0daef8f1f228c6daed124e589acdfdb649..c4b09689ab644719d1aa28fdb9510dfe9c5a8b5b 100644 (file)
 /* Virtio vendor ID - Read Only */
 #define VIRTIO_MMIO_VENDOR_ID          0x00c
 
-/* Bitmask of the features supported by the host
+/* Bitmask of the features supported by the device (host)
  * (32 bits per set) - Read Only */
-#define VIRTIO_MMIO_HOST_FEATURES      0x010
+#define VIRTIO_MMIO_DEVICE_FEATURES    0x010
 
-/* Host features set selector - Write Only */
-#define VIRTIO_MMIO_HOST_FEATURES_SEL  0x014
+/* Device (host) features set selector - Write Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES_SEL        0x014
 
-/* Bitmask of features activated by the guest
+/* Bitmask of features activated by the driver (guest)
  * (32 bits per set) - Write Only */
-#define VIRTIO_MMIO_GUEST_FEATURES     0x020
+#define VIRTIO_MMIO_DRIVER_FEATURES    0x020
 
 /* Activated features set selector - Write Only */
-#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
+#define VIRTIO_MMIO_DRIVER_FEATURES_SEL        0x024
+
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
 
 /* Guest's memory page size in bytes - Write Only */
 #define VIRTIO_MMIO_GUEST_PAGE_SIZE    0x028
 
+#endif
+
+
 /* Queue selector - Write Only */
 #define VIRTIO_MMIO_QUEUE_SEL          0x030
 
 /* Queue size for the currently selected queue - Write Only */
 #define VIRTIO_MMIO_QUEUE_NUM          0x038
 
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
 /* Used Ring alignment for the currently selected queue - Write Only */
 #define VIRTIO_MMIO_QUEUE_ALIGN                0x03c
 
 /* Guest's PFN for the currently selected queue - Read Write */
 #define VIRTIO_MMIO_QUEUE_PFN          0x040
 
+#endif
+
+
+/* Ready bit for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_READY                0x044
+
 /* Queue notifier - Write Only */
 #define VIRTIO_MMIO_QUEUE_NOTIFY       0x050
 
 /* Device status register - Read Write */
 #define VIRTIO_MMIO_STATUS             0x070
 
+/* Selected queue's Descriptor Table address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_DESC_LOW     0x080
+#define VIRTIO_MMIO_QUEUE_DESC_HIGH    0x084
+
+/* Selected queue's Available Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_AVAIL_LOW    0x090
+#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH   0x094
+
+/* Selected queue's Used Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_USED_LOW     0x0a0
+#define VIRTIO_MMIO_QUEUE_USED_HIGH    0x0a4
+
+/* Configuration atomicity value */
+#define VIRTIO_MMIO_CONFIG_GENERATION  0x0fc
+
 /* The config space is defined by each driver as
  * the per-driver configuration space - Read Write */
 #define VIRTIO_MMIO_CONFIG             0x100
index 21ee1860abbc7109006ebc5d625ac25d41d3f6bc..5e0f891d476c299d91fde14ce9cdeedc9a7e26c6 100644 (file)
@@ -9,28 +9,17 @@
 
 extern struct neigh_table arp_tbl;
 
-static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
+static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd)
 {
+       u32 key = *(const u32 *)pkey;
        u32 val = key ^ hash32_ptr(dev);
 
-       return val * hash_rnd;
+       return val * hash_rnd[0];
 }
 
 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
 {
-       struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht);
-       struct neighbour *n;
-       u32 hash_val;
-
-       hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
-       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
-            n != NULL;
-            n = rcu_dereference_bh(n->next)) {
-               if (n->dev == dev && *(u32 *)n->primary_key == key)
-                       return n;
-       }
-
-       return NULL;
+       return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
 }
 
 static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
index 45feeba7a3250be44e78a1a70b4a7da529a47edf..16a923a3a43a8825d167b75025baca0b1b5d741c 100644 (file)
@@ -367,11 +367,8 @@ int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
                  struct net_device *);
 
 /* ax25_ip.c */
-int ax25_neigh_construct(struct neighbour *neigh);
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
 extern const struct header_ops ax25_header_ops;
-struct ax25_neigh_priv {
-       struct neigh_ops ops;
-};
 
 /* ax25_out.c */
 ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
index 1c1ad46250d5c9e0abb0910f1b7debaa6aaa9608..fe328c52c46bd179b651d6bbb14e58a89f017557 100644 (file)
@@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
  * @return    Checksum of buffer.
  */
 
-u16 cfpkt_iterate(struct cfpkt *pkt,
+int cfpkt_iterate(struct cfpkt *pkt,
                u16 (*iter_func)(u16 chks, void *buf, u16 len),
                u16 data);
 
index 597b88a94332150c2da3f163daccbc012ce1b9cd..207d9ba1f92c8ae1789cff6fdc99ad15f256488b 100644 (file)
@@ -49,6 +49,9 @@ struct dcbnl_rtnl_ops {
        int (*ieee_setets) (struct net_device *, struct ieee_ets *);
        int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
        int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
+       int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *);
+       int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *);
+       int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *);
        int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
        int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
        int (*ieee_getapp) (struct net_device *, struct dcb_app *);
index fac4e3f4a6d3c0ede92af3d4df4c24319a94c0a0..0f26aa707e62a13225031797c9c2edadbff05a3a 100644 (file)
@@ -22,6 +22,7 @@ int dn_neigh_router_hello(struct sk_buff *skb);
 int dn_neigh_endnode_hello(struct sk_buff *skb);
 void dn_neigh_pointopoint_hello(struct sk_buff *skb);
 int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+int dn_to_neigh_output(struct sk_buff *skb);
 
 extern struct neigh_table dn_neigh_table;
 
index c542c131d5512ae233a98a43468e56a66b5609db..b525ac5165599427204e839fe0a63a127c5a7ed7 100644 (file)
@@ -127,6 +127,11 @@ struct dsa_switch {
        struct dsa_switch_tree  *dst;
        int                     index;
 
+       /*
+        * Tagging protocol understood by this switch
+        */
+       enum dsa_tag_protocol   tag_protocol;
+
        /*
         * Configuration data for this switch.
         */
index 5976bdecf58b05b26980c76ae140d7c016ca939b..b9a6b0a94cc6b52a70158dd2bc7eb847baa6bed0 100644 (file)
@@ -126,6 +126,8 @@ struct inet_connection_sock {
 
                /* Information on the current probe. */
                int               probe_size;
+
+               u32               probe_timestamp;
        } icsk_mtup;
        u32                       icsk_ca_priv[16];
        u32                       icsk_user_timeout;
index cba4b7c329358271fea1f82c39da851189dd1ec9..1657604c5dd326066e0bc19696c6b26b8622adac 100644 (file)
@@ -185,6 +185,7 @@ struct fib_table {
        u32                     tb_id;
        int                     tb_default;
        int                     tb_num_default;
+       struct rcu_head         rcu;
        unsigned long           tb_data[0];
 };
 
@@ -195,6 +196,7 @@ int fib_table_delete(struct fib_table *, struct fib_config *);
 int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
                   struct netlink_callback *cb);
 int fib_table_flush(struct fib_table *table);
+void fib_table_flush_external(struct fib_table *table);
 void fib_free_table(struct fib_table *tb);
 
 
@@ -206,12 +208,16 @@ void fib_free_table(struct fib_table *tb);
 
 static inline struct fib_table *fib_get_table(struct net *net, u32 id)
 {
+       struct hlist_node *tb_hlist;
        struct hlist_head *ptr;
 
        ptr = id == RT_TABLE_LOCAL ?
                &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] :
                &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX];
-       return hlist_entry(ptr->first, struct fib_table, tb_hlist);
+
+       tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr));
+
+       return hlist_entry(tb_hlist, struct fib_table, tb_hlist);
 }
 
 static inline struct fib_table *fib_new_table(struct net *net, u32 id)
@@ -222,15 +228,19 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
 static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
                             struct fib_result *res)
 {
-       int err = -ENETUNREACH;
+       struct fib_table *tb;
+       int err;
 
        rcu_read_lock();
 
-       if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res,
-                             FIB_LOOKUP_NOREF) ||
-           !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res,
-                             FIB_LOOKUP_NOREF))
-               err = 0;
+       for (err = 0; !err; err = -ENETUNREACH) {
+               tb = fib_get_table(net, RT_TABLE_LOCAL);
+               if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+                       break;
+               tb = fib_get_table(net, RT_TABLE_MAIN);
+               if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+                       break;
+       }
 
        rcu_read_unlock();
 
@@ -249,28 +259,33 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
 static inline int fib_lookup(struct net *net, struct flowi4 *flp,
                             struct fib_result *res)
 {
-       if (!net->ipv4.fib_has_custom_rules) {
-               int err = -ENETUNREACH;
-
-               rcu_read_lock();
-
-               res->tclassid = 0;
-               if ((net->ipv4.fib_local &&
-                    !fib_table_lookup(net->ipv4.fib_local, flp, res,
-                                      FIB_LOOKUP_NOREF)) ||
-                   (net->ipv4.fib_main &&
-                    !fib_table_lookup(net->ipv4.fib_main, flp, res,
-                                      FIB_LOOKUP_NOREF)) ||
-                   (net->ipv4.fib_default &&
-                    !fib_table_lookup(net->ipv4.fib_default, flp, res,
-                                      FIB_LOOKUP_NOREF)))
-                       err = 0;
-
-               rcu_read_unlock();
-
-               return err;
+       struct fib_table *tb;
+       int err;
+
+       if (net->ipv4.fib_has_custom_rules)
+               return __fib_lookup(net, flp, res);
+
+       rcu_read_lock();
+
+       res->tclassid = 0;
+
+       for (err = 0; !err; err = -ENETUNREACH) {
+               tb = rcu_dereference_rtnl(net->ipv4.fib_local);
+               if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+                       break;
+
+               tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+               if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+                       break;
+
+               tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+               if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+                       break;
        }
-       return __fib_lookup(net, flp, res);
+
+       rcu_read_unlock();
+
+       return err;
 }
 
 #endif /* CONFIG_IP_MULTIPLE_TABLES */
@@ -294,6 +309,7 @@ static inline int fib_num_tclassid_users(struct net *net)
        return 0;
 }
 #endif
+void fib_flush_external(struct net *net);
 
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
index 6bbda34d5e59d030a1e2d69e93cda754d06ae5c0..b3a7751251b4cb9ce1a7fcb1d3999a63f4ff5074 100644 (file)
@@ -156,24 +156,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
 
 static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey)
 {
-       struct neigh_hash_table *nht;
-       const u32 *p32 = pkey;
-       struct neighbour *n;
-       u32 hash_val;
-
-       nht = rcu_dereference_bh(nd_tbl.nht);
-       hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
-       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
-            n != NULL;
-            n = rcu_dereference_bh(n->next)) {
-               u32 *n32 = (u32 *) n->primary_key;
-               if (n->dev == dev &&
-                   ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
-                    (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0)
-                       return n;
-       }
-
-       return NULL;
+       return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
 }
 
 static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
index 9f912e4d4232d1b5a5ec1ab3f7ef51d0cac730e5..d48b8ec8b5f4aaa797b5ff1ff0aac5291cb032a4 100644 (file)
@@ -197,6 +197,7 @@ struct neigh_table {
        __u32                   (*hash)(const void *pkey,
                                        const struct net_device *dev,
                                        __u32 *hash_rnd);
+       bool                    (*key_eq)(const struct neighbour *, const void *pkey);
        int                     (*constructor)(struct neighbour *);
        int                     (*pconstructor)(struct pneigh_entry *);
        void                    (*pdestructor)(struct pneigh_entry *);
@@ -225,6 +226,7 @@ enum {
        NEIGH_ND_TABLE = 1,
        NEIGH_DN_TABLE = 2,
        NEIGH_NR_TABLES,
+       NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
 };
 
 static inline int neigh_parms_family(struct neigh_parms *p)
@@ -247,6 +249,57 @@ static inline void *neighbour_priv(const struct neighbour *n)
 #define NEIGH_UPDATE_F_ISROUTER                        0x40000000
 #define NEIGH_UPDATE_F_ADMIN                   0x80000000
 
+
+static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
+{
+       return *(const u16 *)n->primary_key == *(const u16 *)pkey;
+}
+
+static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
+{
+       return *(const u32 *)n->primary_key == *(const u32 *)pkey;
+}
+
+static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
+{
+       const u32 *n32 = (const u32 *)n->primary_key;
+       const u32 *p32 = pkey;
+
+       return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+               (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
+}
+
+static inline struct neighbour *___neigh_lookup_noref(
+       struct neigh_table *tbl,
+       bool (*key_eq)(const struct neighbour *n, const void *pkey),
+       __u32 (*hash)(const void *pkey,
+                     const struct net_device *dev,
+                     __u32 *hash_rnd),
+       const void *pkey,
+       struct net_device *dev)
+{
+       struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
+       struct neighbour *n;
+       u32 hash_val;
+
+       hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+            n != NULL;
+            n = rcu_dereference_bh(n->next)) {
+               if (n->dev == dev && key_eq(n, pkey))
+                       return n;
+       }
+
+       return NULL;
+}
+
+static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
+                                                    const void *pkey,
+                                                    struct net_device *dev)
+{
+       return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
+}
+
 void neigh_table_init(int index, struct neigh_table *tbl);
 int neigh_table_clear(int index, struct neigh_table *tbl);
 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -306,6 +359,7 @@ void neigh_for_each(struct neigh_table *tbl,
                    void (*cb)(struct neighbour *, void *), void *cookie);
 void __neigh_for_each_release(struct neigh_table *tbl,
                              int (*cb)(struct neighbour *));
+int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
 void pneigh_for_each(struct neigh_table *tbl,
                     void (*cb)(struct pneigh_entry *));
 
@@ -459,4 +513,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
                memcpy(dst, n->ha, dev->addr_len);
        } while (read_seqretry(&n->ha_lock, seq));
 }
+
+
 #endif
index 36faf4990c4b6f2604fd2b00178c941d75f540a8..2cb9acb618e9b4fe5b8fd8ce4cb95bc8528aa5e0 100644 (file)
@@ -26,6 +26,7 @@
 #endif
 #include <net/netns/nftables.h>
 #include <net/netns/xfrm.h>
+#include <net/netns/mpls.h>
 #include <linux/ns_common.h>
 
 struct user_namespace;
@@ -129,6 +130,9 @@ struct net {
 #endif
 #if IS_ENABLED(CONFIG_IP_VS)
        struct netns_ipvs       *ipvs;
+#endif
+#if IS_ENABLED(CONFIG_MPLS)
+       struct netns_mpls       mpls;
 #endif
        struct sock             *diag_nlsk;
        atomic_t                fnhe_genid;
index 1b26c6c3fd7cd44a66342c1a76176af0d667b5ad..8f3a1a1a5a94e5626e8b13f5bcc1fb89b590d54a 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/uidgid.h>
 #include <net/inet_frag.h>
+#include <linux/rcupdate.h>
 
 struct tcpm_hash_bucket;
 struct ctl_table_header;
@@ -38,14 +39,15 @@ struct netns_ipv4 {
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        struct fib_rules_ops    *rules_ops;
        bool                    fib_has_custom_rules;
-       struct fib_table        *fib_local;
-       struct fib_table        *fib_main;
-       struct fib_table        *fib_default;
+       struct fib_table __rcu  *fib_local;
+       struct fib_table __rcu  *fib_main;
+       struct fib_table __rcu  *fib_default;
 #endif
 #ifdef CONFIG_IP_ROUTE_CLASSID
        int                     fib_num_tclassid_users;
 #endif
        struct hlist_head       *fib_table_hash;
+       bool                    fib_offload_disabled;
        struct sock             *fibnl;
 
        struct sock  * __percpu *icmp_sk;
@@ -85,6 +87,8 @@ struct netns_ipv4 {
        int sysctl_tcp_fwmark_accept;
        int sysctl_tcp_mtu_probing;
        int sysctl_tcp_base_mss;
+       int sysctl_tcp_probe_threshold;
+       u32 sysctl_tcp_probe_interval;
 
        struct ping_group_range ping_group_range;
 
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
new file mode 100644 (file)
index 0000000..d292036
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * mpls in net namespaces
+ */
+
+#ifndef __NETNS_MPLS_H__
+#define __NETNS_MPLS_H__
+
+struct mpls_route;
+struct ctl_table_header;
+
+struct netns_mpls {
+       size_t platform_labels;
+       struct mpls_route __rcu * __rcu *platform_label;
+       struct ctl_table_header *ctl;
+};
+
+#endif /* __NETNS_MPLS_H__ */
index c605d305c577074d11bee6f19479dda8a4949ee3..6d778efcfdfd6c8a3973e03424625667ec350c3e 100644 (file)
@@ -213,7 +213,7 @@ struct tcf_proto_ops {
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto*);
-       void                    (*destroy)(struct tcf_proto*);
+       bool                    (*destroy)(struct tcf_proto*, bool);
 
        unsigned long           (*get)(struct tcf_proto*, u32 handle);
        int                     (*change)(struct net *net, struct sk_buff *,
@@ -399,7 +399,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid);
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
-void tcf_destroy(struct tcf_proto *tp);
+bool tcf_destroy(struct tcf_proto *tp, bool force);
 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 
 /* Reset all TX qdiscs greater then index of a device.  */
index cfcdac2e5d253ef431bc18709cc5e7edd0e47e6f..933fac410a7abaa392f47692ead8d7d7971ad42b 100644 (file)
@@ -51,6 +51,12 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
                                               struct nlmsghdr *nlh, u16 flags);
 int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
                                               struct nlmsghdr *nlh, u16 flags);
+int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+                              u8 tos, u8 type, u32 tb_id);
+int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+                              u8 tos, u8 type, u32 tb_id);
+void netdev_switch_fib_ipv4_abort(struct fib_info *fi);
+
 #else
 
 static inline int netdev_switch_parent_id_get(struct net_device *dev,
@@ -109,6 +115,24 @@ static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *
        return 0;
 }
 
+static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len,
+                                            struct fib_info *fi,
+                                            u8 tos, u8 type, u32 tb_id)
+{
+       return 0;
+}
+
+static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len,
+                                            struct fib_info *fi,
+                                            u8 tos, u8 type, u32 tb_id)
+{
+       return 0;
+}
+
+static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+{
+}
+
 #endif
 
 #endif /* _LINUX_SWITCHDEV_H_ */
index f87599d5af8236148dff21cb4d1ff1c708ed1c8a..2e11e38205c226b3a0644e1b4752570414918e50 100644 (file)
@@ -65,7 +65,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCP_MIN_MSS            88U
 
 /* The least MTU to use for probing */
-#define TCP_BASE_MSS           512
+#define TCP_BASE_MSS           1024
+
+/* probing interval, default to 10 minutes as per RFC4821 */
+#define TCP_PROBE_INTERVAL     600
+
+/* Specify interval when tcp mtu probing will stop */
+#define TCP_PROBE_THRESHOLD    8
 
 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
 #define TCP_FASTRETRANS_THRESH 3
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
new file mode 100644 (file)
index 0000000..d3583d3
--- /dev/null
@@ -0,0 +1,897 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION                 "v4.1.0"
+#define ISCSI_MAX_DATASN_MISSING_COUNT 16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT    2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT    2
+#define SECONDS_FOR_ASYNC_LOGOUT       10
+#define SECONDS_FOR_ASYNC_TEXT         10
+#define SECONDS_FOR_LOGOUT_COMP                15
+#define WHITE_SPACE                    " \t\v\f\n\r"
+#define ISCSIT_MIN_TAGS                        16
+#define ISCSIT_EXTRA_TAGS              8
+#define ISCSIT_TCP_BACKLOG             256
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT             3
+#define NA_DATAOUT_TIMEOUT_MAX         60
+#define NA_DATAOUT_TIMEOUT_MIX         2
+#define NA_DATAOUT_TIMEOUT_RETRIES     5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+#define NA_NOPIN_TIMEOUT               15
+#define NA_NOPIN_TIMEOUT_MAX           60
+#define NA_NOPIN_TIMEOUT_MIN           3
+#define NA_NOPIN_RESPONSE_TIMEOUT      30
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX  60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN  3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS   0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS   0
+#define NA_RANDOM_R2T_OFFSETS          0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION              1
+#define TA_LOGIN_TIMEOUT               15
+#define TA_LOGIN_TIMEOUT_MAX           30
+#define TA_LOGIN_TIMEOUT_MIN           5
+#define TA_NETIF_TIMEOUT               2
+#define TA_NETIF_TIMEOUT_MAX           15
+#define TA_NETIF_TIMEOUT_MIN           2
+#define TA_GENERATE_NODE_ACLS          0
+#define TA_DEFAULT_CMDSN_DEPTH         64
+#define TA_DEFAULT_CMDSN_DEPTH_MAX     512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN     1
+#define TA_CACHE_DYNAMIC_ACLS          0
+/* Enabled by default in demo mode (generic_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT     1
+/* Disabled by default in production mode w/ explict ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT     0
+#define TA_DEMO_MODE_DISCOVERY         1
+#define TA_DEFAULT_ERL                 0
+#define TA_CACHE_CORE_NPS              0
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI              0
+
+#define ISCSI_IOV_DATA_BUFFER          5
+
+enum iscsit_transport_type {
+       ISCSI_TCP                               = 0,
+       ISCSI_SCTP_TCP                          = 1,
+       ISCSI_SCTP_UDP                          = 2,
+       ISCSI_IWARP_TCP                         = 3,
+       ISCSI_IWARP_SCTP                        = 4,
+       ISCSI_INFINIBAND                        = 5,
+};
+
+/* RFC-3720 7.1.4  Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+       TARG_CONN_STATE_FREE                    = 0x1,
+       TARG_CONN_STATE_XPT_UP                  = 0x3,
+       TARG_CONN_STATE_IN_LOGIN                = 0x4,
+       TARG_CONN_STATE_LOGGED_IN               = 0x5,
+       TARG_CONN_STATE_IN_LOGOUT               = 0x6,
+       TARG_CONN_STATE_LOGOUT_REQUESTED        = 0x7,
+       TARG_CONN_STATE_CLEANUP_WAIT            = 0x8,
+};
+
+/* RFC-3720 7.3.2  Session State Diagram for a Target */
+enum target_sess_state_table {
+       TARG_SESS_STATE_FREE                    = 0x1,
+       TARG_SESS_STATE_ACTIVE                  = 0x2,
+       TARG_SESS_STATE_LOGGED_IN               = 0x3,
+       TARG_SESS_STATE_FAILED                  = 0x4,
+       TARG_SESS_STATE_IN_CONTINUE             = 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+       ISCSI_RX_DATA   = 1,
+       ISCSI_TX_DATA   = 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+       DATAIN_COMPLETE_NORMAL                  = 1,
+       DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+       DATAIN_COMPLETE_CONNECTION_RECOVERY     = 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+       DATAIN_WITHIN_COMMAND_RECOVERY          = 1,
+       DATAIN_CONNECTION_RECOVERY              = 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+       TPG_STATE_FREE                          = 0,
+       TPG_STATE_ACTIVE                        = 1,
+       TPG_STATE_INACTIVE                      = 2,
+       TPG_STATE_COLD_RESET                    = 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+       TIQN_STATE_ACTIVE                       = 1,
+       TIQN_STATE_SHUTDOWN                     = 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+       ICF_GOT_LAST_DATAOUT                    = 0x00000001,
+       ICF_GOT_DATACK_SNACK                    = 0x00000002,
+       ICF_NON_IMMEDIATE_UNSOLICITED_DATA      = 0x00000004,
+       ICF_SENT_LAST_R2T                       = 0x00000008,
+       ICF_WITHIN_COMMAND_RECOVERY             = 0x00000010,
+       ICF_CONTIG_MEMORY                       = 0x00000020,
+       ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
+       ICF_OOO_CMDSN                           = 0x00000080,
+       ICF_SENDTARGETS_ALL                     = 0x00000100,
+       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+       ISTATE_NO_STATE                 = 0,
+       ISTATE_NEW_CMD                  = 1,
+       ISTATE_DEFERRED_CMD             = 2,
+       ISTATE_UNSOLICITED_DATA         = 3,
+       ISTATE_RECEIVE_DATAOUT          = 4,
+       ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
+       ISTATE_RECEIVED_LAST_DATAOUT    = 6,
+       ISTATE_WITHIN_DATAOUT_RECOVERY  = 7,
+       ISTATE_IN_CONNECTION_RECOVERY   = 8,
+       ISTATE_RECEIVED_TASKMGT         = 9,
+       ISTATE_SEND_ASYNCMSG            = 10,
+       ISTATE_SENT_ASYNCMSG            = 11,
+       ISTATE_SEND_DATAIN              = 12,
+       ISTATE_SEND_LAST_DATAIN         = 13,
+       ISTATE_SENT_LAST_DATAIN         = 14,
+       ISTATE_SEND_LOGOUTRSP           = 15,
+       ISTATE_SENT_LOGOUTRSP           = 16,
+       ISTATE_SEND_NOPIN               = 17,
+       ISTATE_SENT_NOPIN               = 18,
+       ISTATE_SEND_REJECT              = 19,
+       ISTATE_SENT_REJECT              = 20,
+       ISTATE_SEND_R2T                 = 21,
+       ISTATE_SENT_R2T                 = 22,
+       ISTATE_SEND_R2T_RECOVERY        = 23,
+       ISTATE_SENT_R2T_RECOVERY        = 24,
+       ISTATE_SEND_LAST_R2T            = 25,
+       ISTATE_SENT_LAST_R2T            = 26,
+       ISTATE_SEND_LAST_R2T_RECOVERY   = 27,
+       ISTATE_SENT_LAST_R2T_RECOVERY   = 28,
+       ISTATE_SEND_STATUS              = 29,
+       ISTATE_SEND_STATUS_BROKEN_PC    = 30,
+       ISTATE_SENT_STATUS              = 31,
+       ISTATE_SEND_STATUS_RECOVERY     = 32,
+       ISTATE_SENT_STATUS_RECOVERY     = 33,
+       ISTATE_SEND_TASKMGTRSP          = 34,
+       ISTATE_SENT_TASKMGTRSP          = 35,
+       ISTATE_SEND_TEXTRSP             = 36,
+       ISTATE_SENT_TEXTRSP             = 37,
+       ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
+       ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
+       ISTATE_SEND_NOPIN_NO_RESPONSE   = 40,
+       ISTATE_REMOVE                   = 41,
+       ISTATE_FREE                     = 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+       CMDSN_ERROR_CANNOT_RECOVER      = -1,
+       CMDSN_NORMAL_OPERATION          = 0,
+       CMDSN_LOWER_THAN_EXP            = 1,
+       CMDSN_HIGHER_THAN_EXP           = 2,
+       CMDSN_MAXCMDSN_OVERRUN          = 3,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immedate_data_ret_table {
+       IMMEDIATE_DATA_CANNOT_RECOVER   = -1,
+       IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+       IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+       DATAOUT_CANNOT_RECOVER          = -1,
+       DATAOUT_NORMAL                  = 0,
+       DATAOUT_SEND_R2T                = 1,
+       DATAOUT_SEND_TO_TRANSPORT       = 2,
+       DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+       NAF_USERID_SET                  = 0x01,
+       NAF_PASSWORD_SET                = 0x02,
+       NAF_USERID_IN_SET               = 0x04,
+       NAF_PASSWORD_IN_SET             = 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+       ISCSI_TF_RUNNING                = 0x01,
+       ISCSI_TF_STOP                   = 0x02,
+       ISCSI_TF_EXPIRED                = 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+       NPF_IP_NETWORK          = 0x00,
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+       ISCSI_NP_THREAD_ACTIVE          = 1,
+       ISCSI_NP_THREAD_INACTIVE        = 2,
+       ISCSI_NP_THREAD_RESET           = 3,
+       ISCSI_NP_THREAD_SHUTDOWN        = 4,
+       ISCSI_NP_THREAD_EXIT            = 5,
+};
+
+struct iscsi_conn_ops {
+       u8      HeaderDigest;                   /* [0,1] == [None,CRC32C] */
+       u8      DataDigest;                     /* [0,1] == [None,CRC32C] */
+       u32     MaxRecvDataSegmentLength;       /* [512..2**24-1] */
+       u32     MaxXmitDataSegmentLength;       /* [512..2**24-1] */
+       u8      OFMarker;                       /* [0,1] == [No,Yes] */
+       u8      IFMarker;                       /* [0,1] == [No,Yes] */
+       u32     OFMarkInt;                      /* [1..65535] */
+       u32     IFMarkInt;                      /* [1..65535] */
+       /*
+        * iSER specific connection parameters
+        */
+       u32     InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
+       u32     TargetRecvDataSegmentLength;    /* [512..2**24-1] */
+};
+
+struct iscsi_sess_ops {
+       char    InitiatorName[224];
+       char    InitiatorAlias[256];
+       char    TargetName[224];
+       char    TargetAlias[256];
+       char    TargetAddress[256];
+       u16     TargetPortalGroupTag;           /* [0..65535] */
+       u16     MaxConnections;                 /* [1..65535] */
+       u8      InitialR2T;                     /* [0,1] == [No,Yes] */
+       u8      ImmediateData;                  /* [0,1] == [No,Yes] */
+       u32     MaxBurstLength;                 /* [512..2**24-1] */
+       u32     FirstBurstLength;               /* [512..2**24-1] */
+       u16     DefaultTime2Wait;               /* [0..3600] */
+       u16     DefaultTime2Retain;             /* [0..3600] */
+       u16     MaxOutstandingR2T;              /* [1..65535] */
+       u8      DataPDUInOrder;                 /* [0,1] == [No,Yes] */
+       u8      DataSequenceInOrder;            /* [0,1] == [No,Yes] */
+       u8      ErrorRecoveryLevel;             /* [0..2] */
+       u8      SessionType;                    /* [0,1] == [Normal,Discovery]*/
+       /*
+        * iSER specific session parameters
+        */
+       u8      RDMAExtensions;                 /* [0,1] == [No,Yes] */
+};
+
+struct iscsi_queue_req {
+       int                     state;
+       struct iscsi_cmd        *cmd;
+       struct list_head        qr_list;
+};
+
+struct iscsi_data_count {
+       int                     data_length;
+       int                     sync_and_steering;
+       enum data_count_type    type;
+       u32                     iov_count;
+       u32                     ss_iov_count;
+       u32                     ss_marker_count;
+       struct kvec             *iov;
+};
+
+struct iscsi_param_list {
+       bool                    iser;
+       struct list_head        param_list;
+       struct list_head        extra_response_list;
+};
+
+struct iscsi_datain_req {
+       enum datain_req_comp_table dr_complete;
+       int                     generate_recovery_values;
+       enum datain_req_rec_table recovery;
+       u32                     begrun;
+       u32                     runlength;
+       u32                     data_length;
+       u32                     data_offset;
+       u32                     data_sn;
+       u32                     next_burst_len;
+       u32                     read_data_done;
+       u32                     seq_send_order;
+       struct list_head        cmd_datain_node;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+       u16                     cid;
+       u32                     batch_count;
+       u32                     cmdsn;
+       u32                     exp_cmdsn;
+       struct iscsi_cmd        *cmd;
+       struct list_head        ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+       u8                      flags;
+       u32                     data_sn;
+       u32                     length;
+       u32                     offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+       int                     seq_complete;
+       int                     recovery_r2t;
+       int                     sent_r2t;
+       u32                     r2t_sn;
+       u32                     offset;
+       u32                     targ_xfer_tag;
+       u32                     xfer_len;
+       struct list_head        r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+       enum iscsi_timer_flags_table dataout_timer_flags;
+       /* DataOUT timeout retries */
+       u8                      dataout_timeout_retries;
+       /* Within command recovery count */
+       u8                      error_recovery_count;
+       /* iSCSI dependent state for out or order CmdSNs */
+       enum cmd_i_state_table  deferred_i_state;
+       /* iSCSI dependent state */
+       enum cmd_i_state_table  i_state;
+       /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+       u8                      immediate_cmd;
+       /* Immediate data present */
+       u8                      immediate_data;
+       /* iSCSI Opcode */
+       u8                      iscsi_opcode;
+       /* iSCSI Response Code */
+       u8                      iscsi_response;
+       /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+       u8                      logout_reason;
+       /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+       u8                      logout_response;
+       /* MaxCmdSN has been incremented */
+       u8                      maxcmdsn_inc;
+       /* Immediate Unsolicited Dataout */
+       u8                      unsolicited_data;
+       /* Reject reason code */
+       u8                      reject_reason;
+       /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+       u16                     logout_cid;
+       /* Command flags */
+       enum cmd_flags_table    cmd_flags;
+       /* Initiator Task Tag assigned from Initiator */
+       itt_t                   init_task_tag;
+       /* Target Transfer Tag assigned from Target */
+       u32                     targ_xfer_tag;
+       /* CmdSN assigned from Initiator */
+       u32                     cmd_sn;
+       /* ExpStatSN assigned from Initiator */
+       u32                     exp_stat_sn;
+       /* StatSN assigned to this ITT */
+       u32                     stat_sn;
+       /* DataSN Counter */
+       u32                     data_sn;
+       /* R2TSN Counter */
+       u32                     r2t_sn;
+       /* Last DataSN acknowledged via DataAck SNACK */
+       u32                     acked_data_sn;
+       /* Used for echoing NOPOUT ping data */
+       u32                     buf_ptr_size;
+       /* Used to store DataDigest */
+       u32                     data_crc;
+       /* Counter for MaxOutstandingR2T */
+       u32                     outstanding_r2ts;
+       /* Next R2T Offset when DataSequenceInOrder=Yes */
+       u32                     r2t_offset;
+       /* Iovec current and orig count for iscsi_cmd->iov_data */
+       u32                     iov_data_count;
+       u32                     orig_iov_data_count;
+       /* Number of miscellaneous iovecs used for IP stack calls */
+       u32                     iov_misc_count;
+       /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+       u32                     pdu_count;
+       /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+       u32                     pdu_send_order;
+       /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+       u32                     pdu_start;
+       /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+       u32                     seq_send_order;
+       /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+       u32                     seq_count;
+       /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+       u32                     seq_no;
+       /* Lowest offset in current DataOUT sequence */
+       u32                     seq_start_offset;
+       /* Highest offset in current DataOUT sequence */
+       u32                     seq_end_offset;
+       /* Total size in bytes received so far of READ data */
+       u32                     read_data_done;
+       /* Total size in bytes received so far of WRITE data */
+       u32                     write_data_done;
+       /* Counter for FirstBurstLength key */
+       u32                     first_burst_len;
+       /* Counter for MaxBurstLength key */
+       u32                     next_burst_len;
+       /* Transfer size used for IP stack calls */
+       u32                     tx_size;
+       /* Buffer used for various purposes */
+       void                    *buf_ptr;
+       /* Used by SendTargets=[iqn.,eui.] discovery */
+       void                    *text_in_ptr;
+       /* See include/linux/dma-mapping.h */
+       enum dma_data_direction data_direction;
+       /* iSCSI PDU Header + CRC */
+       unsigned char           pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+       /* Number of times struct iscsi_cmd is present in immediate queue */
+       atomic_t                immed_queue_count;
+       atomic_t                response_queue_count;
+       spinlock_t              datain_lock;
+       spinlock_t              dataout_timeout_lock;
+       /* spinlock for protecting struct iscsi_cmd->i_state */
+       spinlock_t              istate_lock;
+       /* spinlock for adding within command recovery entries */
+       spinlock_t              error_lock;
+       /* spinlock for adding R2Ts */
+       spinlock_t              r2t_lock;
+       /* DataIN List */
+       struct list_head        datain_list;
+       /* R2T List */
+       struct list_head        cmd_r2t_list;
+       /* Timer for DataOUT */
+       struct timer_list       dataout_timer;
+       /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+       struct kvec             *iov_data;
+       /* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS                      5
+       struct kvec             iov_misc[ISCSI_MISC_IOVECS];
+       /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+       struct iscsi_pdu        *pdu_list;
+       /* Current struct iscsi_pdu used for DataPDUInOrder=No */
+       struct iscsi_pdu        *pdu_ptr;
+       /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+       struct iscsi_seq        *seq_list;
+       /* Current struct iscsi_seq used for DataSequenceInOrder=No */
+       struct iscsi_seq        *seq_ptr;
+       /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+       struct iscsi_tmr_req    *tmr_req;
+       /* Connection this command is allegiant to */
+       struct iscsi_conn       *conn;
+       /* Pointer to connection recovery entry */
+       struct iscsi_conn_recovery *cr;
+       /* Session the command is part of,  used for connection recovery */
+       struct iscsi_session    *sess;
+       /* list_head for connection list */
+       struct list_head        i_conn_node;
+       /* The TCM I/O descriptor that is accessed via container_of() */
+       struct se_cmd           se_cmd;
+       /* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN          (TRANSPORT_SENSE_BUFFER + 2)
+       unsigned char           sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+       u32                     padding;
+       u8                      pad_bytes[4];
+
+       struct scatterlist      *first_data_sg;
+       u32                     first_data_sg_off;
+       u32                     kmapped_nents;
+       sense_reason_t          sense_reason;
+}  ____cacheline_aligned;
+
+struct iscsi_tmr_req {
+       bool                    task_reassign:1;
+       u32                     exp_data_sn;
+       struct iscsi_cmd        *ref_cmd;
+       struct iscsi_conn_recovery *conn_recovery;
+       struct se_tmr_req       *se_tmr_req;
+};
+
+struct iscsi_conn {
+       wait_queue_head_t       queues_wq;
+       /* Authentication Successful for this connection */
+       u8                      auth_complete;
+       /* State connection is currently in */
+       u8                      conn_state;
+       u8                      conn_logout_reason;
+       u8                      network_transport;
+       enum iscsi_timer_flags_table nopin_timer_flags;
+       enum iscsi_timer_flags_table nopin_response_timer_flags;
+       /* Used to know what thread encountered a transport failure */
+       u8                      which_thread;
+       /* connection id assigned by the Initiator */
+       u16                     cid;
+       /* Remote TCP Port */
+       u16                     login_port;
+       u16                     local_port;
+       int                     net_size;
+       int                     login_family;
+       u32                     auth_id;
+       u32                     conn_flags;
+       /* Used for iscsi_tx_login_rsp() */
+       itt_t                   login_itt;
+       u32                     exp_statsn;
+       /* Per connection status sequence number */
+       u32                     stat_sn;
+       /* IFMarkInt's Current Value */
+       u32                     if_marker;
+       /* OFMarkInt's Current Value */
+       u32                     of_marker;
+       /* Used for calculating OFMarker offset to next PDU */
+       u32                     of_marker_offset;
+#define IPV6_ADDRESS_SPACE                             48
+       unsigned char           login_ip[IPV6_ADDRESS_SPACE];
+       unsigned char           local_ip[IPV6_ADDRESS_SPACE];
+       int                     conn_usage_count;
+       int                     conn_waiting_on_uc;
+       atomic_t                check_immediate_queue;
+       atomic_t                conn_logout_remove;
+       atomic_t                connection_exit;
+       atomic_t                connection_recovery;
+       atomic_t                connection_reinstatement;
+       atomic_t                connection_wait_rcfr;
+       atomic_t                sleep_on_conn_wait_comp;
+       atomic_t                transport_failed;
+       struct completion       conn_post_wait_comp;
+       struct completion       conn_wait_comp;
+       struct completion       conn_wait_rcfr_comp;
+       struct completion       conn_waiting_on_uc_comp;
+       struct completion       conn_logout_comp;
+       struct completion       tx_half_close_comp;
+       struct completion       rx_half_close_comp;
+       /* socket used by this connection */
+       struct socket           *sock;
+       void                    (*orig_data_ready)(struct sock *);
+       void                    (*orig_state_change)(struct sock *);
+#define LOGIN_FLAGS_READ_ACTIVE                1
+#define LOGIN_FLAGS_CLOSED             2
+#define LOGIN_FLAGS_READY              4
+       unsigned long           login_flags;
+       struct delayed_work     login_work;
+       struct delayed_work     login_cleanup_work;
+       struct iscsi_login      *login;
+       struct timer_list       nopin_timer;
+       struct timer_list       nopin_response_timer;
+       struct timer_list       transport_timer;
+       struct task_struct      *login_kworker;
+       /* Spinlock used for add/deleting cmd's from conn_cmd_list */
+       spinlock_t              cmd_lock;
+       spinlock_t              conn_usage_lock;
+       spinlock_t              immed_queue_lock;
+       spinlock_t              nopin_timer_lock;
+       spinlock_t              response_queue_lock;
+       spinlock_t              state_lock;
+       /* libcrypto RX and TX contexts for crc32c */
+       struct hash_desc        conn_rx_hash;
+       struct hash_desc        conn_tx_hash;
+       /* Used for scheduling TX and RX connection kthreads */
+       cpumask_var_t           conn_cpumask;
+       unsigned int            conn_rx_reset_cpumask:1;
+       unsigned int            conn_tx_reset_cpumask:1;
+       /* list_head of struct iscsi_cmd for this connection */
+       struct list_head        conn_cmd_list;
+       struct list_head        immed_queue_list;
+       struct list_head        response_queue_list;
+       struct iscsi_conn_ops   *conn_ops;
+       struct iscsi_login      *conn_login;
+       struct iscsit_transport *conn_transport;
+       struct iscsi_param_list *param_list;
+       /* Used for per connection auth state machine */
+       void                    *auth_protocol;
+       void                    *context;
+       struct iscsi_login_thread_s *login_thread;
+       struct iscsi_portal_group *tpg;
+       struct iscsi_tpg_np     *tpg_np;
+       /* Pointer to parent session */
+       struct iscsi_session    *sess;
+       /* Pointer to thread_set in use for this conn's threads */
+       struct iscsi_thread_set *thread_set;
+       /* list_head for session connection list */
+       struct list_head        conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+       u16                     cid;
+       u32                     cmd_count;
+       u32                     maxrecvdatasegmentlength;
+       u32                     maxxmitdatasegmentlength;
+       int                     ready_for_reallegiance;
+       struct list_head        conn_recovery_cmd_list;
+       spinlock_t              conn_recovery_cmd_lock;
+       struct timer_list       time2retain_timer;
+       struct iscsi_session    *sess;
+       struct list_head        cr_list;
+}  ____cacheline_aligned;
+
+struct iscsi_session {
+       u8                      initiator_vendor;
+       u8                      isid[6];
+       enum iscsi_timer_flags_table time2retain_timer_flags;
+       u8                      version_active;
+       u16                     cid_called;
+       u16                     conn_recovery_count;
+       u16                     tsih;
+       /* state session is currently in */
+       u32                     session_state;
+       /* session wide counter: initiator assigned task tag */
+       itt_t                   init_task_tag;
+       /* session wide counter: target assigned task tag */
+       u32                     targ_xfer_tag;
+       u32                     cmdsn_window;
+
+       /* protects cmdsn values */
+       struct mutex            cmdsn_mutex;
+       /* session wide counter: expected command sequence number */
+       u32                     exp_cmd_sn;
+       /* session wide counter: maximum allowed command sequence number */
+       u32                     max_cmd_sn;
+       struct list_head        sess_ooo_cmdsn_list;
+
+       /* LIO specific session ID */
+       u32                     sid;
+       char                    auth_type[8];
+       /* unique within the target */
+       int                     session_index;
+       /* Used for session reference counting */
+       int                     session_usage_count;
+       int                     session_waiting_on_uc;
+       atomic_long_t           cmd_pdus;
+       atomic_long_t           rsp_pdus;
+       atomic_long_t           tx_data_octets;
+       atomic_long_t           rx_data_octets;
+       atomic_long_t           conn_digest_errors;
+       atomic_long_t           conn_timeout_errors;
+       u64                     creation_time;
+       /* Number of active connections */
+       atomic_t                nconn;
+       atomic_t                session_continuation;
+       atomic_t                session_fall_back_to_erl0;
+       atomic_t                session_logout;
+       atomic_t                session_reinstatement;
+       atomic_t                session_stop_active;
+       atomic_t                sleep_on_sess_wait_comp;
+       /* connection list */
+       struct list_head        sess_conn_list;
+       struct list_head        cr_active_list;
+       struct list_head        cr_inactive_list;
+       spinlock_t              conn_lock;
+       spinlock_t              cr_a_lock;
+       spinlock_t              cr_i_lock;
+       spinlock_t              session_usage_lock;
+       spinlock_t              ttt_lock;
+       struct completion       async_msg_comp;
+       struct completion       reinstatement_comp;
+       struct completion       session_wait_comp;
+       struct completion       session_waiting_on_uc_comp;
+       struct timer_list       time2retain_timer;
+       struct iscsi_sess_ops   *sess_ops;
+       struct se_session       *se_sess;
+       struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+       u8 auth_complete;
+       u8 checked_for_existing;
+       u8 current_stage;
+       u8 leading_connection;
+       u8 first_request;
+       u8 version_min;
+       u8 version_max;
+       u8 login_complete;
+       u8 login_failed;
+       bool zero_tsih;
+       char isid[6];
+       u32 cmd_sn;
+       itt_t init_task_tag;
+       u32 initial_exp_statsn;
+       u32 rsp_length;
+       u16 cid;
+       u16 tsih;
+       char req[ISCSI_HDR_LEN];
+       char rsp[ISCSI_HDR_LEN];
+       char *req_buf;
+       char *rsp_buf;
+       struct iscsi_conn *conn;
+       struct iscsi_np *np;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+       u32                     dataout_timeout;
+       u32                     dataout_timeout_retries;
+       u32                     default_erl;
+       u32                     nopin_timeout;
+       u32                     nopin_response_timeout;
+       u32                     random_datain_pdu_offsets;
+       u32                     random_datain_seq_offsets;
+       u32                     random_r2t_offsets;
+       u32                     tmr_cold_reset;
+       u32                     tmr_warm_reset;
+       struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+       enum naf_flags_table    naf_flags;
+       int                     authenticate_target;
+       /* Used for iscsit_global->discovery_auth,
+        * set to zero (auth disabled) by default */
+       int                     enforce_discovery_auth;
+#define MAX_USER_LEN                           256
+#define MAX_PASS_LEN                           256
+       char                    userid[MAX_USER_LEN];
+       char                    password[MAX_PASS_LEN];
+       char                    userid_mutual[MAX_USER_LEN];
+       char                    password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+       struct config_group     iscsi_sess_stats_group;
+       struct config_group     iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+       struct iscsi_node_attrib node_attrib;
+       struct iscsi_node_auth  node_auth;
+       struct iscsi_node_stat_grps node_stat_grps;
+       struct se_node_acl      se_node_acl;
+};
+
+struct iscsi_tpg_attrib {
+       u32                     authentication;
+       u32                     login_timeout;
+       u32                     netif_timeout;
+       u32                     generate_node_acls;
+       u32                     cache_dynamic_acls;
+       u32                     default_cmdsn_depth;
+       u32                     demo_mode_write_protect;
+       u32                     prod_mode_write_protect;
+       u32                     demo_mode_discovery;
+       u32                     default_erl;
+       u8                      t10_pi;
+       struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+       int                     np_network_transport;
+       int                     np_ip_proto;
+       int                     np_sock_type;
+       enum np_thread_state_table np_thread_state;
+       bool                    enabled;
+       enum iscsi_timer_flags_table np_login_timer_flags;
+       u32                     np_exports;
+       enum np_flags_table     np_flags;
+       unsigned char           np_ip[IPV6_ADDRESS_SPACE];
+       u16                     np_port;
+       spinlock_t              np_thread_lock;
+       struct completion       np_restart_comp;
+       struct socket           *np_socket;
+       struct __kernel_sockaddr_storage np_sockaddr;
+       struct task_struct      *np_thread;
+       struct timer_list       np_login_timer;
+       void                    *np_context;
+       struct iscsit_transport *np_transport;
+       struct list_head        np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+       struct iscsi_np         *tpg_np;
+       struct iscsi_portal_group *tpg;
+       struct iscsi_tpg_np     *tpg_np_parent;
+       struct list_head        tpg_np_list;
+       struct list_head        tpg_np_child_list;
+       struct list_head        tpg_np_parent_list;
+       struct se_tpg_np        se_tpg_np;
+       spinlock_t              tpg_np_parent_lock;
+       struct completion       tpg_np_comp;
+       struct kref             tpg_np_kref;
+};
+
+struct iscsi_portal_group {
+       unsigned char           tpg_chap_id;
+       /* TPG State */
+       enum tpg_state_table    tpg_state;
+       /* Target Portal Group Tag */
+       u16                     tpgt;
+       /* Id assigned to target sessions */
+       u16                     ntsih;
+       /* Number of active sessions */
+       u32                     nsessions;
+       /* Number of Network Portals available for this TPG */
+       u32                     num_tpg_nps;
+       /* Per TPG LIO specific session ID. */
+       u32                     sid;
+       /* Spinlock for adding/removing Network Portals */
+       spinlock_t              tpg_np_lock;
+       spinlock_t              tpg_state_lock;
+       struct se_portal_group tpg_se_tpg;
+       struct mutex            tpg_access_lock;
+       struct semaphore        np_login_sem;
+       struct iscsi_tpg_attrib tpg_attrib;
+       struct iscsi_node_auth  tpg_demo_auth;
+       /* Pointer to default list of iSCSI parameters for TPG */
+       struct iscsi_param_list *param_list;
+       struct iscsi_tiqn       *tpg_tiqn;
+       struct list_head        tpg_gnp_list;
+       struct list_head        tpg_list;
+} ____cacheline_aligned;
+
+struct iscsi_wwn_stat_grps {
+       struct config_group     iscsi_stat_group;
+       struct config_group     iscsi_instance_group;
+       struct config_group     iscsi_sess_err_group;
+       struct config_group     iscsi_tgt_attr_group;
+       struct config_group     iscsi_login_stats_group;
+       struct config_group     iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN                          224
+       unsigned char           tiqn[ISCSI_IQN_LEN];
+       enum tiqn_state_table   tiqn_state;
+       int                     tiqn_access_count;
+       u32                     tiqn_active_tpgs;
+       u32                     tiqn_ntpgs;
+       u32                     tiqn_num_tpg_nps;
+       u32                     tiqn_nsessions;
+       struct list_head        tiqn_list;
+       struct list_head        tiqn_tpg_list;
+       spinlock_t              tiqn_state_lock;
+       spinlock_t              tiqn_tpg_lock;
+       struct se_wwn           tiqn_wwn;
+       struct iscsi_wwn_stat_grps tiqn_stat_grps;
+       int                     tiqn_index;
+       struct iscsi_sess_err_stats  sess_err_stats;
+       struct iscsi_login_stats     login_stats;
+       struct iscsi_logout_stats    logout_stats;
+} ____cacheline_aligned;
+
+struct iscsit_global {
+       /* In core shutdown */
+       u32                     in_shutdown;
+       u32                     active_ts;
+       /* Unique identifier used for the authentication daemon */
+       u32                     auth_id;
+       u32                     inactive_ts;
+       /* Thread Set bitmap count */
+       int                     ts_bitmap_count;
+       /* Thread Set bitmap pointer */
+       unsigned long           *ts_bitmap;
+       /* Used for iSCSI discovery session authentication */
+       struct iscsi_node_acl   discovery_acl;
+       struct iscsi_portal_group       *discovery_tpg;
+};
+
+static inline u32 session_get_next_ttt(struct iscsi_session *session)
+{
+       u32 ttt;
+
+       spin_lock_bh(&session->ttt_lock);
+       ttt = session->targ_xfer_tag++;
+       if (ttt == 0xFFFFFFFF)
+               ttt = session->targ_xfer_tag++;
+       spin_unlock_bh(&session->ttt_lock);
+
+       return ttt;
+}
+
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+#endif /* ISCSI_TARGET_CORE_H */
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
new file mode 100644 (file)
index 0000000..3ff76b4
--- /dev/null
@@ -0,0 +1,64 @@
+#ifndef ISCSI_TARGET_STAT_H
+#define ISCSI_TARGET_STAT_H
+
+/*
+ * For struct iscsi_tiqn->tiqn_wwn default groups
+ */
+extern struct config_item_type iscsi_stat_instance_cit;
+extern struct config_item_type iscsi_stat_sess_err_cit;
+extern struct config_item_type iscsi_stat_tgt_attr_cit;
+extern struct config_item_type iscsi_stat_login_cit;
+extern struct config_item_type iscsi_stat_logout_cit;
+
+/*
+ * For struct iscsi_session->se_sess default groups
+ */
+extern struct config_item_type iscsi_stat_sess_cit;
+
+/* iSCSI session error types */
+#define ISCSI_SESS_ERR_UNKNOWN         0
+#define ISCSI_SESS_ERR_DIGEST          1
+#define ISCSI_SESS_ERR_CXN_TIMEOUT     2
+#define ISCSI_SESS_ERR_PDU_FORMAT      3
+
+/* iSCSI session error stats */
+struct iscsi_sess_err_stats {
+       spinlock_t      lock;
+       u32             digest_errors;
+       u32             cxn_timeout_errors;
+       u32             pdu_format_errors;
+       u32             last_sess_failure_type;
+       char            last_sess_fail_rem_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI login failure types (sub oids) */
+#define ISCSI_LOGIN_FAIL_OTHER         2
+#define ISCSI_LOGIN_FAIL_REDIRECT      3
+#define ISCSI_LOGIN_FAIL_AUTHORIZE     4
+#define ISCSI_LOGIN_FAIL_AUTHENTICATE  5
+#define ISCSI_LOGIN_FAIL_NEGOTIATE     6
+
+/* iSCSI login stats */
+struct iscsi_login_stats {
+       spinlock_t      lock;
+       u32             accepts;
+       u32             other_fails;
+       u32             redirects;
+       u32             authorize_fails;
+       u32             authenticate_fails;
+       u32             negotiate_fails;        /* used for notifications */
+       u64             last_fail_time;         /* time stamp (jiffies) */
+       u32             last_fail_type;
+       int             last_intr_fail_ip_family;
+       unsigned char   last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+       char            last_intr_fail_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI logout stats */
+struct iscsi_logout_stats {
+       spinlock_t      lock;
+       u32             normal_logouts;
+       u32             abnormal_logouts;
+} ____cacheline_aligned;
+
+#endif   /*** ISCSI_TARGET_STAT_H ***/
index daef9daa500c11f0ff7d92151fafbf9080a80e02..e6bb166f12c212aac238d0d951594cce65851145 100644 (file)
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 #include <linux/list.h>
-#include "../../../drivers/target/iscsi/iscsi_target_core.h"
+#include "iscsi_target_core.h"
 
 struct iscsit_transport {
 #define ISCSIT_TRANSPORT_NAME  16
index 4a8795a87b9e99f30ee07f43fdce3984ddc658e0..672150b6aaf52bc24c640d16f1f7c841e0be655d 100644 (file)
@@ -407,7 +407,7 @@ struct t10_reservation {
        /* Activate Persistence across Target Power Loss enabled
         * for SCSI device */
        int pr_aptpl_active;
-#define PR_APTPL_BUF_LEN                       8192
+#define PR_APTPL_BUF_LEN                       262144
        u32 pr_generation;
        spinlock_t registration_lock;
        spinlock_t aptpl_reg_lock;
index 611e1c5893b490d93d0c87a1a2d383d6d8ef435d..b6dec05c7196a22511e346242724406eef88265b 100644 (file)
@@ -495,8 +495,7 @@ struct btrfs_ioctl_send_args {
 
 /* Error codes as returned by the kernel */
 enum btrfs_err_code {
-       notused,
-       BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
+       BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1,
        BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
        BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
        BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
index e711f20dc522ee22c728a338a203b6ce4c27d22a..6497d7933d5be0c97ffa2cf629895ca41230483c 100644 (file)
@@ -78,6 +78,70 @@ struct ieee_maxrate {
        __u64   tc_maxrate[IEEE_8021QAZ_MAX_TCS];
 };
 
+enum dcbnl_cndd_states {
+       DCB_CNDD_RESET = 0,
+       DCB_CNDD_EDGE,
+       DCB_CNDD_INTERIOR,
+       DCB_CNDD_INTERIOR_READY,
+};
+
+/* This structure contains the IEEE 802.1Qau QCN managed object.
+ *
+ *@rpg_enable: enable QCN RP
+ *@rppp_max_rps: maximum number of RPs allowed for this CNPV on this port
+ *@rpg_time_reset: time between rate increases if no CNMs received.
+ *                given in u-seconds
+ *@rpg_byte_reset: transmitted data between rate increases if no CNMs received.
+ *                given in Bytes
+ *@rpg_threshold: The number of times rpByteStage or rpTimeStage can count
+ *                before RP rate control state machine advances states
+ *@rpg_max_rate: the maximum rate, in Mbits per second,
+ *              at which an RP can transmit
+ *@rpg_ai_rate: The rate, in Mbits per second,
+ *             used to increase rpTargetRate in the RPR_ACTIVE_INCREASE
+ *@rpg_hai_rate: The rate, in Mbits per second,
+ *              used to increase rpTargetRate in the RPR_HYPER_INCREASE state
+ *@rpg_gd: Upon CNM receive, flow rate is limited to (Fb/Gd)*CurrentRate.
+ *        rpgGd is given as log2(Gd), where Gd may only be powers of 2
+ *@rpg_min_dec_fac: The minimum factor by which the current transmit rate
+ *                 can be changed by reception of a CNM.
+ *                 value is given as percentage (1-100)
+ *@rpg_min_rate: The minimum value, in bits per second, for rate to limit
+ *@cndd_state_machine: The state of the congestion notification domain
+ *                    defense state machine, as defined by IEEE 802.1Qau
+ *                    section 32.1.1. In the interior ready state,
+ *                    the QCN capable hardware may add CN-TAG TLV to the
+ *                    outgoing traffic, to specifically identify outgoing
+ *                    flows.
+ */
+
+struct ieee_qcn {
+       __u8 rpg_enable[IEEE_8021QAZ_MAX_TCS];
+       __u32 rppp_max_rps[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_time_reset[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_byte_reset[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_threshold[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_max_rate[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_ai_rate[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_hai_rate[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_gd[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_min_dec_fac[IEEE_8021QAZ_MAX_TCS];
+       __u32 rpg_min_rate[IEEE_8021QAZ_MAX_TCS];
+       __u32 cndd_state_machine[IEEE_8021QAZ_MAX_TCS];
+};
+
+/* This structure contains the IEEE 802.1Qau QCN statistics.
+ *
+ *@rppp_rp_centiseconds: the number of RP-centiseconds accumulated
+ *                      by RPs at this priority level on this Port
+ *@rppp_created_rps: number of active RPs(flows) that react to CNMs
+ */
+
+struct ieee_qcn_stats {
+       __u64 rppp_rp_centiseconds[IEEE_8021QAZ_MAX_TCS];
+       __u32 rppp_created_rps[IEEE_8021QAZ_MAX_TCS];
+};
+
 /* This structure contains the IEEE 802.1Qaz PFC managed object
  *
  * @pfc_cap: Indicates the number of traffic classes on the local device
@@ -334,6 +398,8 @@ enum ieee_attrs {
        DCB_ATTR_IEEE_PEER_PFC,
        DCB_ATTR_IEEE_PEER_APP,
        DCB_ATTR_IEEE_MAXRATE,
+       DCB_ATTR_IEEE_QCN,
+       DCB_ATTR_IEEE_QCN_STATS,
        __DCB_ATTR_IEEE_MAX
 };
 #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
index dfd0bb22e554e7d7ac2cb45e2c4021b901346f38..756436e1ce8970a215c27d52816ed068deea33e2 100644 (file)
@@ -247,6 +247,7 @@ enum {
        IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
        IFLA_BRPORT_PROXYARP,   /* proxy ARP */
        IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
+       IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
index 26386cf3db444cbca7bc9e7138f8b0e01c0669b6..aef9a81b2d75b9524000f3b1b1bf858464e575e2 100644 (file)
@@ -115,7 +115,13 @@ struct nvme_id_ns {
        __le16                  nawun;
        __le16                  nawupf;
        __le16                  nacwu;
-       __u8                    rsvd40[80];
+       __le16                  nabsn;
+       __le16                  nabo;
+       __le16                  nabspf;
+       __u16                   rsvd46;
+       __le64                  nvmcap[2];
+       __u8                    rsvd64[40];
+       __u8                    nguid[16];
        __u8                    eui64[8];
        struct nvme_lbaf        lbaf[16];
        __u8                    rsvd192[192];
@@ -124,10 +130,22 @@ struct nvme_id_ns {
 
 enum {
        NVME_NS_FEAT_THIN       = 1 << 0,
+       NVME_NS_FLBAS_LBA_MASK  = 0xf,
+       NVME_NS_FLBAS_META_EXT  = 0x10,
        NVME_LBAF_RP_BEST       = 0,
        NVME_LBAF_RP_BETTER     = 1,
        NVME_LBAF_RP_GOOD       = 2,
        NVME_LBAF_RP_DEGRADED   = 3,
+       NVME_NS_DPC_PI_LAST     = 1 << 4,
+       NVME_NS_DPC_PI_FIRST    = 1 << 3,
+       NVME_NS_DPC_PI_TYPE3    = 1 << 2,
+       NVME_NS_DPC_PI_TYPE2    = 1 << 1,
+       NVME_NS_DPC_PI_TYPE1    = 1 << 0,
+       NVME_NS_DPS_PI_FIRST    = 1 << 3,
+       NVME_NS_DPS_PI_MASK     = 0x7,
+       NVME_NS_DPS_PI_TYPE1    = 1,
+       NVME_NS_DPS_PI_TYPE2    = 2,
+       NVME_NS_DPS_PI_TYPE3    = 3,
 };
 
 struct nvme_smart_log {
@@ -261,6 +279,10 @@ enum {
        NVME_RW_DSM_LATENCY_LOW         = 3 << 4,
        NVME_RW_DSM_SEQ_REQ             = 1 << 6,
        NVME_RW_DSM_COMPRESSED          = 1 << 7,
+       NVME_RW_PRINFO_PRCHK_REF        = 1 << 10,
+       NVME_RW_PRINFO_PRCHK_APP        = 1 << 11,
+       NVME_RW_PRINFO_PRCHK_GUARD      = 1 << 12,
+       NVME_RW_PRINFO_PRACT            = 1 << 13,
 };
 
 struct nvme_dsm_cmd {
@@ -549,6 +571,8 @@ struct nvme_passthru_cmd {
        __u32   result;
 };
 
+#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+
 #define nvme_admin_cmd nvme_passthru_cmd
 
 #define NVME_IOCTL_ID          _IO('N', 0x40)
index 89f63503f903dd25f6c6a99594f7ace7dc5bac68..31891d9535e2a4ede364627a805d6d346fae8b9c 100644 (file)
@@ -185,4 +185,9 @@ struct prctl_mm_map {
 #define PR_MPX_ENABLE_MANAGEMENT  43
 #define PR_MPX_DISABLE_MANAGEMENT 44
 
+#define PR_SET_FP_MODE         45
+#define PR_GET_FP_MODE         46
+# define PR_FP_MODE_FR         (1 << 0)        /* 64b FP registers */
+# define PR_FP_MODE_FRE                (1 << 1)        /* 32b compatibility */
+
 #endif /* _LINUX_PRCTL_H */
index 5cc5d66bf519f65cb4b29e041c3b06fdbe01c889..c3722b024e73afb6459ce3f1528bdef92cc18971 100644 (file)
@@ -303,6 +303,8 @@ enum rtattr_type_t {
        RTA_TABLE,
        RTA_MARK,
        RTA_MFC_STATS,
+       RTA_VIA,
+       RTA_NEWDST,
        __RTA_MAX
 };
 
@@ -332,6 +334,7 @@ struct rtnexthop {
 #define RTNH_F_DEAD            1       /* Nexthop is dead (used by multipath)  */
 #define RTNH_F_PERVASIVE       2       /* Do recursive gateway lookup  */
 #define RTNH_F_ONLINK          4       /* Gateway is forced on link    */
+#define RTNH_F_EXTERNAL                8       /* Route installed externally   */
 
 /* Macros to handle hexthops */
 
@@ -344,6 +347,12 @@ struct rtnexthop {
 #define RTNH_SPACE(len)        RTNH_ALIGN(RTNH_LENGTH(len))
 #define RTNH_DATA(rtnh)   ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0)))
 
+/* RTA_VIA */
+struct rtvia {
+       __kernel_sa_family_t    rtvia_family;
+       __u8                    rtvia_addr[0];
+};
+
 /* RTM_CACHEINFO */
 
 struct rta_cacheinfo {
@@ -623,6 +632,8 @@ enum rtnetlink_groups {
 #define RTNLGRP_IPV6_NETCONF   RTNLGRP_IPV6_NETCONF
        RTNLGRP_MDB,
 #define RTNLGRP_MDB            RTNLGRP_MDB
+       RTNLGRP_MPLS_ROUTE,
+#define RTNLGRP_MPLS_ROUTE     RTNLGRP_MPLS_ROUTE
        __RTNLGRP_MAX
 };
 #define RTNLGRP_MAX    (__RTNLGRP_MAX - 1)
index 19d5219b0b991eda86a5bb8a0274d35a5a88ce17..242cf0c6e33d37f229a224839ca6f679bb142674 100644 (file)
@@ -9,3 +9,4 @@ header-y += tc_pedit.h
 header-y += tc_skbedit.h
 header-y += tc_vlan.h
 header-y += tc_bpf.h
+header-y += tc_connmark.h
index 8d723824ad6934825d34077431fc43d11ae4a718..d4c8f142ba633d8e96d5a227ce1d91dc79c70235 100644 (file)
@@ -83,11 +83,20 @@ enum {
        TIPC_NLA_BEARER_NAME,           /* string */
        TIPC_NLA_BEARER_PROP,           /* nest */
        TIPC_NLA_BEARER_DOMAIN,         /* u32 */
+       TIPC_NLA_BEARER_UDP_OPTS,       /* nest */
 
        __TIPC_NLA_BEARER_MAX,
        TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1
 };
 
+enum {
+       TIPC_NLA_UDP_UNSPEC,
+       TIPC_NLA_UDP_LOCAL,             /* sockaddr_storage */
+       TIPC_NLA_UDP_REMOTE,            /* sockaddr_storage */
+
+       __TIPC_NLA_UDP_MAX,
+       TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
+};
 /* Socket info */
 enum {
        TIPC_NLA_SOCK_UNSPEC,
index 29715d27548f21b20303861c24f56faeab835cc9..82889c30f4f5a79fb820c1dd20e1e8a0f99b4bf0 100644 (file)
@@ -333,6 +333,7 @@ enum {
        VFIO_PCI_MSI_IRQ_INDEX,
        VFIO_PCI_MSIX_IRQ_INDEX,
        VFIO_PCI_ERR_IRQ_INDEX,
+       VFIO_PCI_REQ_IRQ_INDEX,
        VFIO_PCI_NUM_IRQS
 };
 
index be40f7059e939de1022b9a06f78cb0050b2cc149..4b0488f20b2ef5bfa9c3c4e94c4dba112a9d0527 100644 (file)
@@ -36,8 +36,7 @@
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12
 
-struct virtio_balloon_config
-{
+struct virtio_balloon_config {
        /* Number of pages host wants Guest to give up. */
        __le32 num_pages;
        /* Number of pages we've actually got in balloon. */
index 247c8ba8544a33ea896aa684b928bb9c3db512b5..3c53eec4ae22697ecb87522bf83de79f4e7e2b0e 100644 (file)
 #include <linux/virtio_types.h>
 
 /* Feature bits */
-#define VIRTIO_BLK_F_BARRIER   0       /* Does host support barriers? */
 #define VIRTIO_BLK_F_SIZE_MAX  1       /* Indicates maximum segment size */
 #define VIRTIO_BLK_F_SEG_MAX   2       /* Indicates maximum # of segments */
 #define VIRTIO_BLK_F_GEOMETRY  4       /* Legacy geometry available  */
 #define VIRTIO_BLK_F_RO                5       /* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE  6       /* Block size of disk is available*/
-#define VIRTIO_BLK_F_SCSI      7       /* Supports scsi command passthru */
-#define VIRTIO_BLK_F_WCE       9       /* Writeback mode enabled after reset */
 #define VIRTIO_BLK_F_TOPOLOGY  10      /* Topology information is available */
-#define VIRTIO_BLK_F_CONFIG_WCE        11      /* Writeback mode available in config */
 #define VIRTIO_BLK_F_MQ                12      /* support more than one vq */
 
+/* Legacy feature bits */
+#ifndef VIRTIO_BLK_NO_LEGACY
+#define VIRTIO_BLK_F_BARRIER   0       /* Does host support barriers? */
+#define VIRTIO_BLK_F_SCSI      7       /* Supports scsi command passthru */
+#define VIRTIO_BLK_F_WCE       9       /* Writeback mode enabled after reset */
+#define VIRTIO_BLK_F_CONFIG_WCE        11      /* Writeback mode available in config */
 #ifndef __KERNEL__
 /* Old (deprecated) name for VIRTIO_BLK_F_WCE. */
 #define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE
 #endif
+#endif /* !VIRTIO_BLK_NO_LEGACY */
 
 #define VIRTIO_BLK_ID_BYTES    20      /* ID string length */
 
@@ -100,8 +103,10 @@ struct virtio_blk_config {
 #define VIRTIO_BLK_T_IN                0
 #define VIRTIO_BLK_T_OUT       1
 
+#ifndef VIRTIO_BLK_NO_LEGACY
 /* This bit says it's a scsi command, not an actual read or write. */
 #define VIRTIO_BLK_T_SCSI_CMD  2
+#endif /* VIRTIO_BLK_NO_LEGACY */
 
 /* Cache flush command */
 #define VIRTIO_BLK_T_FLUSH     4
@@ -109,8 +114,10 @@ struct virtio_blk_config {
 /* Get device ID command */
 #define VIRTIO_BLK_T_GET_ID    8
 
+#ifndef VIRTIO_BLK_NO_LEGACY
 /* Barrier before this op. */
 #define VIRTIO_BLK_T_BARRIER   0x80000000
+#endif /* !VIRTIO_BLK_NO_LEGACY */
 
 /* This is the first element of the read scatter-gather list. */
 struct virtio_blk_outhdr {
@@ -122,12 +129,14 @@ struct virtio_blk_outhdr {
        __virtio64 sector;
 };
 
+#ifndef VIRTIO_BLK_NO_LEGACY
 struct virtio_scsi_inhdr {
        __virtio32 errors;
        __virtio32 data_len;
        __virtio32 sense_len;
        __virtio32 residual;
 };
+#endif /* !VIRTIO_BLK_NO_LEGACY */
 
 /* And this is the final byte of the write scatter-gather list. */
 #define VIRTIO_BLK_S_OK                0
index a6d0cdeaacd4dc86ed1feea096f540a8d4e0e4d1..c18264df9504c17ce11c84ef1a14f12a132cb35e 100644 (file)
 #define VIRTIO_TRANSPORT_F_START       28
 #define VIRTIO_TRANSPORT_F_END         33
 
+#ifndef VIRTIO_CONFIG_NO_LEGACY
 /* Do we get callbacks when the ring is completely used, even if we've
  * suppressed them? */
 #define VIRTIO_F_NOTIFY_ON_EMPTY       24
 
 /* Can the device handle any descriptor layout? */
 #define VIRTIO_F_ANY_LAYOUT            27
+#endif /* VIRTIO_CONFIG_NO_LEGACY */
 
 /* v1.0 compliant. */
 #define VIRTIO_F_VERSION_1             32
index b5f1677b291c979a48153ac0f1bc38eb5b9a6337..7bbee79ca2933f518a16f0250e7bbfd0a4e85fe0 100644 (file)
@@ -35,7 +35,6 @@
 #define VIRTIO_NET_F_CSUM      0       /* Host handles pkts w/ partial csum */
 #define VIRTIO_NET_F_GUEST_CSUM        1       /* Guest handles pkts w/ partial csum */
 #define VIRTIO_NET_F_MAC       5       /* Host has given MAC address. */
-#define VIRTIO_NET_F_GSO       6       /* Host handles pkts w/ any GSO type */
 #define VIRTIO_NET_F_GUEST_TSO4        7       /* Guest can handle TSOv4 in. */
 #define VIRTIO_NET_F_GUEST_TSO6        8       /* Guest can handle TSOv6 in. */
 #define VIRTIO_NET_F_GUEST_ECN 9       /* Guest can handle TSO[6] w/ ECN in. */
                                         * Steering */
 #define VIRTIO_NET_F_CTRL_MAC_ADDR 23  /* Set MAC address */
 
+#ifndef VIRTIO_NET_NO_LEGACY
+#define VIRTIO_NET_F_GSO       6       /* Host handles pkts w/ any GSO type */
+#endif /* VIRTIO_NET_NO_LEGACY */
+
 #define VIRTIO_NET_S_LINK_UP   1       /* Link is up */
 #define VIRTIO_NET_S_ANNOUNCE  2       /* Announcement is needed */
 
@@ -71,19 +74,39 @@ struct virtio_net_config {
        __u16 max_virtqueue_pairs;
 } __attribute__((packed));
 
+/*
+ * This header comes first in the scatter-gather list.  If you don't
+ * specify GSO or CSUM features, you can simply ignore the header.
+ *
+ * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf,
+ * only flattened.
+ */
+struct virtio_net_hdr_v1 {
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM    1       /* Use csum_start, csum_offset */
+#define VIRTIO_NET_HDR_F_DATA_VALID    2       /* Csum is valid */
+       __u8 flags;
+#define VIRTIO_NET_HDR_GSO_NONE                0       /* Not a GSO frame */
+#define VIRTIO_NET_HDR_GSO_TCPV4       1       /* GSO frame, IPv4 TCP (TSO) */
+#define VIRTIO_NET_HDR_GSO_UDP         3       /* GSO frame, IPv4 UDP (UFO) */
+#define VIRTIO_NET_HDR_GSO_TCPV6       4       /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_ECN         0x80    /* TCP has ECN set */
+       __u8 gso_type;
+       __virtio16 hdr_len;     /* Ethernet + IP + tcp/udp hdrs */
+       __virtio16 gso_size;    /* Bytes to append to hdr_len per frame */
+       __virtio16 csum_start;  /* Position to start checksumming from */
+       __virtio16 csum_offset; /* Offset after that to place checksum */
+       __virtio16 num_buffers; /* Number of merged rx buffers */
+};
+
+#ifndef VIRTIO_NET_NO_LEGACY
 /* This header comes first in the scatter-gather list.
- * If VIRTIO_F_ANY_LAYOUT is not negotiated, it must
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
  * be the first element of the scatter-gather list.  If you don't
  * specify GSO or CSUM features, you can simply ignore the header. */
 struct virtio_net_hdr {
-#define VIRTIO_NET_HDR_F_NEEDS_CSUM    1       // Use csum_start, csum_offset
-#define VIRTIO_NET_HDR_F_DATA_VALID    2       // Csum is valid
+       /* See VIRTIO_NET_HDR_F_* */
        __u8 flags;
-#define VIRTIO_NET_HDR_GSO_NONE                0       // Not a GSO frame
-#define VIRTIO_NET_HDR_GSO_TCPV4       1       // GSO frame, IPv4 TCP (TSO)
-#define VIRTIO_NET_HDR_GSO_UDP         3       // GSO frame, IPv4 UDP (UFO)
-#define VIRTIO_NET_HDR_GSO_TCPV6       4       // GSO frame, IPv6 TCP
-#define VIRTIO_NET_HDR_GSO_ECN         0x80    // TCP has ECN set
+       /* See VIRTIO_NET_HDR_GSO_* */
        __u8 gso_type;
        __virtio16 hdr_len;             /* Ethernet + IP + tcp/udp hdrs */
        __virtio16 gso_size;            /* Bytes to append to hdr_len per frame */
@@ -97,6 +120,7 @@ struct virtio_net_hdr_mrg_rxbuf {
        struct virtio_net_hdr hdr;
        __virtio16 num_buffers; /* Number of merged rx buffers */
 };
+#endif /* ...VIRTIO_NET_NO_LEGACY */
 
 /*
  * Control virtqueue data structures
index 35b552c7f33027c2a5005482b022ef88b07e5e0d..75301468359f0c558ff8c3c12450301c80cff19a 100644 (file)
@@ -39,7 +39,7 @@
 #ifndef _LINUX_VIRTIO_PCI_H
 #define _LINUX_VIRTIO_PCI_H
 
-#include <linux/virtio_config.h>
+#include <linux/types.h>
 
 #ifndef VIRTIO_PCI_NO_LEGACY
 
 /* Vector value used to disable MSI for queue */
 #define VIRTIO_MSI_NO_VECTOR            0xffff
 
+#ifndef VIRTIO_PCI_NO_MODERN
+
+/* IDs for different capabilities.  Must all exist. */
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG      1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG      2
+/* ISR access */
+#define VIRTIO_PCI_CAP_ISR_CFG         3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG      4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG         5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+       __u8 cap_vndr;          /* Generic PCI field: PCI_CAP_ID_VNDR */
+       __u8 cap_next;          /* Generic PCI field: next ptr. */
+       __u8 cap_len;           /* Generic PCI field: capability length */
+       __u8 cfg_type;          /* Identifies the structure. */
+       __u8 bar;               /* Where to find it. */
+       __u8 padding[3];        /* Pad to full dword. */
+       __le32 offset;          /* Offset within bar. */
+       __le32 length;          /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+       struct virtio_pci_cap cap;
+       __le32 notify_off_multiplier;   /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+       /* About the whole device. */
+       __le32 device_feature_select;   /* read-write */
+       __le32 device_feature;          /* read-only */
+       __le32 guest_feature_select;    /* read-write */
+       __le32 guest_feature;           /* read-write */
+       __le16 msix_config;             /* read-write */
+       __le16 num_queues;              /* read-only */
+       __u8 device_status;             /* read-write */
+       __u8 config_generation;         /* read-only */
+
+       /* About a specific virtqueue. */
+       __le16 queue_select;            /* read-write */
+       __le16 queue_size;              /* read-write, power of 2. */
+       __le16 queue_msix_vector;       /* read-write */
+       __le16 queue_enable;            /* read-write */
+       __le16 queue_notify_off;        /* read-only */
+       __le32 queue_desc_lo;           /* read-write */
+       __le32 queue_desc_hi;           /* read-write */
+       __le32 queue_avail_lo;          /* read-write */
+       __le32 queue_avail_hi;          /* read-write */
+       __le32 queue_used_lo;           /* read-write */
+       __le32 queue_used_hi;           /* read-write */
+};
+
+/* Macro versions of offsets for the Old Timers! */
+#define VIRTIO_PCI_CAP_VNDR            0
+#define VIRTIO_PCI_CAP_NEXT            1
+#define VIRTIO_PCI_CAP_LEN             2
+#define VIRTIO_PCI_CAP_CFG_TYPE                3
+#define VIRTIO_PCI_CAP_BAR             4
+#define VIRTIO_PCI_CAP_OFFSET          8
+#define VIRTIO_PCI_CAP_LENGTH          12
+
+#define VIRTIO_PCI_NOTIFY_CAP_MULT     16
+
+#define VIRTIO_PCI_COMMON_DFSELECT     0
+#define VIRTIO_PCI_COMMON_DF           4
+#define VIRTIO_PCI_COMMON_GFSELECT     8
+#define VIRTIO_PCI_COMMON_GF           12
+#define VIRTIO_PCI_COMMON_MSIX         16
+#define VIRTIO_PCI_COMMON_NUMQ         18
+#define VIRTIO_PCI_COMMON_STATUS       20
+#define VIRTIO_PCI_COMMON_CFGGENERATION        21
+#define VIRTIO_PCI_COMMON_Q_SELECT     22
+#define VIRTIO_PCI_COMMON_Q_SIZE       24
+#define VIRTIO_PCI_COMMON_Q_MSIX       26
+#define VIRTIO_PCI_COMMON_Q_ENABLE     28
+#define VIRTIO_PCI_COMMON_Q_NOFF       30
+#define VIRTIO_PCI_COMMON_Q_DESCLO     32
+#define VIRTIO_PCI_COMMON_Q_DESCHI     36
+#define VIRTIO_PCI_COMMON_Q_AVAILLO    40
+#define VIRTIO_PCI_COMMON_Q_AVAILHI    44
+#define VIRTIO_PCI_COMMON_Q_USEDLO     48
+#define VIRTIO_PCI_COMMON_Q_USEDHI     52
+
+#endif /* VIRTIO_PCI_NO_MODERN */
+
 #endif
index 867cc5084afbfce8ab24cfd87d5f52cda93697de..b513e662d8e4999401f3c7366368bfb3c7900664 100644 (file)
@@ -90,6 +90,7 @@ enum {
 };
 
 enum {
+       IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
        IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -201,6 +202,28 @@ struct ib_uverbs_query_device_resp {
        __u8  reserved[4];
 };
 
+struct ib_uverbs_ex_query_device {
+       __u32 comp_mask;
+       __u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+       __u64 general_caps;
+       struct {
+               __u32 rc_odp_caps;
+               __u32 uc_odp_caps;
+               __u32 ud_odp_caps;
+       } per_transport_caps;
+       __u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+       struct ib_uverbs_query_device_resp base;
+       __u32 comp_mask;
+       __u32 response_length;
+       struct ib_uverbs_odp_caps odp_caps;
+};
+
 struct ib_uverbs_query_port {
        __u64 response;
        __u8  port_num;
index 7491ee5d81647d704a7d34ce17e107c7c374b64d..83338210ee045277b785e2af35b11b30b24c1e74 100644 (file)
@@ -46,4 +46,30 @@ static inline efi_system_table_t __init *xen_efi_probe(void)
 }
 #endif
 
+#ifdef CONFIG_PREEMPT
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+}
+
+#else
+
+DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+       __this_cpu_write(xen_in_preemptible_hcall, true);
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+       __this_cpu_write(xen_in_preemptible_hcall, false);
+}
+
+#endif /* CONFIG_PREEMPT */
+
 #endif /* INCLUDE_XEN_OPS_H */
index 058e3671fa11ecab043f414946ae539b8fadf2ec..f5dbc6d4261bcb47e7d7dccfa6385f649ecefa0a 100644 (file)
@@ -921,7 +921,7 @@ config NUMA_BALANCING_DEFAULT_ENABLED
          machine.
 
 menuconfig CGROUPS
-       boolean "Control Group support"
+       bool "Control Group support"
        select KERNFS
        help
          This option adds support for grouping sets of processes together, for
@@ -1290,8 +1290,8 @@ endif
 config CC_OPTIMIZE_FOR_SIZE
        bool "Optimize for size"
        help
-         Enabling this option will pass "-Os" instead of "-O2" to gcc
-         resulting in a smaller kernel.
+         Enabling this option will pass "-Os" instead of "-O2" to
+         your compiler resulting in a smaller kernel.
 
          If unsure, say N.
 
@@ -1762,7 +1762,7 @@ config SLABINFO
        default y
 
 config RT_MUTEXES
-       boolean
+       bool
 
 config BASE_SMALL
        int
index a64e7a207d2b5cd123b65f7143c6d659c0aed726..50603aec766a09ef81de518bb2a506c25c187c1c 100644 (file)
@@ -656,6 +656,11 @@ void bpf_prog_free(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
 
+/* Weak definitions of helper functions in case we don't have bpf syscall. */
+const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
+const struct bpf_func_proto bpf_map_update_elem_proto __weak;
+const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
+
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
  */
index 07ce18ca71e0cd46b70155269a77b04af23f6526..0874e2edd2756bcbe5542a496fb6b131b092b3ea 100644 (file)
@@ -604,7 +604,7 @@ return_normal:
                   online_cpus)
                cpu_relax();
        if (!time_left)
-               pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
+               pr_crit("Timed out waiting for secondary CPUs.\n");
 
        /*
         * At this point the primary processor is completely
@@ -696,6 +696,14 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
 
        if (arch_kgdb_ops.enable_nmi)
                arch_kgdb_ops.enable_nmi(0);
+       /*
+        * Avoid entering the debugger if we were triggered due to an oops
+        * but panic_timeout indicates the system should automatically
+        * reboot on panic. We don't want to get stuck waiting for input
+        * on such systems, especially if its "just" an oops.
+        */
+       if (signo != SIGTRAP && panic_timeout)
+               return 1;
 
        memset(ks, 0, sizeof(struct kgdb_state));
        ks->cpu                 = raw_smp_processor_id();
@@ -828,6 +836,15 @@ static int kgdb_panic_event(struct notifier_block *self,
                            unsigned long val,
                            void *data)
 {
+       /*
+        * Avoid entering the debugger if we were triggered due to a panic
+        * We don't want to get stuck waiting for input from user in such case.
+        * panic_timeout indicates the system should automatically
+        * reboot on panic.
+        */
+       if (panic_timeout)
+               return NOTIFY_DONE;
+
        if (dbg_kdb_mode)
                kdb_printf("PANIC: %s\n", (char *)data);
        kgdb_breakpoint();
index 7c70812caea5b3223fe1a9b7d9252980f90a02fe..fc1ef736253c79954686d018a2deca4c86300fa6 100644 (file)
@@ -439,7 +439,7 @@ poll_again:
  *     substituted for %d, %x or %o in the prompt.
  */
 
-char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
+char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
 {
        if (prompt && kdb_prompt_str != prompt)
                strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
@@ -548,7 +548,7 @@ static int kdb_search_string(char *searched, char *searchfor)
        return 0;
 }
 
-int vkdb_printf(const char *fmt, va_list ap)
+int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 {
        int diag;
        int linecount;
@@ -680,6 +680,12 @@ int vkdb_printf(const char *fmt, va_list ap)
                        size_avail = sizeof(kdb_buffer) - len;
                        goto kdb_print_out;
                }
+               if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
+                       /*
+                        * This was a interactive search (using '/' at more
+                        * prompt) and it has completed. Clear the flag.
+                        */
+                       kdb_grepping_flag = 0;
                /*
                 * at this point the string is a full line and
                 * should be printed, up to the null.
@@ -691,19 +697,20 @@ kdb_printit:
         * Write to all consoles.
         */
        retlen = strlen(kdb_buffer);
+       cp = (char *) printk_skip_level(kdb_buffer);
        if (!dbg_kdb_mode && kgdb_connected) {
-               gdbstub_msg_write(kdb_buffer, retlen);
+               gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
        } else {
                if (dbg_io_ops && !dbg_io_ops->is_console) {
-                       len = retlen;
-                       cp = kdb_buffer;
+                       len = retlen - (cp - kdb_buffer);
+                       cp2 = cp;
                        while (len--) {
-                               dbg_io_ops->write_char(*cp);
-                               cp++;
+                               dbg_io_ops->write_char(*cp2);
+                               cp2++;
                        }
                }
                while (c) {
-                       c->write(c, kdb_buffer, retlen);
+                       c->write(c, cp, retlen - (cp - kdb_buffer));
                        touch_nmi_watchdog();
                        c = c->next;
                }
@@ -711,7 +718,10 @@ kdb_printit:
        if (logging) {
                saved_loglevel = console_loglevel;
                console_loglevel = CONSOLE_LOGLEVEL_SILENT;
-               printk(KERN_INFO "%s", kdb_buffer);
+               if (printk_get_level(kdb_buffer) || src == KDB_MSGSRC_PRINTK)
+                       printk("%s", kdb_buffer);
+               else
+                       pr_info("%s", kdb_buffer);
        }
 
        if (KDB_STATE(PAGER)) {
@@ -794,11 +804,23 @@ kdb_printit:
                        kdb_nextline = linecount - 1;
                        kdb_printf("\r");
                        suspend_grep = 1; /* for this recursion */
+               } else if (buf1[0] == '/' && !kdb_grepping_flag) {
+                       kdb_printf("\r");
+                       kdb_getstr(kdb_grep_string, KDB_GREP_STRLEN,
+                                  kdbgetenv("SEARCHPROMPT") ?: "search> ");
+                       *strchrnul(kdb_grep_string, '\n') = '\0';
+                       kdb_grepping_flag += KDB_GREPPING_FLAG_SEARCH;
+                       suspend_grep = 1; /* for this recursion */
                } else if (buf1[0] && buf1[0] != '\n') {
                        /* user hit something other than enter */
                        suspend_grep = 1; /* for this recursion */
-                       kdb_printf("\nOnly 'q' or 'Q' are processed at more "
-                                  "prompt, input ignored\n");
+                       if (buf1[0] != '/')
+                               kdb_printf(
+                                   "\nOnly 'q', 'Q' or '/' are processed at "
+                                   "more prompt, input ignored\n");
+                       else
+                               kdb_printf("\n'/' cannot be used during | "
+                                          "grep filtering, input ignored\n");
                } else if (kdb_grepping_flag) {
                        /* user hit enter */
                        suspend_grep = 1; /* for this recursion */
@@ -844,7 +866,7 @@ int kdb_printf(const char *fmt, ...)
        int r;
 
        va_start(ap, fmt);
-       r = vkdb_printf(fmt, ap);
+       r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
        va_end(ap);
 
        return r;
index 7b40c5f07dce8d09e1ebaba547e401b5655befbb..4121345498e0e48f10b414a4b12e0b5f15daeabd 100644 (file)
@@ -50,8 +50,7 @@
 static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
 module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
 
-#define GREP_LEN 256
-char kdb_grep_string[GREP_LEN];
+char kdb_grep_string[KDB_GREP_STRLEN];
 int kdb_grepping_flag;
 EXPORT_SYMBOL(kdb_grepping_flag);
 int kdb_grep_leading;
@@ -870,7 +869,7 @@ static void parse_grep(const char *str)
        len = strlen(cp);
        if (!len)
                return;
-       if (len >= GREP_LEN) {
+       if (len >= KDB_GREP_STRLEN) {
                kdb_printf("search string too long\n");
                return;
        }
@@ -915,13 +914,12 @@ int kdb_parse(const char *cmdstr)
        char *cp;
        char *cpp, quoted;
        kdbtab_t *tp;
-       int i, escaped, ignore_errors = 0, check_grep;
+       int i, escaped, ignore_errors = 0, check_grep = 0;
 
        /*
         * First tokenize the command string.
         */
        cp = (char *)cmdstr;
-       kdb_grepping_flag = check_grep = 0;
 
        if (KDB_FLAG(CMD_INTERRUPT)) {
                /* Previous command was interrupted, newline must not
@@ -1247,7 +1245,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                kdb_printf("due to NonMaskable Interrupt @ "
                           kdb_machreg_fmt "\n",
                           instruction_pointer(regs));
-               kdb_dumpregs(regs);
                break;
        case KDB_REASON_SSTEP:
        case KDB_REASON_BREAK:
@@ -1281,6 +1278,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                 */
                kdb_nextline = 1;
                KDB_STATE_CLEAR(SUPPRESS);
+               kdb_grepping_flag = 0;
+               /* ensure the old search does not leak into '/' commands */
+               kdb_grep_string[0] = '\0';
 
                cmdbuf = cmd_cur;
                *cmdbuf = '\0';
@@ -2256,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
        /*
         * Validate cpunum
         */
-       if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
+       if ((cpunum >= CONFIG_NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
                return KDB_BADCPUNUM;
 
        dbg_switch_cpu = cpunum;
@@ -2583,7 +2583,7 @@ static int kdb_summary(int argc, const char **argv)
 #define K(x) ((x) << (PAGE_SHIFT - 10))
        kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
                   "Buffers:        %8lu kB\n",
-                  val.totalram, val.freeram, val.bufferram);
+                  K(val.totalram), K(val.freeram), K(val.bufferram));
        return 0;
 }
 
index eaacd1693954b13aa55c59028c597b6ed91488f3..75014d7f45681b1a75643fd06dbc119e8ea16cb7 100644 (file)
@@ -196,7 +196,9 @@ extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
 
 /* Miscellaneous functions and data areas */
 extern int kdb_grepping_flag;
+#define KDB_GREPPING_FLAG_SEARCH 0x8000
 extern char kdb_grep_string[];
+#define KDB_GREP_STRLEN 256
 extern int kdb_grep_leading;
 extern int kdb_grep_trailing;
 extern char *kdb_cmds[];
@@ -209,7 +211,7 @@ extern void kdb_ps1(const struct task_struct *p);
 extern void kdb_print_nameval(const char *name, unsigned long val);
 extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
 extern void kdb_meminfo_proc_show(void);
-extern char *kdb_getstr(char *, size_t, char *);
+extern char *kdb_getstr(char *, size_t, const char *);
 extern void kdb_gdb_state_pass(char *buf);
 
 /* Defines for kdb_symbol_print */
index 52aa7e8de92705c02c061c3ecd3087cb1061b4f8..752d6486b67e15eba9113116972a2cd0450ca0cc 100644 (file)
@@ -1,33 +1,7 @@
 ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
 
-# if-lt
-# Usage VAR := $(call if-lt, $(a), $(b))
-# Returns 1 if (a < b)
-if-lt = $(shell [ $(1) -lt $(2) ] && echo 1)
-
-ifeq ($(CONFIG_GCOV_FORMAT_3_4),y)
-  cc-ver := 0304
-else ifeq ($(CONFIG_GCOV_FORMAT_4_7),y)
-  cc-ver := 0407
-else
-# Use cc-version if available, otherwise set 0
-#
-# scripts/Kbuild.include, which contains cc-version function, is not included
-# during make clean "make -f scripts/Makefile.clean obj=kernel/gcov"
-# Meaning cc-ver is empty causing if-lt test to fail with
-# "/bin/sh: line 0: [: -lt: unary operator expected" error mesage.
-# This has no affect on the clean phase, but the error message could be
-# confusing/annoying. So this dummy workaround sets cc-ver to zero if cc-version
-# is not available. We can probably move if-lt to Kbuild.include, so it's also
-# not defined during clean or to include Kbuild.include in
-# scripts/Makefile.clean. But the following workaround seems least invasive.
-  cc-ver := $(if $(call cc-version),$(call cc-version),0)
-endif
-
-obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o
-
-ifeq ($(call if-lt, $(cc-ver), 0407),1)
-  obj-$(CONFIG_GCOV_KERNEL) += gcc_3_4.o
-else
-  obj-$(CONFIG_GCOV_KERNEL) += gcc_4_7.o
-endif
+obj-y := base.o fs.o
+obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_3_4.o
+obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_4_7.o
+obj-$(CONFIG_GCOV_FORMAT_AUTODETECT) += $(call cc-ifversion, -lt, 0407, \
+                                                       gcc_3_4.o, gcc_4_7.o)
index ff7f47d026ac48b21d6239f9db36ee8662585a45..782172f073c5ed4bde5318bf96777ee79c618b89 100644 (file)
@@ -314,12 +314,12 @@ static void notrace klp_ftrace_handler(unsigned long ip,
        rcu_read_lock();
        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);
-       rcu_read_unlock();
-
        if (WARN_ON_ONCE(!func))
-               return;
+               goto unlock;
 
        klp_arch_set_pc(regs, (unsigned long)func->new_func);
+unlock:
+       rcu_read_unlock();
 }
 
 static int klp_disable_func(struct klp_func *func)
@@ -731,7 +731,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
        func->state = KLP_DISABLED;
 
        return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-                                   obj->kobj, func->old_name);
+                                   obj->kobj, "%s", func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -807,7 +807,7 @@ static int klp_init_patch(struct klp_patch *patch)
        patch->state = KLP_DISABLED;
 
        ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
-                                  klp_root_kobj, patch->mod->name);
+                                  klp_root_kobj, "%s", patch->mod->name);
        if (ret)
                goto unlock;
 
index 3059bc2f022daa6e4d8d976c39a7d8a8f546d813..6357265a31ad1a34b881aba31abe27ab6d3921ec 100644 (file)
@@ -1193,7 +1193,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
        if (unlikely(ret)) {
-               remove_waiter(lock, &waiter);
+               __set_current_state(TASK_RUNNING);
+               if (rt_mutex_has_waiters(lock))
+                       remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, chwalk, &waiter);
        }
 
index c06df7de0963a3c82889c274dd2d707507d31d50..01cfd69c54c6772ad49a1d81120258f765f435a1 100644 (file)
@@ -1811,7 +1811,7 @@ int vprintk_default(const char *fmt, va_list args)
 
 #ifdef CONFIG_KGDB_KDB
        if (unlikely(kdb_trap_printk)) {
-               r = vkdb_printf(fmt, args);
+               r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
                return r;
        }
 #endif
index 0d7bbe3095ad717c369b142079f16236a58e1af7..0a571e9a0f1d00868c74c5032912c1a0b24dce94 100644 (file)
@@ -326,6 +326,7 @@ void rcu_read_unlock_special(struct task_struct *t)
        special = t->rcu_read_unlock_special;
        if (special.b.need_qs) {
                rcu_preempt_qs();
+               t->rcu_read_unlock_special.b.need_qs = false;
                if (!t->rcu_read_unlock_special.s) {
                        local_irq_restore(flags);
                        return;
index 8a2e230fb86ad43e3196488961f2b71d1e8b28ed..eae160dd669d9d8d58bb911595c6391b2732edb4 100644 (file)
@@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
         * so we don't have to move tasks around upon policy change,
         * or flail around trying to allocate bandwidth on the fly.
         * A bandwidth exception in __sched_setscheduler() allows
-        * the policy change to proceed.  Thereafter, task_group()
-        * returns &root_task_group, so zero bandwidth is required.
+        * the policy change to proceed.
         */
        free_rt_sched_group(tg);
        tg->rt_se = root_task_group.rt_se;
@@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
        if (tg != &root_task_group)
                return false;
 
-       if (p->sched_class != &fair_sched_class)
-               return false;
-
        /*
         * We can only assume the task group can't go away on us if
         * autogroup_move_group() can see us on ->thread_group list.
index 7052d3fd4e7bd87a29bd144cbca1086621040251..8d0f35debf35657689908a4b37df7230ba7d6710 100644 (file)
@@ -274,7 +274,7 @@ bool try_wait_for_completion(struct completion *x)
         * first without taking the lock so we can
         * return early in the blocking case.
         */
-       if (!ACCESS_ONCE(x->done))
+       if (!READ_ONCE(x->done))
                return 0;
 
        spin_lock_irqsave(&x->wait.lock, flags);
@@ -297,6 +297,21 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
-       return !!ACCESS_ONCE(x->done);
+       if (!READ_ONCE(x->done))
+               return false;
+
+       /*
+        * If ->done, we need to wait for complete() to release ->wait.lock
+        * otherwise we can end up freeing the completion before complete()
+        * is done referencing it.
+        *
+        * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
+        * the loads of ->done and ->wait.lock such that we cannot observe
+        * the lock before complete() acquires it while observing the ->done
+        * after it's acquired the lock.
+        */
+       smp_rmb();
+       spin_unlock_wait(&x->wait.lock);
+       return true;
 }
 EXPORT_SYMBOL(completion_done);
index 13049aac05a6242e44a59b9ec424f1004448b3d4..f0f831e8a345d835f4cb21bf899c50ab67042b43 100644 (file)
@@ -306,66 +306,6 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       lockdep_assert_held(&p->pi_lock);
-
-       for (;;) {
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-
-               while (unlikely(task_on_rq_migrating(p)))
-                       cpu_relax();
-       }
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-       __acquires(p->pi_lock)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       for (;;) {
-               raw_spin_lock_irqsave(&p->pi_lock, *flags);
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-               while (unlikely(task_on_rq_migrating(p)))
-                       cpu_relax();
-       }
-}
-
-static void __task_rq_unlock(struct rq *rq)
-       __releases(rq->lock)
-{
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-       __releases(rq->lock)
-       __releases(p->pi_lock)
-{
-       raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
@@ -2899,7 +2839,7 @@ void __sched schedule_preempt_disabled(void)
        preempt_disable();
 }
 
-static void preempt_schedule_common(void)
+static void __sched notrace preempt_schedule_common(void)
 {
        do {
                __preempt_count_add(PREEMPT_ACTIVE);
@@ -4418,36 +4358,29 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-       struct rq *rq = raw_rq();
-
-       delayacct_blkio_start();
-       atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
-       schedule();
-       current->in_iowait = 0;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-       struct rq *rq = raw_rq();
+       int old_iowait = current->in_iowait;
+       struct rq *rq;
        long ret;
 
+       current->in_iowait = 1;
+       if (old_iowait)
+               blk_schedule_flush_plug(current);
+       else
+               blk_flush_plug(current);
+
        delayacct_blkio_start();
+       rq = raw_rq();
        atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
        ret = schedule_timeout(timeout);
-       current->in_iowait = 0;
+       current->in_iowait = old_iowait;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
+
        return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
@@ -7642,6 +7575,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
        struct task_struct *g, *p;
 
+       /*
+        * Autogroups do not have RT tasks; see autogroup_create().
+        */
+       if (task_group_is_autogroup(tg))
+               return 0;
+
        for_each_process_thread(g, p) {
                if (rt_task(p) && task_group(p) == tg)
                        return 1;
@@ -7734,6 +7673,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 {
        int i, err = 0;
 
+       /*
+        * Disallowing the root group RT runtime is BAD, it would disallow the
+        * kernel creating (and or operating) RT threads.
+        */
+       if (tg == &root_task_group && rt_runtime == 0)
+               return -EINVAL;
+
+       /* No period doesn't make any sense. */
+       if (rt_period == 0)
+               return -EINVAL;
+
        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7790,9 +7740,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
        rt_period = (u64)rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-       if (rt_period == 0)
-               return -EINVAL;
-
        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 
index a027799ae130d3623ff4351f08c3cf456979bfbc..3fa8fa6d940300c1fbae721503aad2666f72b4e5 100644 (file)
@@ -511,16 +511,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
+       unsigned long flags;
        struct rq *rq;
-again:
-       rq = task_rq(p);
-       raw_spin_lock(&rq->lock);
 
-       if (rq != task_rq(p)) {
-               /* Task was moved, retrying. */
-               raw_spin_unlock(&rq->lock);
-               goto again;
-       }
+       rq = task_rq_lock(current, &flags);
 
        /*
         * We need to take care of several possible races here:
@@ -541,6 +535,26 @@ again:
 
        sched_clock_tick();
        update_rq_clock(rq);
+
+       /*
+        * If the throttle happened during sched-out; like:
+        *
+        *   schedule()
+        *     deactivate_task()
+        *       dequeue_task_dl()
+        *         update_curr_dl()
+        *           start_dl_timer()
+        *         __dequeue_task_dl()
+        *     prev->on_rq = 0;
+        *
+        * We can be both throttled and !queued. Replenish the counter
+        * but do not enqueue -- wait for our wakeup to do that.
+        */
+       if (!task_on_rq_queued(p)) {
+               replenish_dl_entity(dl_se, dl_se);
+               goto unlock;
+       }
+
        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                check_preempt_curr_dl(rq, p, 0);
@@ -555,7 +569,7 @@ again:
                push_dl_task(rq);
 #endif
 unlock:
-       raw_spin_unlock(&rq->lock);
+       task_rq_unlock(rq, current, &flags);
 
        return HRTIMER_NORESTART;
 }
@@ -898,6 +912,7 @@ static void yield_task_dl(struct rq *rq)
                rq->curr->dl.dl_yielded = 1;
                p->dl.runtime = 0;
        }
+       update_rq_clock(rq);
        update_curr_dl(rq);
 }
 
index 0870db23d79cb3c0578b4f4b3450f5dad02c929e..dc0f435a27794657258623ac8a7f53f7326ff7ac 100644 (file)
@@ -1380,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+       lockdep_assert_held(&p->pi_lock);
+
+       for (;;) {
+               rq = task_rq(p);
+               raw_spin_lock(&rq->lock);
+               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+                       return rq;
+               raw_spin_unlock(&rq->lock);
+
+               while (unlikely(task_on_rq_migrating(p)))
+                       cpu_relax();
+       }
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+       __acquires(p->pi_lock)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+       for (;;) {
+               raw_spin_lock_irqsave(&p->pi_lock, *flags);
+               rq = task_rq(p);
+               raw_spin_lock(&rq->lock);
+               /*
+                *      move_queued_task()              task_rq_lock()
+                *
+                *      ACQUIRE (rq->lock)
+                *      [S] ->on_rq = MIGRATING         [L] rq = task_rq()
+                *      WMB (__set_task_cpu())          ACQUIRE (rq->lock);
+                *      [S] ->cpu = new_cpu             [L] task_rq()
+                *                                      [L] ->on_rq
+                *      RELEASE (rq->lock)
+                *
+                * If we observe the old cpu in task_rq_lock, the acquire of
+                * the old rq->lock will fully serialize against the stores.
+                *
+                * If we observe the new cpu in task_rq_lock, the acquire will
+                * pair with the WMB to ensure we must then also see migrating.
+                */
+               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+                       return rq;
+               raw_spin_unlock(&rq->lock);
+               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+               while (unlikely(task_on_rq_migrating(p)))
+                       cpu_relax();
+       }
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+       __releases(rq->lock)
+{
+       raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+       __releases(rq->lock)
+       __releases(p->pi_lock)
+{
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
index ea9c881098941ecd9bbb42fa69a3ddb958d2ec41..a03d9cd23ed779b2a38d8bf564bac6860d934f2f 100644 (file)
 #ifndef MPX_DISABLE_MANAGEMENT
 # define MPX_DISABLE_MANAGEMENT(a)     (-EINVAL)
 #endif
+#ifndef GET_FP_MODE
+# define GET_FP_MODE(a)                (-EINVAL)
+#endif
+#ifndef SET_FP_MODE
+# define SET_FP_MODE(a,b)      (-EINVAL)
+#endif
 
 /*
  * this is where the system-wide overflow UID and GID are defined, for
@@ -1102,6 +1108,7 @@ DECLARE_RWSEM(uts_sem);
 /*
  * Work around broken programs that cannot handle "Linux 3.0".
  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
  */
 static int override_release(char __user *release, size_t len)
 {
@@ -1121,7 +1128,7 @@ static int override_release(char __user *release, size_t len)
                                break;
                        rest++;
                }
-               v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+               v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
                copy = clamp_t(size_t, len, 1, sizeof(buf));
                copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
                ret = copy_to_user(release, buf, copy + 1);
@@ -2219,6 +2226,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                        return -EINVAL;
                error = MPX_DISABLE_MANAGEMENT(me);
                break;
+       case PR_SET_FP_MODE:
+               error = SET_FP_MODE(me, arg2);
+               break;
+       case PR_GET_FP_MODE:
+               error = GET_FP_MODE(me);
+               break;
        default:
                error = -EINVAL;
                break;
index 4b585e0fdd22e16288f688baa1051395836461d5..0f60b08a4f073e9246ced1dc3b5de5f50efd7cf4 100644 (file)
@@ -633,10 +633,14 @@ int ntp_validate_timex(struct timex *txc)
        if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
                return -EPERM;
 
-       if (txc->modes & ADJ_FREQUENCY) {
-               if (LONG_MIN / PPM_SCALE > txc->freq)
+       /*
+        * Check for potential multiplication overflows that can
+        * only happen on 64-bit systems:
+        */
+       if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
+               if (LLONG_MIN / PPM_SCALE > txc->freq)
                        return -EINVAL;
-               if (LONG_MAX / PPM_SCALE < txc->freq)
+               if (LLONG_MAX / PPM_SCALE < txc->freq)
                        return -EINVAL;
        }
 
index cb9758e0ba0cd42d43cb8e7d6f86810006b80265..87da53bb1fefd6fc00c9adf31f99716852771036 100644 (file)
@@ -23,7 +23,7 @@ config HAVE_ARCH_BITREVERSE
          have this capability.
 
 config RATIONAL
-       boolean
+       bool
 
 config GENERIC_STRNCPY_FROM_USER
        bool
@@ -48,14 +48,14 @@ config GENERIC_IOMAP
        select GENERIC_PCI_IOMAP
 
 config GENERIC_IO
-       boolean
+       bool
        default n
 
 config STMP_DEVICE
        bool
 
 config PERCPU_RWSEM
-       boolean
+       bool
 
 config ARCH_USE_CMPXCHG_LOCKREF
        bool
@@ -266,7 +266,7 @@ config DECOMPRESS_LZ4
 # Generic allocator support is selected if needed
 #
 config GENERIC_ALLOCATOR
-       boolean
+       bool
 
 #
 # reed solomon support is select'ed if needed
@@ -275,16 +275,16 @@ config REED_SOLOMON
        tristate
        
 config REED_SOLOMON_ENC8
-       boolean
+       bool
 
 config REED_SOLOMON_DEC8
-       boolean
+       bool
 
 config REED_SOLOMON_ENC16
-       boolean
+       bool
 
 config REED_SOLOMON_DEC16
-       boolean
+       bool
 
 #
 # BCH support is selected if needed
@@ -293,7 +293,7 @@ config BCH
        tristate
 
 config BCH_CONST_PARAMS
-       boolean
+       bool
        help
          Drivers may select this option to force specific constant
          values for parameters 'm' (Galois field order) and 't'
@@ -329,7 +329,7 @@ config BCH_CONST_T
 # Textsearch support is select'ed if needed
 #
 config TEXTSEARCH
-       boolean
+       bool
 
 config TEXTSEARCH_KMP
        tristate
@@ -341,10 +341,10 @@ config TEXTSEARCH_FSM
        tristate
 
 config BTREE
-       boolean
+       bool
 
 config INTERVAL_TREE
-       boolean
+       bool
        help
          Simple, embeddable, interval-tree. Can find the start of an
          overlapping range in log(n) time and then iterate over all
@@ -372,18 +372,18 @@ config ASSOCIATIVE_ARRAY
          for more information.
 
 config HAS_IOMEM
-       boolean
+       bool
        depends on !NO_IOMEM
        select GENERIC_IO
        default y
 
 config HAS_IOPORT_MAP
-       boolean
+       bool
        depends on HAS_IOMEM && !NO_IOPORT_MAP
        default y
 
 config HAS_DMA
-       boolean
+       bool
        depends on !NO_DMA
        default y
 
index 0d83ea8a9605429aa5e79262bef539e51bfec456..bcce5f149310136a6eaed0c3830c996ac8484008 100644 (file)
 
 #ifdef CONFIG_PCI
 /**
- * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
  * @dev: PCI device that owns the BAR
  * @bar: BAR number
- * @maxlen: length of the memory to map
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
  *
  * Using this function you will get a __iomem address to your device BAR.
  * You can access it using ioread*() and iowrite*(). These functions hide
  * you expect from them in the correct way.
  *
  * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
+ * the complete BAR from offset to the end, pass %0 here.
  * */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+void __iomem *pci_iomap_range(struct pci_dev *dev,
+                             int bar,
+                             unsigned long offset,
+                             unsigned long maxlen)
 {
        resource_size_t start = pci_resource_start(dev, bar);
        resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
-       if (!len || !start)
+       if (len <= offset || !start)
                return NULL;
+       len -= offset;
+       start += offset;
        if (maxlen && len > maxlen)
                len = maxlen;
        if (flags & IORESOURCE_IO)
@@ -43,6 +49,25 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
        /* What? */
        return NULL;
 }
+EXPORT_SYMBOL(pci_iomap_range);
 
+/**
+ * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details if this is a MMIO or PIO address space and will just do what
+ * you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ * */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+       return pci_iomap_range(dev, bar, 0, maxlen);
+}
 EXPORT_SYMBOL(pci_iomap);
 #endif /* CONFIG_PCI */
index 9cc4c4a90d00686228bebdfe55b212c34e98206f..b5344ef4c6846c4f9256c1d0d418f774284c8fcc 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -217,15 +218,15 @@ static void bucket_table_free(const struct bucket_table *tbl)
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets)
 {
-       struct bucket_table *tbl;
+       struct bucket_table *tbl = NULL;
        size_t size;
        int i;
 
        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-       tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+       if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+               tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
        if (tbl == NULL)
                tbl = vzalloc(size);
-
        if (tbl == NULL)
                return NULL;
 
@@ -247,26 +248,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
  * @ht:                hash table
  * @new_size:  new table size
  */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
        /* Expand table when exceeding 75% load */
        return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-              (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
+              (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
 }
-EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht:                hash table
  * @new_size:  new table size
  */
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
        /* Shrink table beneath 30% load */
        return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
               (atomic_read(&ht->shift) > ht->p.min_shift);
 }
-EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void lock_buckets(struct bucket_table *new_tbl,
                         struct bucket_table *old_tbl, unsigned int hash)
@@ -414,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
                        }
                }
                unlock_buckets(new_tbl, old_tbl, new_hash);
+               cond_resched();
        }
 
        /* Unzip interleaved hash chains */
@@ -437,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
                                complete = false;
 
                        unlock_buckets(new_tbl, old_tbl, old_hash);
+                       cond_resched();
                }
        }
 
@@ -495,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
                                   tbl->buckets[new_hash + new_tbl->size]);
 
                unlock_buckets(new_tbl, tbl, new_hash);
+               cond_resched();
        }
 
        /* Publish the new, valid hash table */
@@ -528,31 +530,19 @@ static void rht_deferred_worker(struct work_struct *work)
        list_for_each_entry(walker, &ht->walkers, list)
                walker->resize = true;
 
-       if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+       if (rht_grow_above_75(ht, tbl->size))
                rhashtable_expand(ht);
-       else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+       else if (rht_shrink_below_30(ht, tbl->size))
                rhashtable_shrink(ht);
-
 unlock:
        mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_wakeup_worker(struct rhashtable *ht)
-{
-       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-       struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-       size_t size = tbl->size;
-
-       /* Only adjust the table if no resizing is currently in progress. */
-       if (tbl == new_tbl &&
-           ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
-            (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
-               schedule_work(&ht->run_work);
-}
-
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-                               struct bucket_table *tbl, u32 hash)
+                               struct bucket_table *tbl,
+                               const struct bucket_table *old_tbl, u32 hash)
 {
+       bool no_resize_running = tbl == old_tbl;
        struct rhash_head *head;
 
        hash = rht_bucket_index(tbl, hash);
@@ -568,8 +558,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
        rcu_assign_pointer(tbl->buckets[hash], obj);
 
        atomic_inc(&ht->nelems);
-
-       rhashtable_wakeup_worker(ht);
+       if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+               schedule_work(&ht->run_work);
 }
 
 /**
@@ -599,7 +589,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
        hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
        lock_buckets(tbl, old_tbl, hash);
-       __rhashtable_insert(ht, obj, tbl, hash);
+       __rhashtable_insert(ht, obj, tbl, old_tbl, hash);
        unlock_buckets(tbl, old_tbl, hash);
 
        rcu_read_unlock();
@@ -681,8 +671,11 @@ found:
        unlock_buckets(new_tbl, old_tbl, new_hash);
 
        if (ret) {
+               bool no_resize_running = new_tbl == old_tbl;
+
                atomic_dec(&ht->nelems);
-               rhashtable_wakeup_worker(ht);
+               if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
+                       schedule_work(&ht->run_work);
        }
 
        rcu_read_unlock();
@@ -852,7 +845,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
                goto exit;
        }
 
-       __rhashtable_insert(ht, obj, new_tbl, new_hash);
+       __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
 
 exit:
        unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -894,6 +887,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
        if (!iter->walker)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&iter->walker->list);
+       iter->walker->resize = false;
+
        mutex_lock(&ht->mutex);
        list_add(&iter->walker->list, &ht->walkers);
        mutex_unlock(&ht->mutex);
@@ -1111,8 +1107,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
        if (!ht->p.hash_rnd)
                get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
-       if (ht->p.grow_decision || ht->p.shrink_decision)
-               INIT_WORK(&ht->run_work, rht_deferred_worker);
+       INIT_WORK(&ht->run_work, rht_deferred_worker);
 
        return 0;
 }
@@ -1130,8 +1125,7 @@ void rhashtable_destroy(struct rhashtable *ht)
 {
        ht->being_destroyed = true;
 
-       if (ht->p.grow_decision || ht->p.shrink_decision)
-               cancel_work_sync(&ht->run_work);
+       cancel_work_sync(&ht->run_work);
 
        mutex_lock(&ht->mutex);
        bucket_table_free(rht_dereference(ht->tbl, ht));
index 1dfeba73fc743718d94551e9356ec8c6580a1fac..67c7593d1dd69c91f646e21e47b589c40c808837 100644 (file)
@@ -191,18 +191,18 @@ error:
        return err;
 }
 
+static struct rhashtable ht;
+
 static int __init test_rht_init(void)
 {
-       struct rhashtable ht;
        struct rhashtable_params params = {
                .nelem_hint = TEST_HT_SIZE,
                .head_offset = offsetof(struct test_obj, node),
                .key_offset = offsetof(struct test_obj, value),
                .key_len = sizeof(int),
                .hashfn = jhash,
+               .max_shift = 1, /* we expand/shrink manually here */
                .nulls_base = (3U << RHT_BASE_SHIFT),
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
        };
        int err;
 
@@ -222,6 +222,11 @@ static int __init test_rht_init(void)
        return err;
 }
 
+static void __exit test_rht_exit(void)
+{
+}
+
 module_init(test_rht_init);
+module_exit(test_rht_exit);
 
 MODULE_LICENSE("GPL v2");
index de5239c152f9fb8ea9f66361fa7fba7cdc1d00d9..a03131b6ba8e7877b537a77fa0c21f5cd005e275 100644 (file)
@@ -129,28 +129,28 @@ config SPARSEMEM_VMEMMAP
         efficient option when sufficient kernel resources are available.
 
 config HAVE_MEMBLOCK
-       boolean
+       bool
 
 config HAVE_MEMBLOCK_NODE_MAP
-       boolean
+       bool
 
 config HAVE_MEMBLOCK_PHYS_MAP
-       boolean
+       bool
 
 config HAVE_GENERIC_RCU_GUP
-       boolean
+       bool
 
 config ARCH_DISCARD_MEMBLOCK
-       boolean
+       bool
 
 config NO_BOOTMEM
-       boolean
+       bool
 
 config MEMORY_ISOLATION
-       boolean
+       bool
 
 config MOVABLE_NODE
-       boolean "Enable to assign a node which has only movable memory"
+       bool "Enable to assign a node which has only movable memory"
        depends on HAVE_MEMBLOCK
        depends on NO_BOOTMEM
        depends on X86_64
@@ -228,12 +228,12 @@ config SPLIT_PTLOCK_CPUS
        default "4"
 
 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
-       boolean
+       bool
 
 #
 # support for memory balloon
 config MEMORY_BALLOON
-       boolean
+       bool
 
 #
 # support for memory balloon compaction
@@ -276,7 +276,7 @@ config MIGRATION
          allocation instead of reclaiming.
 
 config ARCH_ENABLE_HUGEPAGE_MIGRATION
-       boolean
+       bool
 
 config PHYS_ADDR_T_64BIT
        def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
index d18d3a6e7337d944a36e1375b1f50fdce483961f..9fe07692eaad04b5ac63250e486ab7c16d92b6c9 100644 (file)
@@ -5247,7 +5247,7 @@ static int memory_low_show(struct seq_file *m, void *v)
        unsigned long low = ACCESS_ONCE(memcg->low);
 
        if (low == PAGE_COUNTER_MAX)
-               seq_puts(m, "infinity\n");
+               seq_puts(m, "max\n");
        else
                seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
 
@@ -5262,7 +5262,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
        int err;
 
        buf = strstrip(buf);
-       err = page_counter_memparse(buf, "infinity", &low);
+       err = page_counter_memparse(buf, "max", &low);
        if (err)
                return err;
 
@@ -5277,7 +5277,7 @@ static int memory_high_show(struct seq_file *m, void *v)
        unsigned long high = ACCESS_ONCE(memcg->high);
 
        if (high == PAGE_COUNTER_MAX)
-               seq_puts(m, "infinity\n");
+               seq_puts(m, "max\n");
        else
                seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
 
@@ -5292,7 +5292,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
        int err;
 
        buf = strstrip(buf);
-       err = page_counter_memparse(buf, "infinity", &high);
+       err = page_counter_memparse(buf, "max", &high);
        if (err)
                return err;
 
@@ -5307,7 +5307,7 @@ static int memory_max_show(struct seq_file *m, void *v)
        unsigned long max = ACCESS_ONCE(memcg->memory.limit);
 
        if (max == PAGE_COUNTER_MAX)
-               seq_puts(m, "infinity\n");
+               seq_puts(m, "max\n");
        else
                seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
 
@@ -5322,7 +5322,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
        int err;
 
        buf = strstrip(buf);
-       err = page_counter_memparse(buf, "infinity", &max);
+       err = page_counter_memparse(buf, "max", &max);
        if (err)
                return err;
 
@@ -5426,7 +5426,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
        if (memcg == root_mem_cgroup)
                return false;
 
-       if (page_counter_read(&memcg->memory) > memcg->low)
+       if (page_counter_read(&memcg->memory) >= memcg->low)
                return false;
 
        while (memcg != root) {
@@ -5435,7 +5435,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
                if (memcg == root_mem_cgroup)
                        break;
 
-               if (page_counter_read(&memcg->memory) > memcg->low)
+               if (page_counter_read(&memcg->memory) >= memcg->low)
                        return false;
        }
        return true;
index 7296360fc057e5bbf67b9904501fdbe98a97b1b2..3e67e7538ecf048b32463abdf8c7eaf1107dcd15 100644 (file)
@@ -1213,11 +1213,9 @@ static int do_mmap_private(struct vm_area_struct *vma,
        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
                total = point;
                kdebug("try to alloc exact %lu pages", total);
-               base = alloc_pages_exact(len, GFP_KERNEL);
-       } else {
-               base = (void *)__get_free_pages(GFP_KERNEL, order);
        }
 
+       base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
        if (!base)
                goto enomem;
 
index a47f0b229a1aca202b15195c88ab93d52e65064f..7abfa70cdc1ae8767fd663d905372447a7fe6864 100644 (file)
@@ -2353,8 +2353,15 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                if (ac->high_zoneidx < ZONE_NORMAL)
                        goto out;
                /* The OOM killer does not compensate for light reclaim */
-               if (!(gfp_mask & __GFP_FS))
+               if (!(gfp_mask & __GFP_FS)) {
+                       /*
+                        * XXX: Page reclaim didn't yield anything,
+                        * and the OOM killer can't be invoked, but
+                        * keep looping as per should_alloc_retry().
+                        */
+                       *did_some_progress = 1;
                        goto out;
+               }
                /*
                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
index a63031fa3e0c1e4380e6937aa711df912c9a687f..cf2d0ca010bc52efd5ea86c7f6ba760a5c3ef286 100644 (file)
@@ -1455,6 +1455,9 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
 bool shmem_mapping(struct address_space *mapping)
 {
+       if (!mapping->host)
+               return false;
+
        return mapping->host->i_sb->s_op == &shmem_ops;
 }
 
@@ -2319,8 +2322,8 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
 
 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
 {
-       bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
-       bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
+       bool old_is_dir = d_is_dir(old_dentry);
+       bool new_is_dir = d_is_dir(new_dentry);
 
        if (old_dir != new_dir && old_is_dir != new_is_dir) {
                if (old_is_dir) {
index 1dcfec8b49f3f4a725b853f8ce9ee202bab5c7f1..f196552ec3c41e13f2891f3a29a43844d56c2574 100644 (file)
@@ -792,5 +792,5 @@ void vlan_setup(struct net_device *dev)
        dev->destructor         = vlan_dev_free;
        dev->ethtool_ops        = &vlan_ethtool_ops;
 
-       memset(dev->broadcast, 0, ETH_ALEN);
+       eth_zero_addr(dev->broadcast);
 }
index daa749c8b3fbea2cc319ceb6a1ce4a9c20d9eece..d8e376a5f0f13d9eed735f62b81367b7bc685ebe 100644 (file)
@@ -524,6 +524,12 @@ static int p9_virtio_probe(struct virtio_device *vdev)
        int err;
        struct virtio_chan *chan;
 
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
        chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
        if (!chan) {
                pr_err("Failed to allocate virtio 9P channel\n");
index ff9ffc17fa0e1fc438e9e4ebec20376ddd6ec969..44dd5786ee91da16ae920d3c9f62b1f4bac353c8 100644 (file)
@@ -231,18 +231,18 @@ source "net/hsr/Kconfig"
 source "net/switchdev/Kconfig"
 
 config RPS
-       boolean
+       bool
        depends on SMP && SYSFS
        default y
 
 config RFS_ACCEL
-       boolean
+       bool
        depends on RPS
        select CPU_RMAP
        default y
 
 config XPS
-       boolean
+       bool
        depends on SMP
        default y
 
@@ -254,18 +254,18 @@ config CGROUP_NET_PRIO
          a per-interface basis.
 
 config CGROUP_NET_CLASSID
-       boolean "Network classid cgroup"
+       bool "Network classid cgroup"
        depends on CGROUPS
        ---help---
          Cgroup subsystem for use as general purpose socket classid marker that is
          being used in cls_cgroup and for netfilter matching.
 
 config NET_RX_BUSY_POLL
-       boolean
+       bool
        default y
 
 config BQL
-       boolean
+       bool
        depends on SYSFS
        select DQL
        default y
@@ -282,7 +282,7 @@ config BPF_JIT
          this feature changing /proc/sys/net/core/bpf_jit_enable
 
 config NET_FLOW_LIMIT
-       boolean
+       bool
        depends on RPS
        default y
        ---help---
index 38704bdf941ad6697db492959a909ca716ea9d4c..3995613e5510cfd6a05ebfe5cbbd9da3bcb5589f 100644 (file)
@@ -69,7 +69,7 @@ obj-$(CONFIG_BATMAN_ADV)      += batman-adv/
 obj-$(CONFIG_NFC)              += nfc/
 obj-$(CONFIG_OPENVSWITCH)      += openvswitch/
 obj-$(CONFIG_VSOCKETS) += vmw_vsock/
-obj-$(CONFIG_NET_MPLS_GSO)     += mpls/
+obj-$(CONFIG_MPLS)             += mpls/
 obj-$(CONFIG_HSR)              += hsr/
 ifneq ($(CONFIG_NET_SWITCHDEV),)
 obj-y                          += switchdev/
index d1c55d8dd0a2538eaabe403ceeb26896c00adf66..8ad3ec2610b6499b92b2f3bc97ac02d2d043dd45 100644 (file)
@@ -141,7 +141,7 @@ static void __aarp_send_query(struct aarp_entry *a)
        eah->pa_src_net  = sat->s_net;
        eah->pa_src_node = sat->s_node;
 
-       memset(eah->hw_dst, '\0', ETH_ALEN);
+       eth_zero_addr(eah->hw_dst);
 
        eah->pa_dst_zero = 0;
        eah->pa_dst_net  = a->target_addr.s_net;
@@ -189,7 +189,7 @@ static void aarp_send_reply(struct net_device *dev, struct atalk_addr *us,
        eah->pa_src_node = us->s_node;
 
        if (!sha)
-               memset(eah->hw_dst, '\0', ETH_ALEN);
+               eth_zero_addr(eah->hw_dst);
        else
                ether_addr_copy(eah->hw_dst, sha);
 
@@ -239,7 +239,7 @@ static void aarp_send_probe(struct net_device *dev, struct atalk_addr *us)
        eah->pa_src_net  = us->s_net;
        eah->pa_src_node = us->s_node;
 
-       memset(eah->hw_dst, '\0', ETH_ALEN);
+       eth_zero_addr(eah->hw_dst);
 
        eah->pa_dst_zero = 0;
        eah->pa_dst_net  = us->s_net;
index 4b98f897044aa6a364392bc1ec5b68a2a672a2d2..cd3b37989057fd0b1c5a8b1f49a224fe96d7ba87 100644 (file)
@@ -2001,7 +2001,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
                if (entry == NULL)
                        goto out;
                memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
-               memset(entry->mac_addr, 0, ETH_ALEN);
+               eth_zero_addr(entry->mac_addr);
                entry->recv_vcc = vcc;
                entry->old_recv_push = old_push;
                entry->status = ESI_UNKNOWN;
@@ -2086,7 +2086,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
        entry->vcc = vcc;
        entry->old_push = old_push;
        memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
-       memset(entry->mac_addr, 0, ETH_ALEN);
+       eth_zero_addr(entry->mac_addr);
        entry->status = ESI_UNKNOWN;
        hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
        entry->timer.expires = jiffies + priv->vcc_timeout_period;
index 523bce72f698ef2a34cfc66da4dbe69cde954fe9..4fd6af47383a014b72b3377fc1556c6bff18e304 100644 (file)
 #include "resources.h"
 #include "signaling.h"
 
-#undef WAIT_FOR_DEMON          /* #define this if system calls on SVC sockets
-                                  should block until the demon runs.
-                                  Danger: may cause nasty hangs if the demon
-                                  crashes. */
-
 struct atm_vcc *sigd = NULL;
-#ifdef WAIT_FOR_DEMON
-static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
-#endif
 
 static void sigd_put_skb(struct sk_buff *skb)
 {
-#ifdef WAIT_FOR_DEMON
-       DECLARE_WAITQUEUE(wait, current);
-
-       add_wait_queue(&sigd_sleep, &wait);
-       while (!sigd) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               pr_debug("atmsvc: waiting for signaling daemon...\n");
-               schedule();
-       }
-       current->state = TASK_RUNNING;
-       remove_wait_queue(&sigd_sleep, &wait);
-#else
        if (!sigd) {
                pr_debug("atmsvc: no signaling daemon\n");
                kfree_skb(skb);
                return;
        }
-#endif
        atm_force_charge(sigd, skb->truesize);
        skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
        sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
@@ -261,8 +240,5 @@ int sigd_attach(struct atm_vcc *vcc)
        vcc_insert_socket(sk_atm(vcc));
        set_bit(ATM_VF_META, &vcc->flags);
        set_bit(ATM_VF_READY, &vcc->flags);
-#ifdef WAIT_FOR_DEMON
-       wake_up(&sigd_sleep);
-#endif
        return 0;
 }
index e030c64ebfb77bfc41ca6edf6b3211b1765485ff..7c646bb2c6f70246e83c8701f8771b716f8194d7 100644 (file)
@@ -100,7 +100,7 @@ static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
        return -AX25_HEADER_LEN;        /* Unfinished header */
 }
 
-static int ax25_neigh_xmit(struct sk_buff *skb)
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
 {
        struct sk_buff *ourskb;
        unsigned char *bp  = skb->data;
@@ -210,56 +210,7 @@ put:
        if (route)
                ax25_put_route(route);
 
-       return 1;
-}
-
-static int ax25_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
-{
-       /* Except for calling ax25_neigh_xmit instead of
-        * dev_queue_xmit this is neigh_resolve_output.
-        */
-       int rc = 0;
-
-       if (!neigh_event_send(neigh, skb)) {
-               int err;
-               struct net_device *dev = neigh->dev;
-               unsigned int seq;
-
-               do {
-                       __skb_pull(skb, skb_network_offset(skb));
-                       seq = read_seqbegin(&neigh->ha_lock);
-                       err = dev_hard_header(skb, dev, ntohs(skb->protocol),
-                                             neigh->ha, NULL, skb->len);
-               } while (read_seqretry(&neigh->ha_lock, seq));
-
-               if (err >= 0) {
-                       ax25_neigh_xmit(skb);
-               } else
-                       goto out_kfree_skb;
-       }
-out:
-       return rc;
-
-out_kfree_skb:
-       rc = -EINVAL;
-       kfree_skb(skb);
-       goto out;
-}
-
-int ax25_neigh_construct(struct neighbour *neigh)
-{
-       /* This trouble could be saved if ax25 would right a proper
-        * dev_queue_xmit function.
-        */
-       struct ax25_neigh_priv *priv = neighbour_priv(neigh);
-
-       if (neigh->tbl->family != AF_INET)
-               return -EINVAL;
-
-       priv->ops = *neigh->ops;
-       priv->ops.output = ax25_neigh_output;
-       priv->ops.connected_output = ax25_neigh_output;
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 #else  /* INET */
@@ -271,9 +222,10 @@ static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
        return -AX25_HEADER_LEN;
 }
 
-int ax25_neigh_construct(struct neighbour *neigh)
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
 {
-       return 0;
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 #endif
 
@@ -282,5 +234,5 @@ const struct header_ops ax25_header_ops = {
 };
 
 EXPORT_SYMBOL(ax25_header_ops);
-EXPORT_SYMBOL(ax25_neigh_construct);
+EXPORT_SYMBOL(ax25_ip_xmit);
 
index 4b488ec261054830c6f45bfe3a6a3a9cfd65f316..6ceb5d36a32bdc375e635d34085a9b016568e16a 100644 (file)
@@ -218,7 +218,7 @@ static const struct net_device_ops bnep_netdev_ops = {
 void bnep_net_setup(struct net_device *dev)
 {
 
-       memset(dev->broadcast, 0xff, ETH_ALEN);
+       eth_broadcast_addr(dev->broadcast);
        dev->addr_len = ETH_ALEN;
 
        ether_setup(dev);
index fb57ab6b24f9ef8feea780179ad2e8284a9e532f..02c24cf63c344a3b15bcf87369da7f847150fab3 100644 (file)
@@ -190,6 +190,8 @@ static int __init br_init(void)
 {
        int err;
 
+       BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+
        err = stp_proto_register(&br_stp_proto);
        if (err < 0) {
                pr_err("bridge: can't register sap for STP\n");
index 32541d4f72e83d49258d73a460ee5ea2250ce835..3304a544233174a3d1c7474cb19fffc05483be78 100644 (file)
@@ -186,6 +186,9 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
                /* Do not flood to ports that enable proxy ARP */
                if (p->flags & BR_PROXYARP)
                        continue;
+               if ((p->flags & BR_PROXYARP_WIFI) &&
+                   BR_INPUT_SKB_CB(skb)->proxyarp_replied)
+                       continue;
 
                prev = maybe_deliver(prev, p, skb, __packet_hook);
                if (IS_ERR(prev))
index e2aa7be3a847f448a404e0a43f6d1a09f1a0517a..052c5ebbc9472c833df81e28a4895b96ba3f389c 100644 (file)
@@ -60,7 +60,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
 }
 
 static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
-                           u16 vid)
+                           u16 vid, struct net_bridge_port *p)
 {
        struct net_device *dev = br->dev;
        struct neighbour *n;
@@ -68,6 +68,8 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
        u8 *arpptr, *sha;
        __be32 sip, tip;
 
+       BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
+
        if (dev->flags & IFF_NOARP)
                return;
 
@@ -105,9 +107,12 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
                }
 
                f = __br_fdb_get(br, n->ha, vid);
-               if (f)
+               if (f && ((p->flags & BR_PROXYARP) ||
+                         (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) {
                        arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip,
                                 sha, n->ha, sha);
+                       BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+               }
 
                neigh_release(n);
        }
@@ -153,12 +158,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
        dst = NULL;
 
-       if (is_broadcast_ether_addr(dest)) {
-               if (IS_ENABLED(CONFIG_INET) &&
-                   p->flags & BR_PROXYARP &&
-                   skb->protocol == htons(ETH_P_ARP))
-                       br_do_proxy_arp(skb, br, vid);
+       if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP))
+               br_do_proxy_arp(skb, br, vid, p);
 
+       if (is_broadcast_ether_addr(dest)) {
                skb2 = skb;
                unicast = false;
        } else if (is_multicast_ether_addr(dest)) {
index 3de0eefe2b82ccf440bb757ac956032d229eec6c..8bc6b67457dc0b65b44d851d4d5a7694f3476f94 100644 (file)
@@ -81,17 +81,19 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
        struct net_port_vlans *pv;
        int num_vlan_infos;
 
+       rcu_read_lock();
        if (br_port_exists(dev))
-               pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
+               pv = nbp_get_vlan_info(br_port_get_rcu(dev));
        else if (dev->priv_flags & IFF_EBRIDGE)
                pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
        else
-               return 0;
-
-       if (!pv)
-               return 0;
+               pv = NULL;
+       if (pv)
+               num_vlan_infos = br_get_num_vlan_infos(pv, filter_mask);
+       else
+               num_vlan_infos = 0;
+       rcu_read_unlock();
 
-       num_vlan_infos = br_get_num_vlan_infos(pv, filter_mask);
        if (!num_vlan_infos)
                return 0;
 
@@ -141,7 +143,9 @@ static int br_port_fill_attrs(struct sk_buff *skb,
            nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
            nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
            nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
-           nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)))
+           nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
+           nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
+                      !!(p->flags & BR_PROXYARP_WIFI)))
                return -EMSGSIZE;
 
        return 0;
@@ -551,6 +555,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
        br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
        br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
        br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
+       br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
 
        if (tb[IFLA_BRPORT_COST]) {
                err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
index d63fc17fe4f4402494b002683c28016d51a01cb1..f0a0438dbd6d78fd89dd4d9353c31895379d7e29 100644 (file)
@@ -305,6 +305,7 @@ struct br_input_skb_cb {
 #endif
 
        u16 frag_max_size;
+       bool proxyarp_replied;
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
        bool vlan_filtered;
index 2de5d91199e8172f9356b104bbcfa772ff460d45..4905845a94e92f125accc4d1a4cf16689fcd3990 100644 (file)
@@ -171,6 +171,7 @@ BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
 BRPORT_ATTR_FLAG(learning, BR_LEARNING);
 BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
 BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP);
+BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -215,6 +216,7 @@ static const struct brport_attribute *brport_attrs[] = {
        &brport_attr_multicast_fast_leave,
 #endif
        &brport_attr_proxyarp,
+       &brport_attr_proxyarp_wifi,
        NULL
 };
 
index 8bc7caa28e64ddc32d30f0054ac9dee708ba8f3f..434ba8557826ddf160fafd08a04949d546201692 100644 (file)
@@ -84,7 +84,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
        u16 tmp;
        u16 len;
        u16 hdrchks;
-       u16 pktchks;
+       int pktchks;
        struct cffrml *this;
        this = container_obj(layr);
 
index 1be0b521ac490143e60e3b2d5b02d7fd68c24087..f6c3b2137eeaacdc5cf38d1eac03e018d8f2d37b 100644 (file)
@@ -255,9 +255,9 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt)
        return skb->len;
 }
 
-inline u16 cfpkt_iterate(struct cfpkt *pkt,
-                        u16 (*iter_func)(u16, void *, u16),
-                        u16 data)
+int cfpkt_iterate(struct cfpkt *pkt,
+                 u16 (*iter_func)(u16, void *, u16),
+                 u16 data)
 {
        /*
         * Don't care about the performance hit of linearizing,
index 5d5ab67f516dfa16ee5d86d6c312cf0a201bc3a4..ec565508e904113e65329b89dec5952bf5d41075 100644 (file)
@@ -239,6 +239,8 @@ enum {
        Opt_nocrc,
        Opt_cephx_require_signatures,
        Opt_nocephx_require_signatures,
+       Opt_tcp_nodelay,
+       Opt_notcp_nodelay,
 };
 
 static match_table_t opt_tokens = {
@@ -259,6 +261,8 @@ static match_table_t opt_tokens = {
        {Opt_nocrc, "nocrc"},
        {Opt_cephx_require_signatures, "cephx_require_signatures"},
        {Opt_nocephx_require_signatures, "nocephx_require_signatures"},
+       {Opt_tcp_nodelay, "tcp_nodelay"},
+       {Opt_notcp_nodelay, "notcp_nodelay"},
        {-1, NULL}
 };
 
@@ -457,6 +461,7 @@ ceph_parse_options(char *options, const char *dev_name,
                case Opt_nocrc:
                        opt->flags |= CEPH_OPT_NOCRC;
                        break;
+
                case Opt_cephx_require_signatures:
                        opt->flags &= ~CEPH_OPT_NOMSGAUTH;
                        break;
@@ -464,6 +469,13 @@ ceph_parse_options(char *options, const char *dev_name,
                        opt->flags |= CEPH_OPT_NOMSGAUTH;
                        break;
 
+               case Opt_tcp_nodelay:
+                       opt->flags |= CEPH_OPT_TCP_NODELAY;
+                       break;
+               case Opt_notcp_nodelay:
+                       opt->flags &= ~CEPH_OPT_TCP_NODELAY;
+                       break;
+
                default:
                        BUG_ON(token);
                }
@@ -518,10 +530,12 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
        /* msgr */
        if (ceph_test_opt(client, MYIP))
                myaddr = &client->options->my_addr;
+
        ceph_messenger_init(&client->msgr, myaddr,
                client->supported_features,
                client->required_features,
-               ceph_test_opt(client, NOCRC));
+               ceph_test_opt(client, NOCRC),
+               ceph_test_opt(client, TCP_NODELAY));
 
        /* subsystems */
        err = ceph_monc_init(&client->monc, client);
index 30560202f57b481fcee064a242ca13b55cd16f11..139a9cb19b0c6ca9b07e1184241c33b08cdd141f 100644 (file)
@@ -42,17 +42,3 @@ const char *ceph_osd_state_name(int s)
                return "???";
        }
 }
-
-const char *ceph_pool_op_name(int op)
-{
-       switch (op) {
-       case POOL_OP_CREATE: return "create";
-       case POOL_OP_DELETE: return "delete";
-       case POOL_OP_AUID_CHANGE: return "auid change";
-       case POOL_OP_CREATE_SNAP: return "create snap";
-       case POOL_OP_DELETE_SNAP: return "delete snap";
-       case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
-       case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
-       }
-       return "???";
-}
index d2d525529f8770452412e21b51cd55b3559e6192..14d9995097cc84ea33f8c6811be18bc3919fcbbd 100644 (file)
@@ -127,8 +127,6 @@ static int monc_show(struct seq_file *s, void *p)
                op = le16_to_cpu(req->request->hdr.type);
                if (op == CEPH_MSG_STATFS)
                        seq_printf(s, "%llu statfs\n", req->tid);
-               else if (op == CEPH_MSG_POOLOP)
-                       seq_printf(s, "%llu poolop\n", req->tid);
                else if (op == CEPH_MSG_MON_GET_VERSION)
                        seq_printf(s, "%llu mon_get_version", req->tid);
                else
index 33a2f201e460e1585f82db94c8f3f90f01bbacdf..6b3f54ed65ba6fc4ff392877f662ef5dddeb8939 100644 (file)
@@ -510,6 +510,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
                return ret;
        }
 
+       if (con->msgr->tcp_nodelay) {
+               int optval = 1;
+
+               ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
+                                       (char *)&optval, sizeof(optval));
+               if (ret)
+                       pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
+                              ret);
+       }
+
        sk_set_memalloc(sock->sk);
 
        con->sock = sock;
@@ -2922,7 +2932,8 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
                        struct ceph_entity_addr *myaddr,
                        u64 supported_features,
                        u64 required_features,
-                       bool nocrc)
+                       bool nocrc,
+                       bool tcp_nodelay)
 {
        msgr->supported_features = supported_features;
        msgr->required_features = required_features;
@@ -2937,6 +2948,7 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
        get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
        encode_my_addr(msgr);
        msgr->nocrc = nocrc;
+       msgr->tcp_nodelay = tcp_nodelay;
 
        atomic_set(&msgr->stopping, 0);
 
index f2148e22b14897727faeba297e045f2b933a52b1..2b3cf05e87b0fc44a150f1c314f2db98f1ab7dfc 100644 (file)
@@ -410,7 +410,7 @@ out_unlocked:
 }
 
 /*
- * generic requests (e.g., statfs, poolop)
+ * generic requests (currently statfs, mon_get_version)
  */
 static struct ceph_mon_generic_request *__lookup_generic_req(
        struct ceph_mon_client *monc, u64 tid)
@@ -569,7 +569,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
        return;
 
 bad:
-       pr_err("corrupt generic reply, tid %llu\n", tid);
+       pr_err("corrupt statfs reply, tid %llu\n", tid);
        ceph_msg_dump(msg);
 }
 
@@ -588,7 +588,6 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
 
        kref_init(&req->kref);
        req->buf = buf;
-       req->buf_len = sizeof(*buf);
        init_completion(&req->completion);
 
        err = -ENOMEM;
@@ -611,7 +610,7 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
        err = do_generic_request(monc, req);
 
 out:
-       kref_put(&req->kref, release_generic_request);
+       put_generic_request(req);
        return err;
 }
 EXPORT_SYMBOL(ceph_monc_do_statfs);
@@ -647,7 +646,7 @@ static void handle_get_version_reply(struct ceph_mon_client *monc,
 
        return;
 bad:
-       pr_err("corrupt mon_get_version reply\n");
+       pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
        ceph_msg_dump(msg);
 }
 
@@ -670,7 +669,6 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
 
        kref_init(&req->kref);
        req->buf = newest;
-       req->buf_len = sizeof(*newest);
        init_completion(&req->completion);
 
        req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
@@ -701,133 +699,11 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
 
        mutex_unlock(&monc->mutex);
 out:
-       kref_put(&req->kref, release_generic_request);
+       put_generic_request(req);
        return err;
 }
 EXPORT_SYMBOL(ceph_monc_do_get_version);
 
-/*
- * pool ops
- */
-static int get_poolop_reply_buf(const char *src, size_t src_len,
-                               char *dst, size_t dst_len)
-{
-       u32 buf_len;
-
-       if (src_len != sizeof(u32) + dst_len)
-               return -EINVAL;
-
-       buf_len = le32_to_cpu(*(__le32 *)src);
-       if (buf_len != dst_len)
-               return -EINVAL;
-
-       memcpy(dst, src + sizeof(u32), dst_len);
-       return 0;
-}
-
-static void handle_poolop_reply(struct ceph_mon_client *monc,
-                               struct ceph_msg *msg)
-{
-       struct ceph_mon_generic_request *req;
-       struct ceph_mon_poolop_reply *reply = msg->front.iov_base;
-       u64 tid = le64_to_cpu(msg->hdr.tid);
-
-       if (msg->front.iov_len < sizeof(*reply))
-               goto bad;
-       dout("handle_poolop_reply %p tid %llu\n", msg, tid);
-
-       mutex_lock(&monc->mutex);
-       req = __lookup_generic_req(monc, tid);
-       if (req) {
-               if (req->buf_len &&
-                   get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply),
-                                    msg->front.iov_len - sizeof(*reply),
-                                    req->buf, req->buf_len) < 0) {
-                       mutex_unlock(&monc->mutex);
-                       goto bad;
-               }
-               req->result = le32_to_cpu(reply->reply_code);
-               get_generic_request(req);
-       }
-       mutex_unlock(&monc->mutex);
-       if (req) {
-               complete(&req->completion);
-               put_generic_request(req);
-       }
-       return;
-
-bad:
-       pr_err("corrupt generic reply, tid %llu\n", tid);
-       ceph_msg_dump(msg);
-}
-
-/*
- * Do a synchronous pool op.
- */
-static int do_poolop(struct ceph_mon_client *monc, u32 op,
-                       u32 pool, u64 snapid,
-                       char *buf, int len)
-{
-       struct ceph_mon_generic_request *req;
-       struct ceph_mon_poolop *h;
-       int err;
-
-       req = kzalloc(sizeof(*req), GFP_NOFS);
-       if (!req)
-               return -ENOMEM;
-
-       kref_init(&req->kref);
-       req->buf = buf;
-       req->buf_len = len;
-       init_completion(&req->completion);
-
-       err = -ENOMEM;
-       req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS,
-                                   true);
-       if (!req->request)
-               goto out;
-       req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS,
-                                 true);
-       if (!req->reply)
-               goto out;
-
-       /* fill out request */
-       req->request->hdr.version = cpu_to_le16(2);
-       h = req->request->front.iov_base;
-       h->monhdr.have_version = 0;
-       h->monhdr.session_mon = cpu_to_le16(-1);
-       h->monhdr.session_mon_tid = 0;
-       h->fsid = monc->monmap->fsid;
-       h->pool = cpu_to_le32(pool);
-       h->op = cpu_to_le32(op);
-       h->auid = 0;
-       h->snapid = cpu_to_le64(snapid);
-       h->name_len = 0;
-
-       err = do_generic_request(monc, req);
-
-out:
-       kref_put(&req->kref, release_generic_request);
-       return err;
-}
-
-int ceph_monc_create_snapid(struct ceph_mon_client *monc,
-                           u32 pool, u64 *snapid)
-{
-       return do_poolop(monc,  POOL_OP_CREATE_UNMANAGED_SNAP,
-                                  pool, 0, (char *)snapid, sizeof(*snapid));
-
-}
-EXPORT_SYMBOL(ceph_monc_create_snapid);
-
-int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
-                           u32 pool, u64 snapid)
-{
-       return do_poolop(monc,  POOL_OP_CREATE_UNMANAGED_SNAP,
-                                  pool, snapid, NULL, 0);
-
-}
-
 /*
  * Resend pending generic requests.
  */
@@ -1112,10 +988,6 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
                handle_get_version_reply(monc, msg);
                break;
 
-       case CEPH_MSG_POOLOP_REPLY:
-               handle_poolop_reply(monc, msg);
-               break;
-
        case CEPH_MSG_MON_MAP:
                ceph_monc_handle_map(monc, msg);
                break;
@@ -1154,7 +1026,6 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
        case CEPH_MSG_MON_SUBSCRIBE_ACK:
                m = ceph_msg_get(monc->m_subscribe_ack);
                break;
-       case CEPH_MSG_POOLOP_REPLY:
        case CEPH_MSG_STATFS_REPLY:
                return get_generic_reply(con, hdr, skip);
        case CEPH_MSG_AUTH_REPLY:
index 53299c7b0ca4a516ba48a7c886b8b9539bb29518..41a4abc7e98eebfd36487d6d381f680732d4cd68 100644 (file)
@@ -1035,10 +1035,11 @@ static void put_osd(struct ceph_osd *osd)
 {
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
-       if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
+       if (atomic_dec_and_test(&osd->o_ref)) {
                struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
 
-               ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+               if (osd->o_auth.authorizer)
+                       ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
                kfree(osd);
        }
 }
@@ -1048,14 +1049,24 @@ static void put_osd(struct ceph_osd *osd)
  */
 static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
-       dout("__remove_osd %p\n", osd);
+       dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
        WARN_ON(!list_empty(&osd->o_requests));
        WARN_ON(!list_empty(&osd->o_linger_requests));
 
-       rb_erase(&osd->o_node, &osdc->osds);
        list_del_init(&osd->o_osd_lru);
-       ceph_con_close(&osd->o_con);
-       put_osd(osd);
+       rb_erase(&osd->o_node, &osdc->osds);
+       RB_CLEAR_NODE(&osd->o_node);
+}
+
+static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+{
+       dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
+
+       if (!RB_EMPTY_NODE(&osd->o_node)) {
+               ceph_con_close(&osd->o_con);
+               __remove_osd(osdc, osd);
+               put_osd(osd);
+       }
 }
 
 static void remove_all_osds(struct ceph_osd_client *osdc)
@@ -1065,7 +1076,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc)
        while (!RB_EMPTY_ROOT(&osdc->osds)) {
                struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
                                                struct ceph_osd, o_node);
-               __remove_osd(osdc, osd);
+               remove_osd(osdc, osd);
        }
        mutex_unlock(&osdc->request_mutex);
 }
@@ -1106,7 +1117,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
                if (time_before(jiffies, osd->lru_ttl))
                        break;
-               __remove_osd(osdc, osd);
+               remove_osd(osdc, osd);
        }
        mutex_unlock(&osdc->request_mutex);
 }
@@ -1121,8 +1132,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
        dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
        if (list_empty(&osd->o_requests) &&
            list_empty(&osd->o_linger_requests)) {
-               __remove_osd(osdc, osd);
-
+               remove_osd(osdc, osd);
                return -ENODEV;
        }
 
@@ -1926,6 +1936,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
 {
        struct rb_node *p, *n;
 
+       dout("%s %p\n", __func__, osdc);
        for (p = rb_first(&osdc->osds); p; p = n) {
                struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
 
index 49c6a8fb9f094fa7ef22a64833e0b9c8cc98632d..478443182bbe0103d4ffa46b37fd32f07c24eebe 100644 (file)
@@ -711,24 +711,18 @@ static unsigned char nas[21] = {
 
 COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
-       if (flags & MSG_CMSG_COMPAT)
-               return -EINVAL;
        return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
                       unsigned int, vlen, unsigned int, flags)
 {
-       if (flags & MSG_CMSG_COMPAT)
-               return -EINVAL;
        return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
                              flags | MSG_CMSG_COMPAT);
 }
 
 COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
-       if (flags & MSG_CMSG_COMPAT)
-               return -EINVAL;
        return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
@@ -751,9 +745,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
        int datagrams;
        struct timespec ktspec;
 
-       if (flags & MSG_CMSG_COMPAT)
-               return -EINVAL;
-
        if (timeout == NULL)
                return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
                                      flags | MSG_CMSG_COMPAT, NULL);
index 8f9710c62e20d58bcdcec3d184ca6344fbe5a57c..962ee9d719641291853715f366717bf1626e115c 100644 (file)
@@ -946,7 +946,7 @@ bool dev_valid_name(const char *name)
                return false;
 
        while (*name) {
-               if (*name == '/' || isspace(*name))
+               if (*name == '/' || *name == ':' || isspace(*name))
                        return false;
                name++;
        }
index eb0c3ace7458cb45c37db30700e495257a804f16..1d00b89229024b45fef3955cd27221fafe2bfb74 100644 (file)
@@ -98,6 +98,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_RXALL_BIT] =            "rx-all",
        [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
        [NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
+       [NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
 };
 
 static const char
index 0c08062d1796337f25b9dbc2cbce68d5e3194037..1e2f46a69d50196f71f1fb7ae97e7732c2a8a059 100644 (file)
@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
        return 0;
 
 nla_put_failure:
+       kfree(d->xstats);
+       d->xstats = NULL;
+       d->xstats_len = 0;
        spin_unlock_bh(d->lock);
        return -1;
 }
@@ -305,7 +308,9 @@ int
 gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
 {
        if (d->compat_xstats) {
-               d->xstats = st;
+               d->xstats = kmemdup(st, len, GFP_ATOMIC);
+               if (!d->xstats)
+                       goto err_out;
                d->xstats_len = len;
        }
 
@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
                return gnet_stats_copy(d, TCA_STATS_APP, st, len);
 
        return 0;
+
+err_out:
+       d->xstats_len = 0;
+       spin_unlock_bh(d->lock);
+       return -1;
 }
 EXPORT_SYMBOL(gnet_stats_copy_app);
 
@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
                        return -1;
        }
 
+       kfree(d->xstats);
+       d->xstats = NULL;
+       d->xstats_len = 0;
        spin_unlock_bh(d->lock);
        return 0;
 }
index 0f48ea3affed3f53871a2ab01972b32a01c6ed31..ad07990e943da544dd292e29bf132adb6f2cc0c8 100644 (file)
@@ -397,25 +397,15 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
 {
        struct neighbour *n;
-       int key_len = tbl->key_len;
-       u32 hash_val;
-       struct neigh_hash_table *nht;
 
        NEIGH_CACHE_STAT_INC(tbl, lookups);
 
        rcu_read_lock_bh();
-       nht = rcu_dereference_bh(tbl->nht);
-       hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
-
-       for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
-            n != NULL;
-            n = rcu_dereference_bh(n->next)) {
-               if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
-                       if (!atomic_inc_not_zero(&n->refcnt))
-                               n = NULL;
-                       NEIGH_CACHE_STAT_INC(tbl, hits);
-                       break;
-               }
+       n = __neigh_lookup_noref(tbl, pkey, dev);
+       if (n) {
+               if (!atomic_inc_not_zero(&n->refcnt))
+                       n = NULL;
+               NEIGH_CACHE_STAT_INC(tbl, hits);
        }
 
        rcu_read_unlock_bh();
@@ -2401,6 +2391,40 @@ void __neigh_for_each_release(struct neigh_table *tbl,
 }
 EXPORT_SYMBOL(__neigh_for_each_release);
 
+int neigh_xmit(int index, struct net_device *dev,
+              const void *addr, struct sk_buff *skb)
+{
+       int err = -EAFNOSUPPORT;
+       if (likely(index < NEIGH_NR_TABLES)) {
+               struct neigh_table *tbl;
+               struct neighbour *neigh;
+
+               tbl = neigh_tables[index];
+               if (!tbl)
+                       goto out;
+               neigh = __neigh_lookup_noref(tbl, addr, dev);
+               if (!neigh)
+                       neigh = __neigh_create(tbl, addr, dev, false);
+               err = PTR_ERR(neigh);
+               if (IS_ERR(neigh))
+                       goto out_kfree_skb;
+               err = neigh->output(neigh, skb);
+       }
+       else if (index == NEIGH_LINK_TABLE) {
+               err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+                                     addr, NULL, skb->len);
+               if (err < 0)
+                       goto out_kfree_skb;
+               err = dev_queue_xmit(skb);
+       }
+out:
+       return err;
+out_kfree_skb:
+       kfree_skb(skb);
+       goto out;
+}
+EXPORT_SYMBOL(neigh_xmit);
+
 #ifdef CONFIG_PROC_FS
 
 static struct neighbour *neigh_get_first(struct seq_file *seq)
index b4899f5b7388e8f0c825a433a1f633d6b087a0f9..508155b283ddcc73a967a2bc8068e67cb8cada7d 100644 (file)
@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file,
                        return len;
 
                i += len;
+               if ((value > 1) &&
+                   (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+                       return -ENOTSUPP;
                pkt_dev->burst = value < 1 ? 1 : value;
                sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
                return count;
index ab293a3066b34bc4f6af71701f0c12b9ab6e5a34..25b4b5d2348595d0a609733b4e4fc6d83a282c93 100644 (file)
@@ -1300,7 +1300,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        s_h = cb->args[0];
        s_idx = cb->args[1];
 
-       rcu_read_lock();
        cb->seq = net->dev_base_seq;
 
        /* A hack to preserve kernel<->userspace interface.
@@ -1322,7 +1321,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
-               hlist_for_each_entry_rcu(dev, head, index_hlist) {
+               hlist_for_each_entry(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
                        err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1344,7 +1343,6 @@ cont:
                }
        }
 out:
-       rcu_read_unlock();
        cb->args[1] = idx;
        cb->args[0] = h;
 
@@ -2012,8 +2010,8 @@ replay:
        }
 
        if (1) {
-               struct nlattr *attr[ops ? ops->maxtype + 1 : 0];
-               struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0];
+               struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
+               struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
                struct nlattr **data = NULL;
                struct nlattr **slave_data = NULL;
                struct net *dest_net, *link_net = NULL;
@@ -2122,6 +2120,10 @@ replay:
                if (IS_ERR(dest_net))
                        return PTR_ERR(dest_net);
 
+               err = -EPERM;
+               if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
+                       goto out;
+
                if (tb[IFLA_LINK_NETNSID]) {
                        int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
 
@@ -2130,6 +2132,9 @@ replay:
                                err =  -EINVAL;
                                goto out;
                        }
+                       err = -EPERM;
+                       if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
+                               goto out;
                }
 
                dev = rtnl_create_link(link_net ? : dest_net, ifname,
index 374e43bc6b804b5967398c474f5a4c48690da3f1..47c32413d5b94c4911939f98772a8ab1d98c3740 100644 (file)
@@ -3206,10 +3206,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
-       struct sk_buff *nskb, *lp, *p = *head;
        unsigned int len = skb_gro_len(skb);
+       struct sk_buff *lp, *p = *head;
        unsigned int delta_truesize;
-       unsigned int headroom;
 
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
@@ -3276,48 +3275,6 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }
-       /* switch back to head shinfo */
-       pinfo = skb_shinfo(p);
-
-       if (pinfo->frag_list)
-               goto merge;
-       if (skb_gro_len(p) != pinfo->gso_size)
-               return -E2BIG;
-
-       headroom = skb_headroom(p);
-       nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
-       if (unlikely(!nskb))
-               return -ENOMEM;
-
-       __copy_skb_header(nskb, p);
-       nskb->mac_len = p->mac_len;
-
-       skb_reserve(nskb, headroom);
-       __skb_put(nskb, skb_gro_offset(p));
-
-       skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
-       skb_set_network_header(nskb, skb_network_offset(p));
-       skb_set_transport_header(nskb, skb_transport_offset(p));
-
-       __skb_pull(p, skb_gro_offset(p));
-       memcpy(skb_mac_header(nskb), skb_mac_header(p),
-              p->data - skb_mac_header(p));
-
-       skb_shinfo(nskb)->frag_list = p;
-       skb_shinfo(nskb)->gso_size = pinfo->gso_size;
-       pinfo->gso_size = 0;
-       __skb_header_release(p);
-       NAPI_GRO_CB(nskb)->last = p;
-
-       nskb->data_len += p->len;
-       nskb->truesize += p->truesize;
-       nskb->len += p->len;
-
-       *head = nskb;
-       nskb->next = p->next;
-       p->next = NULL;
-
-       p = nskb;
 
 merge:
        delta_truesize = skb->truesize;
@@ -3620,13 +3577,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 {
        struct sk_buff_head *q = &sk->sk_error_queue;
        struct sk_buff *skb, *skb_next;
+       unsigned long flags;
        int err = 0;
 
-       spin_lock_bh(&q->lock);
+       spin_lock_irqsave(&q->lock, flags);
        skb = __skb_dequeue(q);
        if (skb && (skb_next = skb_peek(q)))
                err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
-       spin_unlock_bh(&q->lock);
+       spin_unlock_irqrestore(&q->lock, flags);
 
        sk->sk_err = err;
        if (err)
index 93ea80196f0ec383cca46d28bf8c4c96d0310b25..5b21f6f88e9798b839a60a41c4e6ccda58bcf1f0 100644 (file)
@@ -177,6 +177,8 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
        [DCB_ATTR_IEEE_PFC]         = {.len = sizeof(struct ieee_pfc)},
        [DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
        [DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
+       [DCB_ATTR_IEEE_QCN]         = {.len = sizeof(struct ieee_qcn)},
+       [DCB_ATTR_IEEE_QCN_STATS]   = {.len = sizeof(struct ieee_qcn_stats)},
 };
 
 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -1030,7 +1032,7 @@ nla_put_failure:
        return err;
 }
 
-/* Handle IEEE 802.1Qaz GET commands. */
+/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 {
        struct nlattr *ieee, *app;
@@ -1067,6 +1069,32 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
                }
        }
 
+       if (ops->ieee_getqcn) {
+               struct ieee_qcn qcn;
+
+               memset(&qcn, 0, sizeof(qcn));
+               err = ops->ieee_getqcn(netdev, &qcn);
+               if (!err) {
+                       err = nla_put(skb, DCB_ATTR_IEEE_QCN,
+                                     sizeof(qcn), &qcn);
+                       if (err)
+                               return -EMSGSIZE;
+               }
+       }
+
+       if (ops->ieee_getqcnstats) {
+               struct ieee_qcn_stats qcn_stats;
+
+               memset(&qcn_stats, 0, sizeof(qcn_stats));
+               err = ops->ieee_getqcnstats(netdev, &qcn_stats);
+               if (!err) {
+                       err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
+                                     sizeof(qcn_stats), &qcn_stats);
+                       if (err)
+                               return -EMSGSIZE;
+               }
+       }
+
        if (ops->ieee_getpfc) {
                struct ieee_pfc pfc;
                memset(&pfc, 0, sizeof(pfc));
@@ -1379,8 +1407,9 @@ int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
 }
 EXPORT_SYMBOL(dcbnl_cee_notify);
 
-/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
- * be completed the entire msg is aborted and error value is returned.
+/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
+ * If any requested operation can not be completed
+ * the entire msg is aborted and error value is returned.
  * No attempt is made to reconcile the case where only part of the
  * cmd can be completed.
  */
@@ -1417,6 +1446,15 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
                        goto err;
        }
 
+       if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
+               struct ieee_qcn *qcn =
+                       nla_data(ieee[DCB_ATTR_IEEE_QCN]);
+
+               err = ops->ieee_setqcn(netdev, qcn);
+               if (err)
+                       goto err;
+       }
+
        if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
                struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
                err = ops->ieee_setpfc(netdev, pfc);
index f123c6c6748c10b98e7e7500c108936dc634a128..be1f08cdad29135238c59c52f7c6bbc6548d39d2 100644 (file)
 #include <net/dn_route.h>
 
 static int dn_neigh_construct(struct neighbour *);
-static void dn_long_error_report(struct neighbour *, struct sk_buff *);
-static void dn_short_error_report(struct neighbour *, struct sk_buff *);
-static int dn_long_output(struct neighbour *, struct sk_buff *);
-static int dn_short_output(struct neighbour *, struct sk_buff *);
-static int dn_phase3_output(struct neighbour *, struct sk_buff *);
-
-
-/*
- * For talking to broadcast devices: Ethernet & PPP
- */
-static const struct neigh_ops dn_long_ops = {
-       .family =               AF_DECnet,
-       .error_report =         dn_long_error_report,
-       .output =               dn_long_output,
-       .connected_output =     dn_long_output,
-};
+static void dn_neigh_error_report(struct neighbour *, struct sk_buff *);
+static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb);
 
 /*
- * For talking to pointopoint and multidrop devices: DDCMP and X.25
+ * Operations for adding the link layer header.
  */
-static const struct neigh_ops dn_short_ops = {
+static const struct neigh_ops dn_neigh_ops = {
        .family =               AF_DECnet,
-       .error_report =         dn_short_error_report,
-       .output =               dn_short_output,
-       .connected_output =     dn_short_output,
-};
-
-/*
- * For talking to DECnet phase III nodes
- */
-static const struct neigh_ops dn_phase3_ops = {
-       .family =               AF_DECnet,
-       .error_report =         dn_short_error_report, /* Can use short version here */
-       .output =               dn_phase3_output,
-       .connected_output =     dn_phase3_output,
+       .error_report =         dn_neigh_error_report,
+       .output =               dn_neigh_output,
+       .connected_output =     dn_neigh_output,
 };
 
 static u32 dn_neigh_hash(const void *pkey,
@@ -93,12 +69,18 @@ static u32 dn_neigh_hash(const void *pkey,
        return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]);
 }
 
+static bool dn_key_eq(const struct neighbour *neigh, const void *pkey)
+{
+       return neigh_key_eq16(neigh, pkey);
+}
+
 struct neigh_table dn_neigh_table = {
        .family =                       PF_DECnet,
        .entry_size =                   NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)),
        .key_len =                      sizeof(__le16),
        .protocol =                     cpu_to_be16(ETH_P_DNA_RT),
        .hash =                         dn_neigh_hash,
+       .key_eq =                       dn_key_eq,
        .constructor =                  dn_neigh_construct,
        .id =                           "dn_neigh_cache",
        .parms ={
@@ -147,16 +129,9 @@ static int dn_neigh_construct(struct neighbour *neigh)
 
        __neigh_parms_put(neigh->parms);
        neigh->parms = neigh_parms_clone(parms);
-
-       if (dn_db->use_long)
-               neigh->ops = &dn_long_ops;
-       else
-               neigh->ops = &dn_short_ops;
        rcu_read_unlock();
 
-       if (dn->flags & DN_NDFLAG_P3)
-               neigh->ops = &dn_phase3_ops;
-
+       neigh->ops = &dn_neigh_ops;
        neigh->nud_state = NUD_NOARP;
        neigh->output = neigh->ops->connected_output;
 
@@ -188,24 +163,16 @@ static int dn_neigh_construct(struct neighbour *neigh)
        return 0;
 }
 
-static void dn_long_error_report(struct neighbour *neigh, struct sk_buff *skb)
+static void dn_neigh_error_report(struct neighbour *neigh, struct sk_buff *skb)
 {
-       printk(KERN_DEBUG "dn_long_error_report: called\n");
+       printk(KERN_DEBUG "dn_neigh_error_report: called\n");
        kfree_skb(skb);
 }
 
-
-static void dn_short_error_report(struct neighbour *neigh, struct sk_buff *skb)
-{
-       printk(KERN_DEBUG "dn_short_error_report: called\n");
-       kfree_skb(skb);
-}
-
-static int dn_neigh_output_packet(struct sk_buff *skb)
+static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
-       struct neighbour *neigh = rt->n;
        struct net_device *dev = neigh->dev;
        char mac_addr[ETH_ALEN];
        unsigned int seq;
@@ -227,6 +194,18 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
        return err;
 }
 
+static int dn_neigh_output_packet(struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+       struct dn_route *rt = (struct dn_route *)dst;
+       struct neighbour *neigh = rt->n;
+
+       return neigh->output(neigh, skb);
+}
+
+/*
+ * For talking to broadcast devices: Ethernet & PPP
+ */
 static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
 {
        struct net_device *dev = neigh->dev;
@@ -270,6 +249,9 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
                       neigh->dev, dn_neigh_output_packet);
 }
 
+/*
+ * For talking to pointopoint and multidrop devices: DDCMP and X.25
+ */
 static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
 {
        struct net_device *dev = neigh->dev;
@@ -307,7 +289,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
 }
 
 /*
- * Phase 3 output is the same is short output, execpt that
+ * For talking to DECnet phase III nodes
+ * Phase 3 output is the same as short output, execpt that
  * it clears the area bits before transmission.
  */
 static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
@@ -345,6 +328,32 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
                       neigh->dev, dn_neigh_output_packet);
 }
 
+int dn_to_neigh_output(struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+       struct dn_route *rt = (struct dn_route *) dst;
+       struct neighbour *neigh = rt->n;
+       struct dn_neigh *dn = (struct dn_neigh *)neigh;
+       struct dn_dev *dn_db;
+       bool use_long;
+
+       rcu_read_lock();
+       dn_db = rcu_dereference(neigh->dev->dn_ptr);
+       if (dn_db == NULL) {
+               rcu_read_unlock();
+               return -EINVAL;
+       }
+       use_long = dn_db->use_long;
+       rcu_read_unlock();
+
+       if (dn->flags & DN_NDFLAG_P3)
+               return dn_phase3_output(neigh, skb);
+       if (use_long)
+               return dn_long_output(neigh, skb);
+       else
+               return dn_short_output(neigh, skb);
+}
+
 /*
  * Unfortunately, the neighbour code uses the device in its hash
  * function, so we don't get any advantage from it. This function
index 1d7c1256e8458d35e4a9f9daa392aba37672e1bf..771815575dbdce112ea96af3aaf88f8cf9e8b9e3 100644 (file)
@@ -743,15 +743,6 @@ out:
        return NET_RX_DROP;
 }
 
-static int dn_to_neigh_output(struct sk_buff *skb)
-{
-       struct dst_entry *dst = skb_dst(skb);
-       struct dn_route *rt = (struct dn_route *) dst;
-       struct neighbour *n = rt->n;
-
-       return n->output(n, skb);
-}
-
 static int dn_output(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
@@ -1062,7 +1053,7 @@ source_ok:
        if (decnet_debug_level & 16)
                printk(KERN_DEBUG
                       "dn_route_output_slow: initial checks complete."
-                      " dst=%o4x src=%04x oif=%d try_hard=%d\n",
+                      " dst=%04x src=%04x oif=%d try_hard=%d\n",
                       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
                       fld.flowidn_oif, try_hard);
 
index a1d1f0775bea98df88c1ff1bd775d360393d23fe..b40f11bb419c1d9a2796b4a57d6978895639f6d4 100644 (file)
@@ -175,43 +175,14 @@ __ATTRIBUTE_GROUPS(dsa_hwmon);
 #endif /* CONFIG_NET_DSA_HWMON */
 
 /* basic switch operations **************************************************/
-static struct dsa_switch *
-dsa_switch_setup(struct dsa_switch_tree *dst, int index,
-                struct device *parent, struct device *host_dev)
+static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 {
-       struct dsa_chip_data *pd = dst->pd->chip + index;
-       struct dsa_switch_driver *drv;
-       struct dsa_switch *ds;
-       int ret;
-       char *name;
-       int i;
+       struct dsa_switch_driver *drv = ds->drv;
+       struct dsa_switch_tree *dst = ds->dst;
+       struct dsa_chip_data *pd = ds->pd;
        bool valid_name_found = false;
-
-       /*
-        * Probe for switch model.
-        */
-       drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
-       if (drv == NULL) {
-               netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
-                          index);
-               return ERR_PTR(-EINVAL);
-       }
-       netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
-                   index, name);
-
-
-       /*
-        * Allocate and initialise switch state.
-        */
-       ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
-       if (ds == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       ds->dst = dst;
-       ds->index = index;
-       ds->pd = dst->pd->chip + index;
-       ds->drv = drv;
-       ds->master_dev = host_dev;
+       int index = ds->index;
+       int i, ret;
 
        /*
         * Validate supplied switch configuration.
@@ -256,7 +227,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
         * switch.
         */
        if (dst->cpu_switch == index) {
-               switch (drv->tag_protocol) {
+               switch (ds->tag_protocol) {
 #ifdef CONFIG_NET_DSA_TAG_DSA
                case DSA_TAG_PROTO_DSA:
                        dst->rcv = dsa_netdev_ops.rcv;
@@ -284,7 +255,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
                        goto out;
                }
 
-               dst->tag_protocol = drv->tag_protocol;
+               dst->tag_protocol = ds->tag_protocol;
        }
 
        /*
@@ -350,13 +321,57 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
        }
 #endif /* CONFIG_NET_DSA_HWMON */
 
-       return ds;
+       return ret;
 
 out_free:
        mdiobus_free(ds->slave_mii_bus);
 out:
        kfree(ds);
-       return ERR_PTR(ret);
+       return ret;
+}
+
+static struct dsa_switch *
+dsa_switch_setup(struct dsa_switch_tree *dst, int index,
+                struct device *parent, struct device *host_dev)
+{
+       struct dsa_chip_data *pd = dst->pd->chip + index;
+       struct dsa_switch_driver *drv;
+       struct dsa_switch *ds;
+       int ret;
+       char *name;
+
+       /*
+        * Probe for switch model.
+        */
+       drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
+       if (drv == NULL) {
+               netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
+                          index);
+               return ERR_PTR(-EINVAL);
+       }
+       netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
+                   index, name);
+
+
+       /*
+        * Allocate and initialise switch state.
+        */
+       ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+       if (ds == NULL)
+               return NULL;
+
+       ds->dst = dst;
+       ds->index = index;
+       ds->pd = pd;
+       ds->drv = drv;
+       ds->tag_protocol = drv->tag_protocol;
+       ds->master_dev = host_dev;
+
+       ret = dsa_switch_setup_one(ds, parent);
+       if (ret)
+               return NULL;
+
+       return ds;
 }
 
 static void dsa_switch_destroy(struct dsa_switch *ds)
@@ -563,9 +578,9 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
        kfree(pd->chip);
 }
 
-static int dsa_of_probe(struct platform_device *pdev)
+static int dsa_of_probe(struct device *dev)
 {
-       struct device_node *np = pdev->dev.of_node;
+       struct device_node *np = dev->of_node;
        struct device_node *child, *mdio, *ethernet, *port, *link;
        struct mii_bus *mdio_bus;
        struct platform_device *ethernet_dev;
@@ -583,7 +598,7 @@ static int dsa_of_probe(struct platform_device *pdev)
 
        mdio_bus = of_mdio_find_bus(mdio);
        if (!mdio_bus)
-               return -EINVAL;
+               return -EPROBE_DEFER;
 
        ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
        if (!ethernet)
@@ -591,13 +606,13 @@ static int dsa_of_probe(struct platform_device *pdev)
 
        ethernet_dev = of_find_device_by_node(ethernet);
        if (!ethernet_dev)
-               return -ENODEV;
+               return -EPROBE_DEFER;
 
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return -ENOMEM;
 
-       pdev->dev.platform_data = pd;
+       dev->platform_data = pd;
        pd->netdev = &ethernet_dev->dev;
        pd->nr_chips = of_get_available_child_count(np);
        if (pd->nr_chips > DSA_MAX_SWITCHES)
@@ -670,43 +685,86 @@ out_free_chip:
        dsa_of_free_platform_data(pd);
 out_free:
        kfree(pd);
-       pdev->dev.platform_data = NULL;
+       dev->platform_data = NULL;
        return ret;
 }
 
-static void dsa_of_remove(struct platform_device *pdev)
+static void dsa_of_remove(struct device *dev)
 {
-       struct dsa_platform_data *pd = pdev->dev.platform_data;
+       struct dsa_platform_data *pd = dev->platform_data;
 
-       if (!pdev->dev.of_node)
+       if (!dev->of_node)
                return;
 
        dsa_of_free_platform_data(pd);
        kfree(pd);
 }
 #else
-static inline int dsa_of_probe(struct platform_device *pdev)
+static inline int dsa_of_probe(struct device *dev)
 {
        return 0;
 }
 
-static inline void dsa_of_remove(struct platform_device *pdev)
+static inline void dsa_of_remove(struct device *dev)
 {
 }
 #endif
 
+static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
+                         struct device *parent, struct dsa_platform_data *pd)
+{
+       int i;
+
+       dst->pd = pd;
+       dst->master_netdev = dev;
+       dst->cpu_switch = -1;
+       dst->cpu_port = -1;
+
+       for (i = 0; i < pd->nr_chips; i++) {
+               struct dsa_switch *ds;
+
+               ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev);
+               if (IS_ERR(ds)) {
+                       netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n",
+                                  i, PTR_ERR(ds));
+                       continue;
+               }
+
+               dst->ds[i] = ds;
+               if (ds->drv->poll_link != NULL)
+                       dst->link_poll_needed = 1;
+       }
+
+       /*
+        * If we use a tagging format that doesn't have an ethertype
+        * field, make sure that all packets from this point on get
+        * sent to the tag format's receive function.
+        */
+       wmb();
+       dev->dsa_ptr = (void *)dst;
+
+       if (dst->link_poll_needed) {
+               INIT_WORK(&dst->link_poll_work, dsa_link_poll_work);
+               init_timer(&dst->link_poll_timer);
+               dst->link_poll_timer.data = (unsigned long)dst;
+               dst->link_poll_timer.function = dsa_link_poll_timer;
+               dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
+               add_timer(&dst->link_poll_timer);
+       }
+}
+
 static int dsa_probe(struct platform_device *pdev)
 {
        struct dsa_platform_data *pd = pdev->dev.platform_data;
        struct net_device *dev;
        struct dsa_switch_tree *dst;
-       int i, ret;
+       int ret;
 
        pr_notice_once("Distributed Switch Architecture driver version %s\n",
                       dsa_driver_version);
 
        if (pdev->dev.of_node) {
-               ret = dsa_of_probe(pdev);
+               ret = dsa_of_probe(&pdev->dev);
                if (ret)
                        return ret;
 
@@ -718,7 +776,7 @@ static int dsa_probe(struct platform_device *pdev)
 
        dev = dev_to_net_device(pd->netdev);
        if (dev == NULL) {
-               ret = -EINVAL;
+               ret = -EPROBE_DEFER;
                goto out;
        }
 
@@ -737,54 +795,18 @@ static int dsa_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dst);
 
-       dst->pd = pd;
-       dst->master_netdev = dev;
-       dst->cpu_switch = -1;
-       dst->cpu_port = -1;
-
-       for (i = 0; i < pd->nr_chips; i++) {
-               struct dsa_switch *ds;
-
-               ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev);
-               if (IS_ERR(ds)) {
-                       netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n",
-                                  i, PTR_ERR(ds));
-                       continue;
-               }
-
-               dst->ds[i] = ds;
-               if (ds->drv->poll_link != NULL)
-                       dst->link_poll_needed = 1;
-       }
-
-       /*
-        * If we use a tagging format that doesn't have an ethertype
-        * field, make sure that all packets from this point on get
-        * sent to the tag format's receive function.
-        */
-       wmb();
-       dev->dsa_ptr = (void *)dst;
-
-       if (dst->link_poll_needed) {
-               INIT_WORK(&dst->link_poll_work, dsa_link_poll_work);
-               init_timer(&dst->link_poll_timer);
-               dst->link_poll_timer.data = (unsigned long)dst;
-               dst->link_poll_timer.function = dsa_link_poll_timer;
-               dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
-               add_timer(&dst->link_poll_timer);
-       }
+       dsa_setup_dst(dst, dev, &pdev->dev, pd);
 
        return 0;
 
 out:
-       dsa_of_remove(pdev);
+       dsa_of_remove(&pdev->dev);
 
        return ret;
 }
 
-static int dsa_remove(struct platform_device *pdev)
+static void dsa_remove_dst(struct dsa_switch_tree *dst)
 {
-       struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
        int i;
 
        if (dst->link_poll_needed)
@@ -798,8 +820,14 @@ static int dsa_remove(struct platform_device *pdev)
                if (ds != NULL)
                        dsa_switch_destroy(ds);
        }
+}
+
+static int dsa_remove(struct platform_device *pdev)
+{
+       struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
 
-       dsa_of_remove(pdev);
+       dsa_remove_dst(dst);
+       dsa_of_remove(&pdev->dev);
 
        return 0;
 }
index 8dbdf6c910b7e5ba44827000e70e6bf15e97a1ce..f3bad41d725f449f91d0b1b4f7119a9c9660e976 100644 (file)
@@ -104,7 +104,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
         */
 
        if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
-               memset(eth->h_dest, 0, ETH_ALEN);
+               eth_zero_addr(eth->h_dest);
                return ETH_HLEN;
        }
 
@@ -357,7 +357,7 @@ void ether_setup(struct net_device *dev)
        dev->flags              = IFF_BROADCAST|IFF_MULTICAST;
        dev->priv_flags         |= IFF_TX_SKB_SHARING;
 
-       memset(dev->broadcast, 0xFF, ETH_ALEN);
+       eth_broadcast_addr(dev->broadcast);
 
 }
 EXPORT_SYMBOL(ether_setup);
index a138d75751df2fb46219168c01fd1bf5cce24d43..44d27469ae55982d1895021b79ba76a85c1324a8 100644 (file)
@@ -359,8 +359,11 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
        struct hsr_port *port;
 
        hsr = netdev_priv(hsr_dev);
+
+       rtnl_lock();
        hsr_for_each_port(hsr, port)
                hsr_del_port(port);
+       rtnl_unlock();
 
        del_timer_sync(&hsr->prune_timer);
        del_timer_sync(&hsr->announce_timer);
index 779d28b65417a6e62b687d8f5ea36d6be285f417..cd37d0011b424824fd113ffd4da59f36c116996a 100644 (file)
@@ -36,6 +36,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                        return NOTIFY_DONE;     /* Not an HSR device */
                hsr = netdev_priv(dev);
                port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+               if (port == NULL) {
+                       /* Resend of notification concerning removed device? */
+                       return NOTIFY_DONE;
+               }
        } else {
                hsr = port->hsr;
        }
index a348dcbcd683e6858248bf17ee73e7e24d08b4ea..7d37366cc695554ae243f940869b46d26f598b65 100644 (file)
@@ -181,8 +181,10 @@ void hsr_del_port(struct hsr_port *port)
        list_del_rcu(&port->port_list);
 
        if (port != master) {
-               netdev_update_features(master->dev);
-               dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+               if (master != NULL) {
+                       netdev_update_features(master->dev);
+                       dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+               }
                netdev_rx_handler_unregister(port->dev);
                dev_set_promiscuity(port->dev, -1);
        }
@@ -192,5 +194,7 @@ void hsr_del_port(struct hsr_port *port)
         */
 
        synchronize_rcu();
-       dev_put(port->dev);
+
+       if (port != master)
+               dev_put(port->dev);
 }
index 6b8aad6a0d7dd2b28c6d52d16b3357136a73241b..5f5c674e130ab438881745a9049ff028f900b6eb 100644 (file)
  *     Interface to generic neighbour cache.
  */
 static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd);
+static bool arp_key_eq(const struct neighbour *n, const void *pkey);
 static int arp_constructor(struct neighbour *neigh);
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -154,6 +155,7 @@ struct neigh_table arp_tbl = {
        .key_len        = 4,
        .protocol       = cpu_to_be16(ETH_P_IP),
        .hash           = arp_hash,
+       .key_eq         = arp_key_eq,
        .constructor    = arp_constructor,
        .proxy_redo     = parp_redo,
        .id             = "arp_cache",
@@ -209,7 +211,12 @@ static u32 arp_hash(const void *pkey,
                    const struct net_device *dev,
                    __u32 *hash_rnd)
 {
-       return arp_hashfn(*(u32 *)pkey, dev, *hash_rnd);
+       return arp_hashfn(pkey, dev, hash_rnd);
+}
+
+static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
+{
+       return neigh_key_eq32(neigh, pkey);
 }
 
 static int arp_constructor(struct neighbour *neigh)
index 57be71dd6a9e0163dceefd564bf71036c12dc9ba..e067770235bfb2c58cd88c10ba1eca1ce666b200 100644 (file)
@@ -89,17 +89,14 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
 
        switch (id) {
        case RT_TABLE_LOCAL:
-               net->ipv4.fib_local = tb;
+               rcu_assign_pointer(net->ipv4.fib_local, tb);
                break;
-
        case RT_TABLE_MAIN:
-               net->ipv4.fib_main = tb;
+               rcu_assign_pointer(net->ipv4.fib_main, tb);
                break;
-
        case RT_TABLE_DEFAULT:
-               net->ipv4.fib_default = tb;
+               rcu_assign_pointer(net->ipv4.fib_default, tb);
                break;
-
        default:
                break;
        }
@@ -132,13 +129,14 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
 static void fib_flush(struct net *net)
 {
        int flushed = 0;
-       struct fib_table *tb;
-       struct hlist_head *head;
        unsigned int h;
 
        for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
-               head = &net->ipv4.fib_table_hash[h];
-               hlist_for_each_entry(tb, head, tb_hlist)
+               struct hlist_head *head = &net->ipv4.fib_table_hash[h];
+               struct hlist_node *tmp;
+               struct fib_table *tb;
+
+               hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
                        flushed += fib_table_flush(tb);
        }
 
@@ -146,6 +144,19 @@ static void fib_flush(struct net *net)
                rt_cache_flush(net);
 }
 
+void fib_flush_external(struct net *net)
+{
+       struct fib_table *tb;
+       struct hlist_head *head;
+       unsigned int h;
+
+       for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
+               head = &net->ipv4.fib_table_hash[h];
+               hlist_for_each_entry(tb, head, tb_hlist)
+                       fib_table_flush_external(tb);
+       }
+}
+
 /*
  * Find address type as if only "dev" was present in the system. If
  * on_dev is NULL then all interfaces are taken into consideration.
@@ -665,10 +676,12 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        s_h = cb->args[0];
        s_e = cb->args[1];
 
+       rcu_read_lock();
+
        for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
                e = 0;
                head = &net->ipv4.fib_table_hash[h];
-               hlist_for_each_entry(tb, head, tb_hlist) {
+               hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                        if (e < s_e)
                                goto next;
                        if (dumped)
@@ -682,6 +695,8 @@ next:
                }
        }
 out:
+       rcu_read_unlock();
+
        cb->args[1] = e;
        cb->args[0] = h;
 
@@ -1117,14 +1132,34 @@ static void ip_fib_net_exit(struct net *net)
 
        rtnl_lock();
        for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
-               struct fib_table *tb;
-               struct hlist_head *head;
+               struct hlist_head *head = &net->ipv4.fib_table_hash[i];
                struct hlist_node *tmp;
+               struct fib_table *tb;
+
+               /* this is done in two passes as flushing the table could
+                * cause it to be reallocated in order to accommodate new
+                * tnodes at the root as the table shrinks.
+                */
+               hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
+                       fib_table_flush(tb);
 
-               head = &net->ipv4.fib_table_hash[i];
                hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+                       switch (tb->tb_id) {
+                       case RT_TABLE_LOCAL:
+                               RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
+                               break;
+                       case RT_TABLE_MAIN:
+                               RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
+                               break;
+                       case RT_TABLE_DEFAULT:
+                               RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
+                               break;
+                       default:
+                               break;
+                       }
+#endif
                        hlist_del(&tb->tb_hlist);
-                       fib_table_flush(tb);
                        fib_free_table(tb);
                }
        }
index d3db718be51d17282becc0864d050adfcc77522f..190d0d00d74422ea21488923d44bedd6a623fa97 100644 (file)
@@ -209,6 +209,8 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        rule4->tos = frh->tos;
 
        net->ipv4.fib_has_custom_rules = true;
+       fib_flush_external(rule->fr_net);
+
        err = 0;
 errout:
        return err;
@@ -224,6 +226,7 @@ static void fib4_rule_delete(struct fib_rule *rule)
                net->ipv4.fib_num_tclassid_users--;
 #endif
        net->ipv4.fib_has_custom_rules = true;
+       fib_flush_external(rule->fr_net);
 }
 
 static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
index f48534577f8dee0b7355639206265ea754c009be..90955455884eda95f339b163ecc2fe9b5a52e44b 100644 (file)
@@ -79,6 +79,7 @@
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <net/ip_fib.h>
+#include <net/switchdev.h>
 #include "fib_lookup.h"
 
 #define MAX_STAT_DEPTH 32
 
 typedef unsigned int t_key;
 
-#define IS_TNODE(n) ((n)->bits)
-#define IS_LEAF(n) (!(n)->bits)
+#define IS_TRIE(n)     ((n)->pos >= KEYLENGTH)
+#define IS_TNODE(n)    ((n)->bits)
+#define IS_LEAF(n)     (!(n)->bits)
 
-#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
-
-struct tnode {
+struct key_vector {
        t_key key;
-       unsigned char bits;             /* 2log(KEYLENGTH) bits needed */
        unsigned char pos;              /* 2log(KEYLENGTH) bits needed */
+       unsigned char bits;             /* 2log(KEYLENGTH) bits needed */
        unsigned char slen;
-       struct tnode __rcu *parent;
-       struct rcu_head rcu;
        union {
-               /* The fields in this struct are valid if bits > 0 (TNODE) */
-               struct {
-                       t_key empty_children; /* KEYLENGTH bits needed */
-                       t_key full_children;  /* KEYLENGTH bits needed */
-                       struct tnode __rcu *child[0];
-               };
-               /* This list pointer if valid if bits == 0 (LEAF) */
+               /* This list pointer if valid if (pos | bits) == 0 (LEAF) */
                struct hlist_head leaf;
+               /* This array is valid if (pos | bits) > 0 (TNODE) */
+               struct key_vector __rcu *tnode[0];
        };
 };
 
+struct tnode {
+       struct rcu_head rcu;
+       t_key empty_children;           /* KEYLENGTH bits needed */
+       t_key full_children;            /* KEYLENGTH bits needed */
+       struct key_vector __rcu *parent;
+       struct key_vector kv[1];
+#define tn_bits kv[0].bits
+};
+
+#define TNODE_SIZE(n)  offsetof(struct tnode, kv[0].tnode[n])
+#define LEAF_SIZE      TNODE_SIZE(1)
+
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 struct trie_use_stats {
        unsigned int gets;
@@ -134,13 +140,13 @@ struct trie_stat {
 };
 
 struct trie {
-       struct tnode __rcu *trie;
+       struct key_vector kv[1];
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        struct trie_use_stats __percpu *stats;
 #endif
 };
 
-static void resize(struct trie *t, struct tnode *tn);
+static struct key_vector *resize(struct trie *t, struct key_vector *tn);
 static size_t tnode_free_size;
 
 /*
@@ -153,41 +159,46 @@ static const int sync_pages = 128;
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
 
+static inline struct tnode *tn_info(struct key_vector *kv)
+{
+       return container_of(kv, struct tnode, kv[0]);
+}
+
 /* caller must hold RTNL */
-#define node_parent(n) rtnl_dereference((n)->parent)
+#define node_parent(tn) rtnl_dereference(tn_info(tn)->parent)
+#define get_child(tn, i) rtnl_dereference((tn)->tnode[i])
 
 /* caller must hold RCU read lock or RTNL */
-#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
+#define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent)
+#define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i])
 
 /* wrapper for rcu_assign_pointer */
-static inline void node_set_parent(struct tnode *n, struct tnode *tp)
+static inline void node_set_parent(struct key_vector *n, struct key_vector *tp)
 {
        if (n)
-               rcu_assign_pointer(n->parent, tp);
+               rcu_assign_pointer(tn_info(n)->parent, tp);
 }
 
-#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)
+#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p)
 
 /* This provides us with the number of children in this node, in the case of a
  * leaf this will return 0 meaning none of the children are accessible.
  */
-static inline unsigned long tnode_child_length(const struct tnode *tn)
+static inline unsigned long child_length(const struct key_vector *tn)
 {
        return (1ul << tn->bits) & ~(1ul);
 }
 
-/* caller must hold RTNL */
-static inline struct tnode *tnode_get_child(const struct tnode *tn,
-                                           unsigned long i)
-{
-       return rtnl_dereference(tn->child[i]);
-}
+#define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos)
 
-/* caller must hold RCU read lock or RTNL */
-static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
-                                               unsigned long i)
+static inline unsigned long get_index(t_key key, struct key_vector *kv)
 {
-       return rcu_dereference_rtnl(tn->child[i]);
+       unsigned long index = key ^ kv->key;
+
+       if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos))
+               return 0;
+
+       return index >> kv->pos;
 }
 
 /* To understand this stuff, an understanding of keys and all their bits is
@@ -266,90 +277,104 @@ static inline void alias_free_mem_rcu(struct fib_alias *fa)
 }
 
 #define TNODE_KMALLOC_MAX \
-       ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
+       ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct key_vector *))
+#define TNODE_VMALLOC_MAX \
+       ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))
 
 static void __node_free_rcu(struct rcu_head *head)
 {
        struct tnode *n = container_of(head, struct tnode, rcu);
 
-       if (IS_LEAF(n))
+       if (!n->tn_bits)
                kmem_cache_free(trie_leaf_kmem, n);
-       else if (n->bits <= TNODE_KMALLOC_MAX)
+       else if (n->tn_bits <= TNODE_KMALLOC_MAX)
                kfree(n);
        else
                vfree(n);
 }
 
-#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)
+#define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)
 
-static struct tnode *tnode_alloc(size_t size)
+static struct tnode *tnode_alloc(int bits)
 {
+       size_t size;
+
+       /* verify bits is within bounds */
+       if (bits > TNODE_VMALLOC_MAX)
+               return NULL;
+
+       /* determine size and verify it is non-zero and didn't overflow */
+       size = TNODE_SIZE(1ul << bits);
+
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
        else
                return vzalloc(size);
 }
 
-static inline void empty_child_inc(struct tnode *n)
+static inline void empty_child_inc(struct key_vector *n)
 {
-       ++n->empty_children ? : ++n->full_children;
+       ++tn_info(n)->empty_children ? : ++tn_info(n)->full_children;
 }
 
-static inline void empty_child_dec(struct tnode *n)
+static inline void empty_child_dec(struct key_vector *n)
 {
-       n->empty_children-- ? : n->full_children--;
+       tn_info(n)->empty_children-- ? : tn_info(n)->full_children--;
 }
 
-static struct tnode *leaf_new(t_key key)
+static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
 {
-       struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
-       if (l) {
-               l->parent = NULL;
-               /* set key and pos to reflect full key value
-                * any trailing zeros in the key should be ignored
-                * as the nodes are searched
-                */
-               l->key = key;
-               l->slen = 0;
-               l->pos = 0;
-               /* set bits to 0 indicating we are not a tnode */
-               l->bits = 0;
+       struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
+       struct key_vector *l = kv->kv;
+
+       if (!kv)
+               return NULL;
+
+       /* initialize key vector */
+       l->key = key;
+       l->pos = 0;
+       l->bits = 0;
+       l->slen = fa->fa_slen;
+
+       /* link leaf to fib alias */
+       INIT_HLIST_HEAD(&l->leaf);
+       hlist_add_head(&fa->fa_list, &l->leaf);
 
-               INIT_HLIST_HEAD(&l->leaf);
-       }
        return l;
 }
 
-static struct tnode *tnode_new(t_key key, int pos, int bits)
+static struct key_vector *tnode_new(t_key key, int pos, int bits)
 {
-       size_t sz = offsetof(struct tnode, child[1ul << bits]);
-       struct tnode *tn = tnode_alloc(sz);
+       struct tnode *tnode = tnode_alloc(bits);
        unsigned int shift = pos + bits;
+       struct key_vector *tn = tnode->kv;
 
        /* verify bits and pos their msb bits clear and values are valid */
        BUG_ON(!bits || (shift > KEYLENGTH));
 
-       if (tn) {
-               tn->parent = NULL;
-               tn->slen = pos;
-               tn->pos = pos;
-               tn->bits = bits;
-               tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
-               if (bits == KEYLENGTH)
-                       tn->full_children = 1;
-               else
-                       tn->empty_children = 1ul << bits;
-       }
+       pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
+                sizeof(struct key_vector *) << bits);
+
+       if (!tnode)
+               return NULL;
+
+       if (bits == KEYLENGTH)
+               tnode->full_children = 1;
+       else
+               tnode->empty_children = 1ul << bits;
+
+       tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
+       tn->pos = pos;
+       tn->bits = bits;
+       tn->slen = pos;
 
-       pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
-                sizeof(struct tnode *) << bits);
        return tn;
 }
 
 /* Check whether a tnode 'n' is "full", i.e. it is an internal node
  * and no bits are skipped. See discussion in dyntree paper p. 6
  */
-static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
+static inline int tnode_full(struct key_vector *tn, struct key_vector *n)
 {
        return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
 }
@@ -357,12 +382,13 @@ static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
 /* Add a child at position i overwriting the old value.
  * Update the value of full_children and empty_children.
  */
-static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
+static void put_child(struct key_vector *tn, unsigned long i,
+                     struct key_vector *n)
 {
-       struct tnode *chi = tnode_get_child(tn, i);
+       struct key_vector *chi = get_child(tn, i);
        int isfull, wasfull;
 
-       BUG_ON(i >= tnode_child_length(tn));
+       BUG_ON(i >= child_length(tn));
 
        /* update emptyChildren, overflow into fullChildren */
        if (n == NULL && chi != NULL)
@@ -375,23 +401,23 @@ static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
        isfull = tnode_full(tn, n);
 
        if (wasfull && !isfull)
-               tn->full_children--;
+               tn_info(tn)->full_children--;
        else if (!wasfull && isfull)
-               tn->full_children++;
+               tn_info(tn)->full_children++;
 
        if (n && (tn->slen < n->slen))
                tn->slen = n->slen;
 
-       rcu_assign_pointer(tn->child[i], n);
+       rcu_assign_pointer(tn->tnode[i], n);
 }
 
-static void update_children(struct tnode *tn)
+static void update_children(struct key_vector *tn)
 {
        unsigned long i;
 
        /* update all of the child parent pointers */
-       for (i = tnode_child_length(tn); i;) {
-               struct tnode *inode = tnode_get_child(tn, --i);
+       for (i = child_length(tn); i;) {
+               struct key_vector *inode = get_child(tn, --i);
 
                if (!inode)
                        continue;
@@ -407,36 +433,37 @@ static void update_children(struct tnode *tn)
        }
 }
 
-static inline void put_child_root(struct tnode *tp, struct trie *t,
-                                 t_key key, struct tnode *n)
+static inline void put_child_root(struct key_vector *tp, t_key key,
+                                 struct key_vector *n)
 {
-       if (tp)
-               put_child(tp, get_index(key, tp), n);
+       if (IS_TRIE(tp))
+               rcu_assign_pointer(tp->tnode[0], n);
        else
-               rcu_assign_pointer(t->trie, n);
+               put_child(tp, get_index(key, tp), n);
 }
 
-static inline void tnode_free_init(struct tnode *tn)
+static inline void tnode_free_init(struct key_vector *tn)
 {
-       tn->rcu.next = NULL;
+       tn_info(tn)->rcu.next = NULL;
 }
 
-static inline void tnode_free_append(struct tnode *tn, struct tnode *n)
+static inline void tnode_free_append(struct key_vector *tn,
+                                    struct key_vector *n)
 {
-       n->rcu.next = tn->rcu.next;
-       tn->rcu.next = &n->rcu;
+       tn_info(n)->rcu.next = tn_info(tn)->rcu.next;
+       tn_info(tn)->rcu.next = &tn_info(n)->rcu;
 }
 
-static void tnode_free(struct tnode *tn)
+static void tnode_free(struct key_vector *tn)
 {
-       struct callback_head *head = &tn->rcu;
+       struct callback_head *head = &tn_info(tn)->rcu;
 
        while (head) {
                head = head->next;
-               tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);
+               tnode_free_size += TNODE_SIZE(1ul << tn->bits);
                node_free(tn);
 
-               tn = container_of(head, struct tnode, rcu);
+               tn = container_of(head, struct tnode, rcu)->kv;
        }
 
        if (tnode_free_size >= PAGE_SIZE * sync_pages) {
@@ -445,14 +472,16 @@ static void tnode_free(struct tnode *tn)
        }
 }
 
-static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
+static struct key_vector *replace(struct trie *t,
+                                 struct key_vector *oldtnode,
+                                 struct key_vector *tn)
 {
-       struct tnode *tp = node_parent(oldtnode);
+       struct key_vector *tp = node_parent(oldtnode);
        unsigned long i;
 
        /* setup the parent pointer out of and back into this node */
        NODE_INIT_PARENT(tn, tp);
-       put_child_root(tp, t, tn->key, tn);
+       put_child_root(tp, tn->key, tn);
 
        /* update all of the child parent pointers */
        update_children(tn);
@@ -461,18 +490,21 @@ static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
        tnode_free(oldtnode);
 
        /* resize children now that oldtnode is freed */
-       for (i = tnode_child_length(tn); i;) {
-               struct tnode *inode = tnode_get_child(tn, --i);
+       for (i = child_length(tn); i;) {
+               struct key_vector *inode = get_child(tn, --i);
 
                /* resize child node */
                if (tnode_full(tn, inode))
-                       resize(t, inode);
+                       tn = resize(t, inode);
        }
+
+       return tp;
 }
 
-static int inflate(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *inflate(struct trie *t,
+                                 struct key_vector *oldtnode)
 {
-       struct tnode *tn;
+       struct key_vector *tn;
        unsigned long i;
        t_key m;
 
@@ -480,7 +512,7 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
 
        tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
        if (!tn)
-               return -ENOMEM;
+               goto notnode;
 
        /* prepare oldtnode to be freed */
        tnode_free_init(oldtnode);
@@ -490,9 +522,9 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
         * point to existing tnodes and the links between our allocated
         * nodes.
         */
-       for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
-               struct tnode *inode = tnode_get_child(oldtnode, --i);
-               struct tnode *node0, *node1;
+       for (i = child_length(oldtnode), m = 1u << tn->pos; i;) {
+               struct key_vector *inode = get_child(oldtnode, --i);
+               struct key_vector *node0, *node1;
                unsigned long j, k;
 
                /* An empty child */
@@ -510,8 +542,8 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
 
                /* An internal node with two children */
                if (inode->bits == 1) {
-                       put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
-                       put_child(tn, 2 * i, tnode_get_child(inode, 0));
+                       put_child(tn, 2 * i + 1, get_child(inode, 1));
+                       put_child(tn, 2 * i, get_child(inode, 0));
                        continue;
                }
 
@@ -540,11 +572,11 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
                tnode_free_append(tn, node0);
 
                /* populate child pointers in new nodes */
-               for (k = tnode_child_length(inode), j = k / 2; j;) {
-                       put_child(node1, --j, tnode_get_child(inode, --k));
-                       put_child(node0, j, tnode_get_child(inode, j));
-                       put_child(node1, --j, tnode_get_child(inode, --k));
-                       put_child(node0, j, tnode_get_child(inode, j));
+               for (k = child_length(inode), j = k / 2; j;) {
+                       put_child(node1, --j, get_child(inode, --k));
+                       put_child(node0, j, get_child(inode, j));
+                       put_child(node1, --j, get_child(inode, --k));
+                       put_child(node0, j, get_child(inode, j));
                }
 
                /* link new nodes to parent */
@@ -557,25 +589,25 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
        }
 
        /* setup the parent pointers into and out of this node */
-       replace(t, oldtnode, tn);
-
-       return 0;
+       return replace(t, oldtnode, tn);
 nomem:
        /* all pointers should be clean so we are done */
        tnode_free(tn);
-       return -ENOMEM;
+notnode:
+       return NULL;
 }
 
-static int halve(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *halve(struct trie *t,
+                               struct key_vector *oldtnode)
 {
-       struct tnode *tn;
+       struct key_vector *tn;
        unsigned long i;
 
        pr_debug("In halve\n");
 
        tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
        if (!tn)
-               return -ENOMEM;
+               goto notnode;
 
        /* prepare oldtnode to be freed */
        tnode_free_init(oldtnode);
@@ -585,10 +617,10 @@ static int halve(struct trie *t, struct tnode *oldtnode)
         * point to existing tnodes and the links between our allocated
         * nodes.
         */
-       for (i = tnode_child_length(oldtnode); i;) {
-               struct tnode *node1 = tnode_get_child(oldtnode, --i);
-               struct tnode *node0 = tnode_get_child(oldtnode, --i);
-               struct tnode *inode;
+       for (i = child_length(oldtnode); i;) {
+               struct key_vector *node1 = get_child(oldtnode, --i);
+               struct key_vector *node0 = get_child(oldtnode, --i);
+               struct key_vector *inode;
 
                /* At least one of the children is empty */
                if (!node1 || !node0) {
@@ -598,10 +630,8 @@ static int halve(struct trie *t, struct tnode *oldtnode)
 
                /* Two nonempty children */
                inode = tnode_new(node0->key, oldtnode->pos, 1);
-               if (!inode) {
-                       tnode_free(tn);
-                       return -ENOMEM;
-               }
+               if (!inode)
+                       goto nomem;
                tnode_free_append(tn, inode);
 
                /* initialize pointers out of node */
@@ -614,30 +644,36 @@ static int halve(struct trie *t, struct tnode *oldtnode)
        }
 
        /* setup the parent pointers into and out of this node */
-       replace(t, oldtnode, tn);
-
-       return 0;
+       return replace(t, oldtnode, tn);
+nomem:
+       /* all pointers should be clean so we are done */
+       tnode_free(tn);
+notnode:
+       return NULL;
 }
 
-static void collapse(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *collapse(struct trie *t,
+                                  struct key_vector *oldtnode)
 {
-       struct tnode *n, *tp;
+       struct key_vector *n, *tp;
        unsigned long i;
 
        /* scan the tnode looking for that one child that might still exist */
-       for (n = NULL, i = tnode_child_length(oldtnode); !n && i;)
-               n = tnode_get_child(oldtnode, --i);
+       for (n = NULL, i = child_length(oldtnode); !n && i;)
+               n = get_child(oldtnode, --i);
 
        /* compress one level */
        tp = node_parent(oldtnode);
-       put_child_root(tp, t, oldtnode->key, n);
+       put_child_root(tp, oldtnode->key, n);
        node_set_parent(n, tp);
 
        /* drop dead node */
        node_free(oldtnode);
+
+       return tp;
 }
 
-static unsigned char update_suffix(struct tnode *tn)
+static unsigned char update_suffix(struct key_vector *tn)
 {
        unsigned char slen = tn->pos;
        unsigned long stride, i;
@@ -647,8 +683,8 @@ static unsigned char update_suffix(struct tnode *tn)
         * why we start with a stride of 2 since a stride of 1 would
         * represent the nodes with suffix length equal to tn->pos
         */
-       for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) {
-               struct tnode *n = tnode_get_child(tn, i);
+       for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) {
+               struct key_vector *n = get_child(tn, i);
 
                if (!n || (n->slen <= slen))
                        continue;
@@ -680,12 +716,12 @@ static unsigned char update_suffix(struct tnode *tn)
  *
  * 'high' in this instance is the variable 'inflate_threshold'. It
  * is expressed as a percentage, so we multiply it with
- * tnode_child_length() and instead of multiplying by 2 (since the
+ * child_length() and instead of multiplying by 2 (since the
  * child array will be doubled by inflate()) and multiplying
  * the left-hand side by 100 (to handle the percentage thing) we
  * multiply the left-hand side by 50.
  *
- * The left-hand side may look a bit weird: tnode_child_length(tn)
+ * The left-hand side may look a bit weird: child_length(tn)
  * - tn->empty_children is of course the number of non-null children
  * in the current node. tn->full_children is the number of "full"
  * children, that is non-null tnodes with a skip value of 0.
@@ -695,10 +731,10 @@ static unsigned char update_suffix(struct tnode *tn)
  * A clearer way to write this would be:
  *
  * to_be_doubled = tn->full_children;
- * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
+ * not_to_be_doubled = child_length(tn) - tn->empty_children -
  *     tn->full_children;
  *
- * new_child_length = tnode_child_length(tn) * 2;
+ * new_child_length = child_length(tn) * 2;
  *
  * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
  *      new_child_length;
@@ -715,57 +751,57 @@ static unsigned char update_suffix(struct tnode *tn)
  *      inflate_threshold * new_child_length
  *
  * expand not_to_be_doubled and to_be_doubled, and shorten:
- * 100 * (tnode_child_length(tn) - tn->empty_children +
+ * 100 * (child_length(tn) - tn->empty_children +
  *    tn->full_children) >= inflate_threshold * new_child_length
  *
  * expand new_child_length:
- * 100 * (tnode_child_length(tn) - tn->empty_children +
+ * 100 * (child_length(tn) - tn->empty_children +
  *    tn->full_children) >=
- *      inflate_threshold * tnode_child_length(tn) * 2
+ *      inflate_threshold * child_length(tn) * 2
  *
  * shorten again:
- * 50 * (tn->full_children + tnode_child_length(tn) -
+ * 50 * (tn->full_children + child_length(tn) -
  *    tn->empty_children) >= inflate_threshold *
- *    tnode_child_length(tn)
+ *    child_length(tn)
  *
  */
-static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
+static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn)
 {
-       unsigned long used = tnode_child_length(tn);
+       unsigned long used = child_length(tn);
        unsigned long threshold = used;
 
        /* Keep root node larger */
-       threshold *= tp ? inflate_threshold : inflate_threshold_root;
-       used -= tn->empty_children;
-       used += tn->full_children;
+       threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold;
+       used -= tn_info(tn)->empty_children;
+       used += tn_info(tn)->full_children;
 
        /* if bits == KEYLENGTH then pos = 0, and will fail below */
 
        return (used > 1) && tn->pos && ((50 * used) >= threshold);
 }
 
-static bool should_halve(const struct tnode *tp, const struct tnode *tn)
+static inline bool should_halve(struct key_vector *tp, struct key_vector *tn)
 {
-       unsigned long used = tnode_child_length(tn);
+       unsigned long used = child_length(tn);
        unsigned long threshold = used;
 
        /* Keep root node larger */
-       threshold *= tp ? halve_threshold : halve_threshold_root;
-       used -= tn->empty_children;
+       threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold;
+       used -= tn_info(tn)->empty_children;
 
        /* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */
 
        return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
 }
 
-static bool should_collapse(const struct tnode *tn)
+static inline bool should_collapse(struct key_vector *tn)
 {
-       unsigned long used = tnode_child_length(tn);
+       unsigned long used = child_length(tn);
 
-       used -= tn->empty_children;
+       used -= tn_info(tn)->empty_children;
 
        /* account for bits == KEYLENGTH case */
-       if ((tn->bits == KEYLENGTH) && tn->full_children)
+       if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children)
                used -= KEY_MAX;
 
        /* One child or none, time to drop us from the trie */
@@ -773,10 +809,13 @@ static bool should_collapse(const struct tnode *tn)
 }
 
 #define MAX_WORK 10
-static void resize(struct trie *t, struct tnode *tn)
+static struct key_vector *resize(struct trie *t, struct key_vector *tn)
 {
-       struct tnode *tp = node_parent(tn);
-       struct tnode __rcu **cptr;
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+       struct trie_use_stats __percpu *stats = t->stats;
+#endif
+       struct key_vector *tp = node_parent(tn);
+       unsigned long cindex = get_index(tn->key, tp);
        int max_work = MAX_WORK;
 
        pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
@@ -786,158 +825,125 @@ static void resize(struct trie *t, struct tnode *tn)
         * doing it ourselves.  This way we can let RCU fully do its
         * thing without us interfering
         */
-       cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
-       BUG_ON(tn != rtnl_dereference(*cptr));
+       BUG_ON(tn != get_child(tp, cindex));
 
        /* Double as long as the resulting node has a number of
         * nonempty nodes that are above the threshold.
         */
        while (should_inflate(tp, tn) && max_work) {
-               if (inflate(t, tn)) {
+               tp = inflate(t, tn);
+               if (!tp) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                       this_cpu_inc(t->stats->resize_node_skipped);
+                       this_cpu_inc(stats->resize_node_skipped);
 #endif
                        break;
                }
 
                max_work--;
-               tn = rtnl_dereference(*cptr);
+               tn = get_child(tp, cindex);
        }
 
        /* Return if at least one inflate is run */
        if (max_work != MAX_WORK)
-               return;
+               return node_parent(tn);
 
        /* Halve as long as the number of empty children in this
         * node is above threshold.
         */
        while (should_halve(tp, tn) && max_work) {
-               if (halve(t, tn)) {
+               tp = halve(t, tn);
+               if (!tp) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-                       this_cpu_inc(t->stats->resize_node_skipped);
+                       this_cpu_inc(stats->resize_node_skipped);
 #endif
                        break;
                }
 
                max_work--;
-               tn = rtnl_dereference(*cptr);
+               tn = get_child(tp, cindex);
        }
 
        /* Only one child remains */
-       if (should_collapse(tn)) {
-               collapse(t, tn);
-               return;
-       }
+       if (should_collapse(tn))
+               return collapse(t, tn);
+
+       /* update parent in case inflate or halve failed */
+       tp = node_parent(tn);
 
        /* Return if at least one deflate was run */
        if (max_work != MAX_WORK)
-               return;
+               return tp;
 
        /* push the suffix length to the parent node */
        if (tn->slen > tn->pos) {
                unsigned char slen = update_suffix(tn);
 
-               if (tp && (slen > tp->slen))
+               if (slen > tp->slen)
                        tp->slen = slen;
        }
+
+       return tp;
 }
 
-static void leaf_pull_suffix(struct tnode *l)
+static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l)
 {
-       struct tnode *tp = node_parent(l);
-
-       while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
+       while ((tp->slen > tp->pos) && (tp->slen > l->slen)) {
                if (update_suffix(tp) > l->slen)
                        break;
                tp = node_parent(tp);
        }
 }
 
-static void leaf_push_suffix(struct tnode *l)
+static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l)
 {
-       struct tnode *tn = node_parent(l);
-
        /* if this is a new leaf then tn will be NULL and we can sort
         * out parent suffix lengths as a part of trie_rebalance
         */
-       while (tn && (tn->slen < l->slen)) {
+       while (tn->slen < l->slen) {
                tn->slen = l->slen;
                tn = node_parent(tn);
        }
 }
 
-static void fib_remove_alias(struct tnode *l, struct fib_alias *old)
-{
-       /* record the location of the previous list_info entry */
-       struct hlist_node **pprev = old->fa_list.pprev;
-       struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);
-
-       /* remove the fib_alias from the list */
-       hlist_del_rcu(&old->fa_list);
-
-       /* only access fa if it is pointing at the last valid hlist_node */
-       if (hlist_empty(&l->leaf) || (*pprev))
-               return;
-
-       /* update the trie with the latest suffix length */
-       l->slen = fa->fa_slen;
-       leaf_pull_suffix(l);
-}
-
-static void fib_insert_alias(struct tnode *l, struct fib_alias *fa,
-                            struct fib_alias *new)
+/* rcu_read_lock needs to be hold by caller from readside */
+static struct key_vector *fib_find_node(struct trie *t,
+                                       struct key_vector **tp, u32 key)
 {
-       if (fa) {
-               hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
-       } else {
-               struct fib_alias *last;
+       struct key_vector *pn, *n = t->kv;
+       unsigned long index = 0;
 
-               hlist_for_each_entry(last, &l->leaf, fa_list) {
-                       if (new->fa_slen < last->fa_slen)
-                               break;
-                       fa = last;
-               }
-
-               if (fa)
-                       hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
-               else
-                       hlist_add_head_rcu(&new->fa_list, &l->leaf);
-       }
-
-       /* if we added to the tail node then we need to update slen */
-       if (l->slen < new->fa_slen) {
-               l->slen = new->fa_slen;
-               leaf_push_suffix(l);
-       }
-}
+       do {
+               pn = n;
+               n = get_child_rcu(n, index);
 
-/* rcu_read_lock needs to be hold by caller from readside */
-static struct tnode *fib_find_node(struct trie *t, u32 key)
-{
-       struct tnode *n = rcu_dereference_rtnl(t->trie);
+               if (!n)
+                       break;
 
-       while (n) {
-               unsigned long index = get_index(key, n);
+               index = get_cindex(key, n);
 
                /* This bit of code is a bit tricky but it combines multiple
                 * checks into a single check.  The prefix consists of the
                 * prefix plus zeros for the bits in the cindex. The index
                 * is the difference between the key and this value.  From
                 * this we can actually derive several pieces of data.
-                *   if (index & (~0ul << bits))
+                *   if (index >= (1ul << bits))
                 *     we have a mismatch in skip bits and failed
                 *   else
                 *     we know the value is cindex
+                *
+                * This check is safe even if bits == KEYLENGTH due to the
+                * fact that we can only allocate a node with 32 bits if a
+                * long is greater than 32 bits.
                 */
-               if (index & (~0ul << n->bits))
-                       return NULL;
-
-               /* we have found a leaf. Prefixes have already been compared */
-               if (IS_LEAF(n))
+               if (index >= (1ul << n->bits)) {
+                       n = NULL;
                        break;
+               }
 
-               n = tnode_get_child_rcu(n, index);
-       }
+               /* keep searching until we find a perfect match leaf or NULL */
+       } while (IS_TNODE(n));
+
+       *tp = pn;
 
        return n;
 }
@@ -967,65 +973,23 @@ static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
        return NULL;
 }
 
-static void trie_rebalance(struct trie *t, struct tnode *tn)
+static void trie_rebalance(struct trie *t, struct key_vector *tn)
 {
-       struct tnode *tp;
-
-       while ((tp = node_parent(tn)) != NULL) {
-               resize(t, tn);
-               tn = tp;
-       }
-
-       /* Handle last (top) tnode */
-       if (IS_TNODE(tn))
-               resize(t, tn);
+       while (!IS_TRIE(tn))
+               tn = resize(t, tn);
 }
 
-/* only used from updater-side */
-
-static struct tnode *fib_insert_node(struct trie *t, u32 key, int plen)
+static int fib_insert_node(struct trie *t, struct key_vector *tp,
+                          struct fib_alias *new, t_key key)
 {
-       struct tnode *l, *n, *tp = NULL;
-
-       n = rtnl_dereference(t->trie);
-
-       /* If we point to NULL, stop. Either the tree is empty and we should
-        * just put a new leaf in if, or we have reached an empty child slot,
-        * and we should just put our new leaf in that.
-        *
-        * If we hit a node with a key that does't match then we should stop
-        * and create a new tnode to replace that node and insert ourselves
-        * and the other node into the new tnode.
-        */
-       while (n) {
-               unsigned long index = get_index(key, n);
-
-               /* This bit of code is a bit tricky but it combines multiple
-                * checks into a single check.  The prefix consists of the
-                * prefix plus zeros for the "bits" in the prefix. The index
-                * is the difference between the key and this value.  From
-                * this we can actually derive several pieces of data.
-                *   if !(index >> bits)
-                *     we know the value is child index
-                *   else
-                *     we have a mismatch in skip bits and failed
-                */
-               if (index >> n->bits)
-                       break;
+       struct key_vector *n, *l;
 
-               /* we have found a leaf. Prefixes have already been compared */
-               if (IS_LEAF(n)) {
-                       /* Case 1: n is a leaf, and prefixes match*/
-                       return n;
-               }
-
-               tp = n;
-               n = tnode_get_child_rcu(n, index);
-       }
-
-       l = leaf_new(key);
+       l = leaf_new(key, new);
        if (!l)
-               return NULL;
+               goto noleaf;
+
+       /* retrieve child from parent node */
+       n = get_child(tp, get_index(key, tp));
 
        /* Case 2: n is a LEAF or a TNODE and the key doesn't match.
         *
@@ -1034,20 +998,18 @@ static struct tnode *fib_insert_node(struct trie *t, u32 key, int plen)
         *  leaves us in position for handling as case 3
         */
        if (n) {
-               struct tnode *tn;
+               struct key_vector *tn;
 
                tn = tnode_new(key, __fls(key ^ n->key), 1);
-               if (!tn) {
-                       node_free(l);
-                       return NULL;
-               }
+               if (!tn)
+                       goto notnode;
 
                /* initialize routes out of node */
                NODE_INIT_PARENT(tn, tp);
                put_child(tn, get_index(key, tn) ^ 1, n);
 
                /* start adding routes into the node */
-               put_child_root(tp, t, key, tn);
+               put_child_root(tp, key, tn);
                node_set_parent(n, tn);
 
                /* parent now has a NULL spot where the leaf can go */
@@ -1055,31 +1017,62 @@ static struct tnode *fib_insert_node(struct trie *t, u32 key, int plen)
        }
 
        /* Case 3: n is NULL, and will just insert a new leaf */
-       if (tp) {
-               NODE_INIT_PARENT(l, tp);
-               put_child(tp, get_index(key, tp), l);
-               trie_rebalance(t, tp);
+       NODE_INIT_PARENT(l, tp);
+       put_child_root(tp, key, l);
+       trie_rebalance(t, tp);
+
+       return 0;
+notnode:
+       node_free(l);
+noleaf:
+       return -ENOMEM;
+}
+
+static int fib_insert_alias(struct trie *t, struct key_vector *tp,
+                           struct key_vector *l, struct fib_alias *new,
+                           struct fib_alias *fa, t_key key)
+{
+       if (!l)
+               return fib_insert_node(t, tp, new, key);
+
+       if (fa) {
+               hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
        } else {
-               rcu_assign_pointer(t->trie, l);
+               struct fib_alias *last;
+
+               hlist_for_each_entry(last, &l->leaf, fa_list) {
+                       if (new->fa_slen < last->fa_slen)
+                               break;
+                       fa = last;
+               }
+
+               if (fa)
+                       hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
+               else
+                       hlist_add_head_rcu(&new->fa_list, &l->leaf);
        }
 
-       return l;
+       /* if we added to the tail node then we need to update slen */
+       if (l->slen < new->fa_slen) {
+               l->slen = new->fa_slen;
+               leaf_push_suffix(tp, l);
+       }
+
+       return 0;
 }
 
-/*
- * Caller must hold RTNL.
- */
+/* Caller must hold RTNL. */
 int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 {
-       struct trie *t = (struct trie *) tb->tb_data;
+       struct trie *t = (struct trie *)tb->tb_data;
        struct fib_alias *fa, *new_fa;
+       struct key_vector *l, *tp;
        struct fib_info *fi;
        u8 plen = cfg->fc_dst_len;
        u8 slen = KEYLENGTH - plen;
        u8 tos = cfg->fc_tos;
-       u32 key, mask;
+       u32 key;
        int err;
-       struct tnode *l;
 
        if (plen > KEYLENGTH)
                return -EINVAL;
@@ -1088,9 +1081,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 
        pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
 
-       mask = ntohl(inet_make_mask(plen));
-
-       if (key & ~mask)
+       if ((plen < KEYLENGTH) && (key << plen))
                return -EINVAL;
 
        fi = fib_create_info(cfg);
@@ -1099,7 +1090,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                goto err;
        }
 
-       l = fib_find_node(t, key);
+       l = fib_find_node(t, &tp, key);
        fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority) : NULL;
 
        /* Now fa, if non-NULL, points to the first fib alias
@@ -1161,7 +1152,18 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                        new_fa->fa_state = state & ~FA_S_ACCESSED;
                        new_fa->fa_slen = fa->fa_slen;
 
+                       err = netdev_switch_fib_ipv4_add(key, plen, fi,
+                                                        new_fa->fa_tos,
+                                                        cfg->fc_type,
+                                                        tb->tb_id);
+                       if (err) {
+                               netdev_switch_fib_ipv4_abort(fi);
+                               kmem_cache_free(fn_alias_kmem, new_fa);
+                               goto out;
+                       }
+
                        hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
+
                        alias_free_mem_rcu(fa);
 
                        fib_release_info(fi_drop);
@@ -1197,26 +1199,30 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
        new_fa->fa_state = 0;
        new_fa->fa_slen = slen;
 
-       /* Insert new entry to the list. */
-       if (!l) {
-               l = fib_insert_node(t, key, plen);
-               if (unlikely(!l)) {
-                       err = -ENOMEM;
-                       goto out_free_new_fa;
-               }
+       /* (Optionally) offload fib entry to switch hardware. */
+       err = netdev_switch_fib_ipv4_add(key, plen, fi, tos,
+                                        cfg->fc_type, tb->tb_id);
+       if (err) {
+               netdev_switch_fib_ipv4_abort(fi);
+               goto out_free_new_fa;
        }
 
+       /* Insert new entry to the list. */
+       err = fib_insert_alias(t, tp, l, new_fa, fa, key);
+       if (err)
+               goto out_sw_fib_del;
+
        if (!plen)
                tb->tb_num_default++;
 
-       fib_insert_alias(l, fa, new_fa);
-
        rt_cache_flush(cfg->fc_nlinfo.nl_net);
        rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
                  &cfg->fc_nlinfo, 0);
 succeeded:
        return 0;
 
+out_sw_fib_del:
+       netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
 out_free_new_fa:
        kmem_cache_free(fn_alias_kmem, new_fa);
 out:
@@ -1225,7 +1231,7 @@ err:
        return err;
 }
 
-static inline t_key prefix_mismatch(t_key key, struct tnode *n)
+static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
 {
        t_key prefix = n->key;
 
@@ -1241,11 +1247,15 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
        struct trie_use_stats __percpu *stats = t->stats;
 #endif
        const t_key key = ntohl(flp->daddr);
-       struct tnode *n, *pn;
+       struct key_vector *n, *pn;
        struct fib_alias *fa;
+       unsigned long index;
        t_key cindex;
 
-       n = rcu_dereference(t->trie);
+       pn = t->kv;
+       cindex = 0;
+
+       n = get_child_rcu(pn, cindex);
        if (!n)
                return -EAGAIN;
 
@@ -1253,24 +1263,25 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
        this_cpu_inc(stats->gets);
 #endif
 
-       pn = n;
-       cindex = 0;
-
        /* Step 1: Travel to the longest prefix match in the trie */
        for (;;) {
-               unsigned long index = get_index(key, n);
+               index = get_cindex(key, n);
 
                /* This bit of code is a bit tricky but it combines multiple
                 * checks into a single check.  The prefix consists of the
                 * prefix plus zeros for the "bits" in the prefix. The index
                 * is the difference between the key and this value.  From
                 * this we can actually derive several pieces of data.
-                *   if (index & (~0ul << bits))
+                *   if (index >= (1ul << bits))
                 *     we have a mismatch in skip bits and failed
                 *   else
                 *     we know the value is cindex
+                *
+                * This check is safe even if bits == KEYLENGTH due to the
+                * fact that we can only allocate a node with 32 bits if a
+                * long is greater than 32 bits.
                 */
-               if (index & (~0ul << n->bits))
+               if (index >= (1ul << n->bits))
                        break;
 
                /* we have found a leaf. Prefixes have already been compared */
@@ -1285,7 +1296,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                        cindex = index;
                }
 
-               n = tnode_get_child_rcu(n, index);
+               n = get_child_rcu(n, index);
                if (unlikely(!n))
                        goto backtrace;
        }
@@ -1293,7 +1304,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
        /* Step 2: Sort out leaves and begin backtracing for longest prefix */
        for (;;) {
                /* record the pointer where our next node pointer is stored */
-               struct tnode __rcu **cptr = n->child;
+               struct key_vector __rcu **cptr = n->tnode;
 
                /* This test verifies that none of the bits that differ
                 * between the key and the prefix exist in the region of
@@ -1325,13 +1336,17 @@ backtrace:
                        while (!cindex) {
                                t_key pkey = pn->key;
 
-                               pn = node_parent_rcu(pn);
-                               if (unlikely(!pn))
+                               /* If we don't have a parent then there is
+                                * nothing for us to do as we do not have any
+                                * further nodes to parse.
+                                */
+                               if (IS_TRIE(pn))
                                        return -EAGAIN;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
                                this_cpu_inc(stats->backtrack);
 #endif
                                /* Get Child's index */
+                               pn = node_parent_rcu(pn);
                                cindex = get_index(pkey, pn);
                        }
 
@@ -1339,19 +1354,22 @@ backtrace:
                        cindex &= cindex - 1;
 
                        /* grab pointer for next child node */
-                       cptr = &pn->child[cindex];
+                       cptr = &pn->tnode[cindex];
                }
        }
 
 found:
+       /* this line carries forward the xor from earlier in the function */
+       index = key ^ n->key;
+
        /* Step 3: Process the leaf, if that fails fall back to backtracing */
        hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
                struct fib_info *fi = fa->fa_info;
                int nhsel, err;
 
-               if (((key ^ n->key) >= (1ul << fa->fa_slen)) &&
+               if ((index >= (1ul << fa->fa_slen)) &&
                    ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH)))
-                               continue;
+                       continue;
                if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
                        continue;
                if (fi->fib_dead)
@@ -1399,53 +1417,59 @@ found:
 }
 EXPORT_SYMBOL_GPL(fib_table_lookup);
 
-/*
- * Remove the leaf and return parent.
- */
-static void trie_leaf_remove(struct trie *t, struct tnode *l)
+static void fib_remove_alias(struct trie *t, struct key_vector *tp,
+                            struct key_vector *l, struct fib_alias *old)
 {
-       struct tnode *tp = node_parent(l);
+       /* record the location of the previous list_info entry */
+       struct hlist_node **pprev = old->fa_list.pprev;
+       struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);
 
-       pr_debug("entering trie_leaf_remove(%p)\n", l);
+       /* remove the fib_alias from the list */
+       hlist_del_rcu(&old->fa_list);
 
-       if (tp) {
-               put_child(tp, get_index(l->key, tp), NULL);
+       /* if we emptied the list this leaf will be freed and we can sort
+        * out parent suffix lengths as a part of trie_rebalance
+        */
+       if (hlist_empty(&l->leaf)) {
+               put_child_root(tp, l->key, NULL);
+               node_free(l);
                trie_rebalance(t, tp);
-       } else {
-               RCU_INIT_POINTER(t->trie, NULL);
+               return;
        }
 
-       node_free(l);
+       /* only access fa if it is pointing at the last valid hlist_node */
+       if (*pprev)
+               return;
+
+       /* update the trie with the latest suffix length */
+       l->slen = fa->fa_slen;
+       leaf_pull_suffix(tp, l);
 }
 
-/*
- * Caller must hold RTNL.
- */
+/* Caller must hold RTNL. */
 int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
 {
        struct trie *t = (struct trie *) tb->tb_data;
        struct fib_alias *fa, *fa_to_delete;
+       struct key_vector *l, *tp;
        u8 plen = cfg->fc_dst_len;
-       u8 tos = cfg->fc_tos;
        u8 slen = KEYLENGTH - plen;
-       struct tnode *l;
-       u32 key, mask;
+       u8 tos = cfg->fc_tos;
+       u32 key;
 
        if (plen > KEYLENGTH)
                return -EINVAL;
 
        key = ntohl(cfg->fc_dst);
-       mask = ntohl(inet_make_mask(plen));
 
-       if (key & ~mask)
+       if ((plen < KEYLENGTH) && (key << plen))
                return -EINVAL;
 
-       l = fib_find_node(t, key);
+       l = fib_find_node(t, &tp, key);
        if (!l)
                return -ESRCH;
 
        fa = fib_find_alias(&l->leaf, slen, tos, 0);
-
        if (!fa)
                return -ESRCH;
 
@@ -1474,150 +1498,215 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        if (!fa_to_delete)
                return -ESRCH;
 
-       fa = fa_to_delete;
-       rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
-                 &cfg->fc_nlinfo, 0);
+       netdev_switch_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
+                                  cfg->fc_type, tb->tb_id);
 
-       fib_remove_alias(l, fa);
+       rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
+                 &cfg->fc_nlinfo, 0);
 
        if (!plen)
                tb->tb_num_default--;
 
-       if (hlist_empty(&l->leaf))
-               trie_leaf_remove(t, l);
+       fib_remove_alias(t, tp, l, fa_to_delete);
 
-       if (fa->fa_state & FA_S_ACCESSED)
+       if (fa_to_delete->fa_state & FA_S_ACCESSED)
                rt_cache_flush(cfg->fc_nlinfo.nl_net);
 
-       fib_release_info(fa->fa_info);
-       alias_free_mem_rcu(fa);
+       fib_release_info(fa_to_delete->fa_info);
+       alias_free_mem_rcu(fa_to_delete);
        return 0;
 }
 
-static int trie_flush_leaf(struct tnode *l)
+/* Scan for the next leaf starting at the provided key value */
+static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
 {
-       struct hlist_node *tmp;
-       unsigned char slen = 0;
-       struct fib_alias *fa;
-       int found = 0;
+       struct key_vector *pn, *n = *tn;
+       unsigned long cindex;
 
-       hlist_for_each_entry_safe(fa, tmp, &l->leaf, fa_list) {
-               struct fib_info *fi = fa->fa_info;
+       /* this loop is meant to try and find the key in the trie */
+       do {
+               /* record parent and next child index */
+               pn = n;
+               cindex = get_index(key, pn);
 
-               if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
-                       hlist_del_rcu(&fa->fa_list);
-                       fib_release_info(fa->fa_info);
-                       alias_free_mem_rcu(fa);
-                       found++;
+               if (cindex >> pn->bits)
+                       break;
 
+               /* descend into the next child */
+               n = get_child_rcu(pn, cindex++);
+               if (!n)
+                       break;
+
+               /* guarantee forward progress on the keys */
+               if (IS_LEAF(n) && (n->key >= key))
+                       goto found;
+       } while (IS_TNODE(n));
+
+       /* this loop will search for the next leaf with a greater key */
+       while (!IS_TRIE(pn)) {
+               /* if we exhausted the parent node we will need to climb */
+               if (cindex >= (1ul << pn->bits)) {
+                       t_key pkey = pn->key;
+
+                       pn = node_parent_rcu(pn);
+                       cindex = get_index(pkey, pn) + 1;
                        continue;
                }
 
-               slen = fa->fa_slen;
-       }
+               /* grab the next available node */
+               n = get_child_rcu(pn, cindex++);
+               if (!n)
+                       continue;
+
+               /* no need to compare keys since we bumped the index */
+               if (IS_LEAF(n))
+                       goto found;
 
-       l->slen = slen;
+               /* Rescan start scanning in new node */
+               pn = n;
+               cindex = 0;
+       }
 
-       return found;
+       *tn = pn;
+       return NULL; /* Root of trie */
+found:
+       /* if we are at the limit for keys just return NULL for the tnode */
+       *tn = pn;
+       return n;
 }
 
-/* Scan for the next right leaf starting at node p->child[idx]
- * Since we have back pointer, no recursion necessary.
- */
-static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
+/* Caller must hold RTNL */
+void fib_table_flush_external(struct fib_table *tb)
 {
-       do {
-               unsigned long idx = c ? idx = get_index(c->key, p) + 1 : 0;
+       struct trie *t = (struct trie *)tb->tb_data;
+       struct key_vector *pn = t->kv;
+       unsigned long cindex = 1;
+       struct hlist_node *tmp;
+       struct fib_alias *fa;
 
-               while (idx < tnode_child_length(p)) {
-                       c = tnode_get_child_rcu(p, idx++);
-                       if (!c)
-                               continue;
+       /* walk trie in reverse order */
+       for (;;) {
+               struct key_vector *n;
 
-                       if (IS_LEAF(c))
-                               return c;
+               if (!(cindex--)) {
+                       t_key pkey = pn->key;
 
-                       /* Rescan start scanning in new node */
-                       p = c;
-                       idx = 0;
+                       /* cannot resize the trie vector */
+                       if (IS_TRIE(pn))
+                               break;
+
+                       /* no need to resize like in flush below */
+                       pn = node_parent(pn);
+                       cindex = get_index(pkey, pn);
+
+                       continue;
                }
 
-               /* Node empty, walk back up to parent */
-               c = p;
-       } while ((p = node_parent_rcu(c)) != NULL);
+               /* grab the next available node */
+               n = get_child(pn, cindex);
+               if (!n)
+                       continue;
 
-       return NULL; /* Root of trie */
-}
+               if (IS_TNODE(n)) {
+                       /* record pn and cindex for leaf walking */
+                       pn = n;
+                       cindex = 1ul << n->bits;
 
-static struct tnode *trie_firstleaf(struct trie *t)
-{
-       struct tnode *n = rcu_dereference_rtnl(t->trie);
+                       continue;
+               }
 
-       if (!n)
-               return NULL;
+               hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+                       struct fib_info *fi = fa->fa_info;
 
-       if (IS_LEAF(n))          /* trie is just a leaf */
-               return n;
+                       if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
+                               continue;
 
-       return leaf_walk_rcu(n, NULL);
+                       netdev_switch_fib_ipv4_del(n->key,
+                                                  KEYLENGTH - fa->fa_slen,
+                                                  fi, fa->fa_tos,
+                                                  fa->fa_type, tb->tb_id);
+               }
+       }
 }
 
-static struct tnode *trie_nextleaf(struct tnode *l)
+/* Caller must hold RTNL. */
+int fib_table_flush(struct fib_table *tb)
 {
-       struct tnode *p = node_parent_rcu(l);
+       struct trie *t = (struct trie *)tb->tb_data;
+       struct key_vector *pn = t->kv;
+       unsigned long cindex = 1;
+       struct hlist_node *tmp;
+       struct fib_alias *fa;
+       int found = 0;
 
-       if (!p)
-               return NULL;    /* trie with just one leaf */
+       /* walk trie in reverse order */
+       for (;;) {
+               unsigned char slen = 0;
+               struct key_vector *n;
 
-       return leaf_walk_rcu(p, l);
-}
+               if (!(cindex--)) {
+                       t_key pkey = pn->key;
 
-static struct tnode *trie_leafindex(struct trie *t, int index)
-{
-       struct tnode *l = trie_firstleaf(t);
+                       /* cannot resize the trie vector */
+                       if (IS_TRIE(pn))
+                               break;
 
-       while (l && index-- > 0)
-               l = trie_nextleaf(l);
+                       /* resize completed node */
+                       pn = resize(t, pn);
+                       cindex = get_index(pkey, pn);
 
-       return l;
-}
+                       continue;
+               }
 
+               /* grab the next available node */
+               n = get_child(pn, cindex);
+               if (!n)
+                       continue;
 
-/*
- * Caller must hold RTNL.
- */
-int fib_table_flush(struct fib_table *tb)
-{
-       struct trie *t = (struct trie *) tb->tb_data;
-       struct tnode *l, *ll = NULL;
-       int found = 0;
+               if (IS_TNODE(n)) {
+                       /* record pn and cindex for leaf walking */
+                       pn = n;
+                       cindex = 1ul << n->bits;
 
-       for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
-               found += trie_flush_leaf(l);
+                       continue;
+               }
 
-               if (ll) {
-                       if (hlist_empty(&ll->leaf))
-                               trie_leaf_remove(t, ll);
-                       else
-                               leaf_pull_suffix(ll);
+               hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+                       struct fib_info *fi = fa->fa_info;
+
+                       if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) {
+                               slen = fa->fa_slen;
+                               continue;
+                       }
+
+                       netdev_switch_fib_ipv4_del(n->key,
+                                                  KEYLENGTH - fa->fa_slen,
+                                                  fi, fa->fa_tos,
+                                                  fa->fa_type, tb->tb_id);
+                       hlist_del_rcu(&fa->fa_list);
+                       fib_release_info(fa->fa_info);
+                       alias_free_mem_rcu(fa);
+                       found++;
                }
 
-               ll = l;
-       }
+               /* update leaf slen */
+               n->slen = slen;
 
-       if (ll) {
-               if (hlist_empty(&ll->leaf))
-                       trie_leaf_remove(t, ll);
-               else
-                       leaf_pull_suffix(ll);
+               if (hlist_empty(&n->leaf)) {
+                       put_child_root(pn, n->key, NULL);
+                       node_free(n);
+               } else {
+                       leaf_pull_suffix(pn, n);
+               }
        }
 
        pr_debug("trie_flush found=%d\n", found);
        return found;
 }
 
-void fib_free_table(struct fib_table *tb)
+static void __trie_free_rcu(struct rcu_head *head)
 {
+       struct fib_table *tb = container_of(head, struct fib_table, rcu);
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        struct trie *t = (struct trie *)tb->tb_data;
 
@@ -1626,7 +1715,12 @@ void fib_free_table(struct fib_table *tb)
        kfree(tb);
 }
 
-static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
+void fib_free_table(struct fib_table *tb)
+{
+       call_rcu(&tb->rcu, __trie_free_rcu);
+}
+
+static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
                             struct sk_buff *skb, struct netlink_callback *cb)
 {
        __be32 xkey = htonl(l->key);
@@ -1662,44 +1756,38 @@ static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
        return skb->len;
 }
 
+/* rcu_read_lock needs to be hold by caller from readside */
 int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
                   struct netlink_callback *cb)
 {
-       struct tnode *l;
-       struct trie *t = (struct trie *) tb->tb_data;
-       t_key key = cb->args[2];
-       int count = cb->args[3];
-
-       rcu_read_lock();
+       struct trie *t = (struct trie *)tb->tb_data;
+       struct key_vector *l, *tp = t->kv;
        /* Dump starting at last key.
         * Note: 0.0.0.0/0 (ie default) is first key.
         */
-       if (count == 0)
-               l = trie_firstleaf(t);
-       else {
-               /* Normally, continue from last key, but if that is missing
-                * fallback to using slow rescan
-                */
-               l = fib_find_node(t, key);
-               if (!l)
-                       l = trie_leafindex(t, count);
-       }
+       int count = cb->args[2];
+       t_key key = cb->args[3];
 
-       while (l) {
-               cb->args[2] = l->key;
+       while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
                if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
-                       cb->args[3] = count;
-                       rcu_read_unlock();
+                       cb->args[3] = key;
+                       cb->args[2] = count;
                        return -1;
                }
 
                ++count;
-               l = trie_nextleaf(l);
+               key = l->key + 1;
+
                memset(&cb->args[4], 0,
                       sizeof(cb->args) - 4*sizeof(cb->args[0]));
+
+               /* stop loop if key wrapped back to 0 */
+               if (key < l->key)
+                       break;
        }
-       cb->args[3] = count;
-       rcu_read_unlock();
+
+       cb->args[3] = key;
+       cb->args[2] = count;
 
        return skb->len;
 }
@@ -1711,18 +1799,16 @@ void __init fib_trie_init(void)
                                          0, SLAB_PANIC, NULL);
 
        trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
-                                          sizeof(struct tnode),
+                                          LEAF_SIZE,
                                           0, SLAB_PANIC, NULL);
 }
 
-
 struct fib_table *fib_trie_table(u32 id)
 {
        struct fib_table *tb;
        struct trie *t;
 
-       tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
-                    GFP_KERNEL);
+       tb = kzalloc(sizeof(*tb) + sizeof(struct trie), GFP_KERNEL);
        if (tb == NULL)
                return NULL;
 
@@ -1731,7 +1817,8 @@ struct fib_table *fib_trie_table(u32 id)
        tb->tb_num_default = 0;
 
        t = (struct trie *) tb->tb_data;
-       RCU_INIT_POINTER(t->trie, NULL);
+       t->kv[0].pos = KEYLENGTH;
+       t->kv[0].slen = KEYLENGTH;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        t->stats = alloc_percpu(struct trie_use_stats);
        if (!t->stats) {
@@ -1748,65 +1835,63 @@ struct fib_table *fib_trie_table(u32 id)
 struct fib_trie_iter {
        struct seq_net_private p;
        struct fib_table *tb;
-       struct tnode *tnode;
+       struct key_vector *tnode;
        unsigned int index;
        unsigned int depth;
 };
 
-static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
 {
        unsigned long cindex = iter->index;
-       struct tnode *tn = iter->tnode;
-       struct tnode *p;
-
-       /* A single entry routing table */
-       if (!tn)
-               return NULL;
+       struct key_vector *pn = iter->tnode;
+       t_key pkey;
 
        pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
                 iter->tnode, iter->index, iter->depth);
-rescan:
-       while (cindex < tnode_child_length(tn)) {
-               struct tnode *n = tnode_get_child_rcu(tn, cindex);
 
-               if (n) {
+       while (!IS_TRIE(pn)) {
+               while (cindex < child_length(pn)) {
+                       struct key_vector *n = get_child_rcu(pn, cindex++);
+
+                       if (!n)
+                               continue;
+
                        if (IS_LEAF(n)) {
-                               iter->tnode = tn;
-                               iter->index = cindex + 1;
+                               iter->tnode = pn;
+                               iter->index = cindex;
                        } else {
                                /* push down one level */
                                iter->tnode = n;
                                iter->index = 0;
                                ++iter->depth;
                        }
+
                        return n;
                }
 
-               ++cindex;
-       }
-
-       /* Current node exhausted, pop back up */
-       p = node_parent_rcu(tn);
-       if (p) {
-               cindex = get_index(tn->key, p) + 1;
-               tn = p;
+               /* Current node exhausted, pop back up */
+               pkey = pn->key;
+               pn = node_parent_rcu(pn);
+               cindex = get_index(pkey, pn) + 1;
                --iter->depth;
-               goto rescan;
        }
 
-       /* got root? */
+       /* record root node so further searches know we are done */
+       iter->tnode = pn;
+       iter->index = 0;
+
        return NULL;
 }
 
-static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
-                                      struct trie *t)
+static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
+                                            struct trie *t)
 {
-       struct tnode *n;
+       struct key_vector *n, *pn = t->kv;
 
        if (!t)
                return NULL;
 
-       n = rcu_dereference(t->trie);
+       n = rcu_dereference(pn->tnode[0]);
        if (!n)
                return NULL;
 
@@ -1815,7 +1900,7 @@ static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
                iter->index = 0;
                iter->depth = 1;
        } else {
-               iter->tnode = NULL;
+               iter->tnode = pn;
                iter->index = 0;
                iter->depth = 0;
        }
@@ -1825,7 +1910,7 @@ static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
 
 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
 {
-       struct tnode *n;
+       struct key_vector *n;
        struct fib_trie_iter iter;
 
        memset(s, 0, sizeof(*s));
@@ -1846,7 +1931,7 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
                        s->tnodes++;
                        if (n->bits < MAX_STAT_DEPTH)
                                s->nodesizes[n->bits]++;
-                       s->nullpointers += n->empty_children;
+                       s->nullpointers += tn_info(n)->empty_children;
                }
        }
        rcu_read_unlock();
@@ -1869,13 +1954,13 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
        seq_printf(seq, "\tMax depth:      %u\n", stat->maxdepth);
 
        seq_printf(seq, "\tLeaves:         %u\n", stat->leaves);
-       bytes = sizeof(struct tnode) * stat->leaves;
+       bytes = LEAF_SIZE * stat->leaves;
 
        seq_printf(seq, "\tPrefixes:       %u\n", stat->prefixes);
        bytes += sizeof(struct fib_alias) * stat->prefixes;
 
        seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
-       bytes += sizeof(struct tnode) * stat->tnodes;
+       bytes += TNODE_SIZE(0) * stat->tnodes;
 
        max = MAX_STAT_DEPTH;
        while (max > 0 && stat->nodesizes[max-1] == 0)
@@ -1890,7 +1975,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
        seq_putc(seq, '\n');
        seq_printf(seq, "\tPointers: %u\n", pointers);
 
-       bytes += sizeof(struct tnode *) * pointers;
+       bytes += sizeof(struct key_vector *) * pointers;
        seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
        seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
 }
@@ -1944,7 +2029,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq,
                   "Basic info: size of leaf:"
                   " %Zd bytes, size of tnode: %Zd bytes.\n",
-                  sizeof(struct tnode), sizeof(struct tnode));
+                  LEAF_SIZE, TNODE_SIZE(0));
 
        for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[h];
@@ -1983,7 +2068,7 @@ static const struct file_operations fib_triestat_fops = {
        .release = single_release_net,
 };
 
-static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 {
        struct fib_trie_iter *iter = seq->private;
        struct net *net = seq_file_net(seq);
@@ -1995,7 +2080,7 @@ static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
                struct fib_table *tb;
 
                hlist_for_each_entry_rcu(tb, head, tb_hlist) {
-                       struct tnode *n;
+                       struct key_vector *n;
 
                        for (n = fib_trie_get_first(iter,
                                                    (struct trie *) tb->tb_data);
@@ -2024,7 +2109,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct fib_table *tb = iter->tb;
        struct hlist_node *tb_node;
        unsigned int h;
-       struct tnode *n;
+       struct key_vector *n;
 
        ++*pos;
        /* next node in same table */
@@ -2110,9 +2195,9 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
 static int fib_trie_seq_show(struct seq_file *seq, void *v)
 {
        const struct fib_trie_iter *iter = seq->private;
-       struct tnode *n = v;
+       struct key_vector *n = v;
 
-       if (!node_parent_rcu(n))
+       if (IS_TRIE(node_parent_rcu(n)))
                fib_table_print(seq, iter->tb);
 
        if (IS_TNODE(n)) {
@@ -2121,7 +2206,8 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
                seq_indent(seq, iter->depth-1);
                seq_printf(seq, "  +-- %pI4/%zu %u %u %u\n",
                           &prf, KEYLENGTH - n->pos - n->bits, n->bits,
-                          n->full_children, n->empty_children);
+                          tn_info(n)->full_children,
+                          tn_info(n)->empty_children);
        } else {
                __be32 val = htonl(n->key);
                struct fib_alias *fa;
@@ -2171,31 +2257,47 @@ static const struct file_operations fib_trie_fops = {
 
 struct fib_route_iter {
        struct seq_net_private p;
-       struct trie *main_trie;
+       struct fib_table *main_tb;
+       struct key_vector *tnode;
        loff_t  pos;
        t_key   key;
 };
 
-static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
+static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+                                           loff_t pos)
 {
-       struct tnode *l = NULL;
-       struct trie *t = iter->main_trie;
+       struct fib_table *tb = iter->main_tb;
+       struct key_vector *l, **tp = &iter->tnode;
+       struct trie *t;
+       t_key key;
 
-       /* use cache location of last found key */
-       if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
+       /* use cache location of next-to-find key */
+       if (iter->pos > 0 && pos >= iter->pos) {
                pos -= iter->pos;
-       else {
+               key = iter->key;
+       } else {
+               t = (struct trie *)tb->tb_data;
+               iter->tnode = t->kv;
                iter->pos = 0;
-               l = trie_firstleaf(t);
+               key = 0;
        }
 
-       while (l && pos-- > 0) {
+       while ((l = leaf_walk_rcu(tp, key)) != NULL) {
+               key = l->key + 1;
                iter->pos++;
-               l = trie_nextleaf(l);
+
+               if (pos-- <= 0)
+                       break;
+
+               l = NULL;
+
+               /* handle unlikely case of a key wrap */
+               if (!key)
+                       break;
        }
 
        if (l)
-               iter->key = pos;        /* remember it */
+               iter->key = key;        /* remember it */
        else
                iter->pos = 0;          /* forget it */
 
@@ -2207,37 +2309,46 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
 {
        struct fib_route_iter *iter = seq->private;
        struct fib_table *tb;
+       struct trie *t;
 
        rcu_read_lock();
+
        tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
        if (!tb)
                return NULL;
 
-       iter->main_trie = (struct trie *) tb->tb_data;
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-       else
-               return fib_route_get_idx(iter, *pos - 1);
+       iter->main_tb = tb;
+
+       if (*pos != 0)
+               return fib_route_get_idx(iter, *pos);
+
+       t = (struct trie *)tb->tb_data;
+       iter->tnode = t->kv;
+       iter->pos = 0;
+       iter->key = 0;
+
+       return SEQ_START_TOKEN;
 }
 
 static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct fib_route_iter *iter = seq->private;
-       struct tnode *l = v;
+       struct key_vector *l = NULL;
+       t_key key = iter->key;
 
        ++*pos;
-       if (v == SEQ_START_TOKEN) {
-               iter->pos = 0;
-               l = trie_firstleaf(iter->main_trie);
-       } else {
+
+       /* only allow key of 0 for start of sequence */
+       if ((v == SEQ_START_TOKEN) || key)
+               l = leaf_walk_rcu(&iter->tnode, key);
+
+       if (l) {
+               iter->key = l->key + 1;
                iter->pos++;
-               l = trie_nextleaf(l);
+       } else {
+               iter->pos = 0;
        }
 
-       if (l)
-               iter->key = l->key;
-       else
-               iter->pos = 0;
        return l;
 }
 
@@ -2270,7 +2381,7 @@ static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info
 static int fib_route_seq_show(struct seq_file *seq, void *v)
 {
        struct fib_alias *fa;
-       struct tnode *l = v;
+       struct key_vector *l = v;
        __be32 prefix;
 
        if (v == SEQ_START_TOKEN) {
index 81751f12645f6224a7f076dea1da2fc0d638819a..0c974d3499ed77190e54015d653463726e98f9ca 100644 (file)
@@ -508,7 +508,7 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
        }
        entry.sport = inet->inet_num;
        entry.dport = ntohs(inet->inet_dport);
-       entry.userlocks = sk->sk_userlocks;
+       entry.userlocks = (sk->sk_state != TCP_TIME_WAIT) ? sk->sk_userlocks : 0;
 
        return inet_diag_bc_run(bc, &entry);
 }
@@ -642,37 +642,44 @@ static int inet_csk_diag_dump(struct sock *sk,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
+static void twsk_build_assert(void)
+{
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
+                    offsetof(struct sock, sk_family));
+
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
+                    offsetof(struct inet_sock, inet_num));
+
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
+                    offsetof(struct inet_sock, inet_dport));
+
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
+                    offsetof(struct inet_sock, inet_rcv_saddr));
+
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
+                    offsetof(struct inet_sock, inet_daddr));
+
+#if IS_ENABLED(CONFIG_IPV6)
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
+                    offsetof(struct sock, sk_v6_rcv_saddr));
+
+       BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
+                    offsetof(struct sock, sk_v6_daddr));
+#endif
+}
+
 static int inet_twsk_diag_dump(struct sock *sk,
                               struct sk_buff *skb,
                               struct netlink_callback *cb,
                               struct inet_diag_req_v2 *r,
                               const struct nlattr *bc)
 {
-       struct inet_timewait_sock *tw = inet_twsk(sk);
+       twsk_build_assert();
 
-       if (bc != NULL) {
-               struct inet_diag_entry entry;
-
-               entry.family = tw->tw_family;
-#if IS_ENABLED(CONFIG_IPV6)
-               if (tw->tw_family == AF_INET6) {
-                       entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
-                       entry.daddr = tw->tw_v6_daddr.s6_addr32;
-               } else
-#endif
-               {
-                       entry.saddr = &tw->tw_rcv_saddr;
-                       entry.daddr = &tw->tw_daddr;
-               }
-               entry.sport = tw->tw_num;
-               entry.dport = ntohs(tw->tw_dport);
-               entry.userlocks = 0;
-
-               if (!inet_diag_bc_run(bc, &entry))
-                       return 0;
-       }
+       if (!inet_diag_bc_sk(bc, sk))
+               return 0;
 
-       return inet_twsk_diag_fill(tw, skb, r,
+       return inet_twsk_diag_fill(inet_twsk(sk), skb, r,
                                   NETLINK_CB(cb->skb).portid,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
index e5b6d0ddcb5808f662ca0b1fd5863d63e6b54b83..2c8d98e728c09feea0ffe45adf8b9b2470078c43 100644 (file)
@@ -664,7 +664,7 @@ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
        if (skb->protocol != htons(ETH_P_IP))
                return skb;
 
-       if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
+       if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
                return skb;
 
        if (iph.ihl < 5 || iph.version != 4)
index d68199d9b2b01faf7a5272862b7cd8658f08c8ae..a7aea2048a0d7a624ceb79923d25e9750ec6fa9a 100644 (file)
@@ -888,7 +888,8 @@ static int __ip_append_data(struct sock *sk,
        cork->length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
-           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
+           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+           (sk->sk_type == SOCK_DGRAM)) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
index d151539da8e6948571bfdfbc105c838b3b561d71..fdf899163d4412af8bc1df82de74d410de2f7c15 100644 (file)
@@ -883,6 +883,20 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "tcp_probe_threshold",
+               .data           = &init_net.ipv4.sysctl_tcp_probe_threshold,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "tcp_probe_interval",
+               .data           = &init_net.ipv4.sysctl_tcp_probe_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        { }
 };
 
index 8fdd27b173061def484663beeace691a8bfa2365..fb4cf8b8e121acd4bffcf2fdfbd7e03c76bad7cc 100644 (file)
@@ -4770,7 +4770,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
                return false;
 
        /* If we filled the congestion window, do not expand.  */
-       if (tp->packets_out >= tp->snd_cwnd)
+       if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
                return false;
 
        return true;
index 5a2dfed4783b6ed0185dccded960972b4d6e13b0..f0c6fc32bfa836cee854a4abb7ca733c8bbc6a65 100644 (file)
@@ -2460,6 +2460,8 @@ static int __net_init tcp_sk_init(struct net *net)
        }
        net->ipv4.sysctl_tcp_ecn = 2;
        net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+       net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
+       net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
        return 0;
 
 fail:
index 8bbd86cd81c8290eecc43bff90efc14f41689cb1..5a73ad5afaf7206537a5603950a6826e5dbe01ea 100644 (file)
@@ -1354,6 +1354,8 @@ void tcp_mtup_init(struct sock *sk)
                               icsk->icsk_af_ops->net_header_len;
        icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
        icsk->icsk_mtup.probe_size = 0;
+       if (icsk->icsk_mtup.enabled)
+               icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
 }
 EXPORT_SYMBOL(tcp_mtup_init);
 
@@ -1828,6 +1830,31 @@ send_now:
        return false;
 }
 
+static inline void tcp_mtu_check_reprobe(struct sock *sk)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
+       u32 interval;
+       s32 delta;
+
+       interval = net->ipv4.sysctl_tcp_probe_interval;
+       delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
+       if (unlikely(delta >= interval * HZ)) {
+               int mss = tcp_current_mss(sk);
+
+               /* Update current search range */
+               icsk->icsk_mtup.probe_size = 0;
+               icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
+                       sizeof(struct tcphdr) +
+                       icsk->icsk_af_ops->net_header_len;
+               icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+
+               /* Update probe time stamp */
+               icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+       }
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets.  This discovers routing
@@ -1842,11 +1869,13 @@ static int tcp_mtu_probe(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb, *nskb, *next;
+       struct net *net = sock_net(sk);
        int len;
        int probe_size;
        int size_needed;
        int copy;
        int mss_now;
+       int interval;
 
        /* Not currently probing/verifying,
         * not in recovery,
@@ -1859,12 +1888,25 @@ static int tcp_mtu_probe(struct sock *sk)
            tp->rx_opt.num_sacks || tp->rx_opt.dsack)
                return -1;
 
-       /* Very simple search strategy: just double the MSS. */
+       /* Use binary search for probe_size between tcp_mss_base,
+        * and current mss_clamp. if (search_high - search_low)
+        * smaller than a threshold, backoff from probing.
+        */
        mss_now = tcp_current_mss(sk);
-       probe_size = 2 * tp->mss_cache;
+       probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
+                                   icsk->icsk_mtup.search_low) >> 1);
        size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
-       if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
-               /* TODO: set timer for probe_converge_event */
+       interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
+       /* When misfortune happens, we are reprobing actively,
+        * and then reprobe timer has expired. We stick with current
+        * probing process by not resetting search range to its original.
+        */
+       if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
+               interval < net->ipv4.sysctl_tcp_probe_threshold) {
+               /* Check whether enough time has elapsed for
+                * another round of probing.
+                */
+               tcp_mtu_check_reprobe(sk);
                return -1;
        }
 
index 0732b787904ed32003bb776c744ed56457e0cb37..15505936511d4b21a2f34786e9481eabcd900a7c 100644 (file)
@@ -107,6 +107,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
        if (net->ipv4.sysctl_tcp_mtu_probing) {
                if (!icsk->icsk_mtup.enabled) {
                        icsk->icsk_mtup.enabled = 1;
+                       icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
                        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                } else {
                        struct net *net = sock_net(sk);
index 783bccfcc0606217c0bdadb3a01be4c0858d8640..88d2cf0cae52fd9798ed709ff64dc9462252ecdc 100644 (file)
@@ -4935,6 +4935,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static
+int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+                       void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct inet6_dev *idev = ctl->extra1;
+       int min_mtu = IPV6_MIN_MTU;
+       struct ctl_table lctl;
+
+       lctl = *ctl;
+       lctl.extra1 = &min_mtu;
+       lctl.extra2 = idev ? &idev->dev->mtu : NULL;
+
+       return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
+}
+
 static void dev_disable_change(struct inet6_dev *idev)
 {
        struct netdev_notifier_info info;
@@ -5086,7 +5101,7 @@ static struct addrconf_sysctl_table
                        .data           = &ipv6_devconf.mtu6,
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .proc_handler   = addrconf_sysctl_mtu,
                },
                {
                        .procname       = "accept_ra",
index 7deebf102cbafc276f45e4eaffdd8efdb658d842..0a04a37305d5ab56d0f24cc3b3f917386cb62bd8 100644 (file)
@@ -1298,7 +1298,8 @@ emsgsize:
        if (((length > mtu) ||
             (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
-           (rt->dst.dev->features & NETIF_F_UFO)) {
+           (rt->dst.dev->features & NETIF_F_UFO) &&
+           (sk->sk_type == SOCK_DGRAM)) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen,
                                          transhdrlen, mtu, flags, rt);
index e363bbc2420d8dec7e4d3c4626ac089513d0a04e..247ad7c298f73e38a0169f0df299f9ba96de3271 100644 (file)
@@ -84,6 +84,7 @@ do {                                                          \
 static u32 ndisc_hash(const void *pkey,
                      const struct net_device *dev,
                      __u32 *hash_rnd);
+static bool ndisc_key_eq(const struct neighbour *neigh, const void *pkey);
 static int ndisc_constructor(struct neighbour *neigh);
 static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -119,6 +120,7 @@ struct neigh_table nd_tbl = {
        .key_len =      sizeof(struct in6_addr),
        .protocol =     cpu_to_be16(ETH_P_IPV6),
        .hash =         ndisc_hash,
+       .key_eq =       ndisc_key_eq,
        .constructor =  ndisc_constructor,
        .pconstructor = pndisc_constructor,
        .pdestructor =  pndisc_destructor,
@@ -295,6 +297,11 @@ static u32 ndisc_hash(const void *pkey,
        return ndisc_hashfn(pkey, dev, hash_rnd);
 }
 
+static bool ndisc_key_eq(const struct neighbour *n, const void *pkey)
+{
+       return neigh_key_eq128(n, pkey);
+}
+
 static int ndisc_constructor(struct neighbour *neigh)
 {
        struct in6_addr *addr = (struct in6_addr *)&neigh->primary_key;
index 40695b9751c10b41e7fd310fa5d15bcef4629549..9940a41efca1a8a1f3df86314c73bb5c1b330ce2 100644 (file)
@@ -811,7 +811,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
                        break;
        }
        spin_unlock_irqrestore(&self->spinlock, flags);
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
 }
 
 /*
index 3c83a1e5ab0394f0eaa04b4ba5a813d118c17434..1215693fdd22897b5217b878763d56d087a25372 100644 (file)
@@ -305,7 +305,7 @@ irnet_ctrl_read(irnet_socket *      ap,
 
   /* Put ourselves on the wait queue to be woken up */
   add_wait_queue(&irnet_events.rwait, &wait);
-  current->state = TASK_INTERRUPTIBLE;
+  set_current_state(TASK_INTERRUPTIBLE);
   for(;;)
     {
       /* If there is unread events */
@@ -321,7 +321,7 @@ irnet_ctrl_read(irnet_socket *      ap,
       /* Yield and wait to be woken up */
       schedule();
     }
-  current->state = TASK_RUNNING;
+  __set_current_state(TASK_RUNNING);
   remove_wait_queue(&irnet_events.rwait, &wait);
 
   /* Did we got it ? */
index 781b3a226ba73204aa9ff68923ae2385a88e7205..4b552873b55603a648f37bbd497efbb43bd869a4 100644 (file)
@@ -74,7 +74,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
 
        priv->dev = dev;
        eth_hw_addr_random(dev);
-       memset(&dev->broadcast[0], 0xff, 6);
+       eth_broadcast_addr(dev->broadcast);
        dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
        return 0;
 }
index dd4ff36c557a44158ef64cd18aa090600fec1faf..74f509c500f2ea56edf02247f034d2134581ab00 100644 (file)
@@ -1488,7 +1488,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
        if (next_hop_sta)
                memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN);
        else
-               memset(next_hop, 0, ETH_ALEN);
+               eth_zero_addr(next_hop);
 
        memset(pinfo, 0, sizeof(*pinfo));
 
index ff0d2db09df9db467a5831606971e02f2fe6d410..5bcd4e5589d3294602c4abdeff778497afbc8de1 100644 (file)
@@ -1508,6 +1508,8 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
        if (ieee80211_chanctx_refcount(local, ctx) == 0)
                ieee80211_free_chanctx(local, ctx);
 
+       sdata->radar_required = false;
+
        /* Unreserving may ready an in-place reservation. */
        if (use_reserved_switch)
                ieee80211_vif_use_reserved_switch(local);
@@ -1566,6 +1568,9 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
  out:
+       if (ret)
+               sdata->radar_required = false;
+
        mutex_unlock(&local->chanctx_mtx);
        return ret;
 }
index b606b53a49a7d92e178eb502f18469bd9da7a5cd..f9b07588baf5d331606092522b589a9bf4a52844 100644 (file)
@@ -1742,7 +1742,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        ieee80211_ibss_disconnect(sdata);
        ifibss->ssid_len = 0;
-       memset(ifibss->bssid, 0, ETH_ALEN);
+       eth_zero_addr(ifibss->bssid);
 
        /* remove beacon */
        kfree(sdata->u.ibss.ie);
index 0c8b2a77d312d5e3ad18f975ce808c44755c820b..49a44bcd8ababbed372e7b26d20ccbf871d34c7b 100644 (file)
@@ -520,7 +520,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
        } else {
                *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
                /* RA TA DA SA */
-               memset(hdr->addr1, 0, ETH_ALEN);   /* RA is resolved later */
+               eth_zero_addr(hdr->addr1);   /* RA is resolved later */
                memcpy(hdr->addr2, meshsa, ETH_ALEN);
                memcpy(hdr->addr3, meshda, ETH_ALEN);
                memcpy(hdr->addr4, meshsa, ETH_ALEN);
index 10ac6324c1d014c708749748ce89ef31055561cf..9f6f3562396a9fb9facab4832821fe5d321b506b 100644 (file)
@@ -2033,7 +2033,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
                ieee80211_flush_queues(local, sdata, false);
 
        /* clear bssid only after building the needed mgmt frames */
-       memset(ifmgd->bssid, 0, ETH_ALEN);
+       eth_zero_addr(ifmgd->bssid);
 
        /* remove AP and TDLS peers */
        sta_info_flush(sdata);
@@ -2464,7 +2464,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
                del_timer_sync(&sdata->u.mgd.timer);
                sta_info_destroy_addr(sdata, auth_data->bss->bssid);
 
-               memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
+               eth_zero_addr(sdata->u.mgd.bssid);
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
                sdata->u.mgd.flags = 0;
                mutex_lock(&sdata->local->mtx);
@@ -2777,7 +2777,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
                del_timer_sync(&sdata->u.mgd.timer);
                sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
 
-               memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
+               eth_zero_addr(sdata->u.mgd.bssid);
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
                sdata->u.mgd.flags = 0;
                mutex_lock(&sdata->local->mtx);
@@ -4474,7 +4474,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        return 0;
 
  err_clear:
-       memset(ifmgd->bssid, 0, ETH_ALEN);
+       eth_zero_addr(ifmgd->bssid);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
        ifmgd->auth_data = NULL;
  err_free:
@@ -4817,7 +4817,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        return 0;
  err_clear:
-       memset(ifmgd->bssid, 0, ETH_ALEN);
+       eth_zero_addr(ifmgd->bssid);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
        ifmgd->assoc_data = NULL;
  err_free:
index 7c86a002df95fee46be8e7dfdb0d691fff0e9e7f..ef6e8a6c4253c72f6f2398b733e8c427f5b59953 100644 (file)
@@ -373,7 +373,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
                rate++;
                mi->sample_deferred++;
        } else {
-               if (!msr->sample_limit != 0)
+               if (!msr->sample_limit)
                        return;
 
                mi->sample_packets++;
index 88a18ffe2975520edbcc80733bc1bbc9b2655f11..07bd8db00af84b820139c644da95eaf29e474b5f 100644 (file)
@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
                if (tx->sdata->control_port_no_encrypt)
                        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
                info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+               info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
        }
 
        return TX_CONTINUE;
index 37421db8896524299ea4648c187f7a235de10d6c..dfca485863e978d9fbf716421fcdd9f83a8b0c15 100644 (file)
@@ -1,9 +1,30 @@
 #
 # MPLS configuration
 #
+
+menuconfig MPLS
+       tristate "MultiProtocol Label Switching"
+       default n
+       ---help---
+         MultiProtocol Label Switching routes packets through logical
+         circuits.  Originally conceived as a way of routing packets at
+         hardware speeds (before hardware was capable of routing ipv4 packets),
+         MPLS remains a simple way of making tunnels.
+
+         If you have not heard of MPLS you probably want to say N here.
+
+if MPLS
+
 config NET_MPLS_GSO
-       tristate "MPLS: GSO support"
+       bool "MPLS: GSO support"
        help
         This is helper module to allow segmentation of non-MPLS GSO packets
         that have had MPLS stack entries pushed onto them and thus
         become MPLS GSO packets.
+
+config MPLS_ROUTING
+       bool "MPLS: routing support"
+       help
+        Add support for forwarding of mpls packets.
+
+endif # MPLS
index 6dec088c2d0f77dff06bf6bbd8557d2f325bafa1..60af15f1960e73839adc57104b0807d65fcdfd2e 100644 (file)
@@ -2,3 +2,4 @@
 # Makefile for MPLS.
 #
 obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
+obj-$(CONFIG_MPLS_ROUTING) += af_mpls.o
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
new file mode 100644 (file)
index 0000000..0ad8f71
--- /dev/null
@@ -0,0 +1,1008 @@
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/sysctl.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/ipv6.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/ip_fib.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include "internal.h"
+
+#define LABEL_NOT_SPECIFIED (1<<20)
+#define MAX_NEW_LABELS 2
+
+/* This maximum ha length is copied from the definition of struct neighbour */
+#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
+
+struct mpls_route { /* next hop label forwarding entry */
+       struct net_device __rcu *rt_dev;
+       struct rcu_head         rt_rcu;
+       u32                     rt_label[MAX_NEW_LABELS];
+       u8                      rt_protocol; /* routing protocol that set this entry */
+       u8                      rt_labels;
+       u8                      rt_via_alen;
+       u8                      rt_via_table;
+       u8                      rt_via[0];
+};
+
+static int zero = 0;
+static int label_limit = (1 << 20) - 1;
+
+static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
+                      struct nlmsghdr *nlh, struct net *net, u32 portid,
+                      unsigned int nlm_flags);
+
+static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
+{
+       struct mpls_route *rt = NULL;
+
+       if (index < net->mpls.platform_labels) {
+               struct mpls_route __rcu **platform_label =
+                       rcu_dereference(net->mpls.platform_label);
+               rt = rcu_dereference(platform_label[index]);
+       }
+       return rt;
+}
+
+static bool mpls_output_possible(const struct net_device *dev)
+{
+       return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
+}
+
+static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
+{
+       /* The size of the layer 2.5 labels to be added for this route */
+       return rt->rt_labels * sizeof(struct mpls_shim_hdr);
+}
+
+static unsigned int mpls_dev_mtu(const struct net_device *dev)
+{
+       /* The amount of data the layer 2 frame can hold */
+       return dev->mtu;
+}
+
+static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+       if (skb->len <= mtu)
+               return false;
+
+       if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+               return false;
+
+       return true;
+}
+
+static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
+                       struct mpls_entry_decoded dec)
+{
+       /* RFC4385 and RFC5586 encode other packets in mpls such that
+        * they don't conflict with the ip version number, making
+        * decoding by examining the ip version correct in everything
+        * except for the strangest cases.
+        *
+        * The strange cases if we choose to support them will require
+        * manual configuration.
+        */
+       struct iphdr *hdr4 = ip_hdr(skb);
+       bool success = true;
+
+       if (hdr4->version == 4) {
+               skb->protocol = htons(ETH_P_IP);
+               csum_replace2(&hdr4->check,
+                             htons(hdr4->ttl << 8),
+                             htons(dec.ttl << 8));
+               hdr4->ttl = dec.ttl;
+       }
+       else if (hdr4->version == 6) {
+               struct ipv6hdr *hdr6 = ipv6_hdr(skb);
+               skb->protocol = htons(ETH_P_IPV6);
+               hdr6->hop_limit = dec.ttl;
+       }
+       else
+               /* version 0 and version 1 are used by pseudo wires */
+               success = false;
+       return success;
+}
+
+static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
+                       struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct net *net = dev_net(dev);
+       struct mpls_shim_hdr *hdr;
+       struct mpls_route *rt;
+       struct mpls_entry_decoded dec;
+       struct net_device *out_dev;
+       unsigned int hh_len;
+       unsigned int new_header_size;
+       unsigned int mtu;
+       int err;
+
+       /* Careful this entire function runs inside of an rcu critical section */
+
+       if (skb->pkt_type != PACKET_HOST)
+               goto drop;
+
+       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+               goto drop;
+
+       if (!pskb_may_pull(skb, sizeof(*hdr)))
+               goto drop;
+
+       /* Read and decode the label */
+       hdr = mpls_hdr(skb);
+       dec = mpls_entry_decode(hdr);
+
+       /* Pop the label */
+       skb_pull(skb, sizeof(*hdr));
+       skb_reset_network_header(skb);
+
+       skb_orphan(skb);
+
+       rt = mpls_route_input_rcu(net, dec.label);
+       if (!rt)
+               goto drop;
+
+       /* Find the output device */
+       out_dev = rcu_dereference(rt->rt_dev);
+       if (!mpls_output_possible(out_dev))
+               goto drop;
+
+       if (skb_warn_if_lro(skb))
+               goto drop;
+
+       skb_forward_csum(skb);
+
+       /* Verify ttl is valid */
+       if (dec.ttl <= 1)
+               goto drop;
+       dec.ttl -= 1;
+
+       /* Verify the destination can hold the packet */
+       new_header_size = mpls_rt_header_size(rt);
+       mtu = mpls_dev_mtu(out_dev);
+       if (mpls_pkt_too_big(skb, mtu - new_header_size))
+               goto drop;
+
+       hh_len = LL_RESERVED_SPACE(out_dev);
+       if (!out_dev->header_ops)
+               hh_len = 0;
+
+       /* Ensure there is enough space for the headers in the skb */
+       if (skb_cow(skb, hh_len + new_header_size))
+               goto drop;
+
+       skb->dev = out_dev;
+       skb->protocol = htons(ETH_P_MPLS_UC);
+
+       if (unlikely(!new_header_size && dec.bos)) {
+               /* Penultimate hop popping */
+               if (!mpls_egress(rt, skb, dec))
+                       goto drop;
+       } else {
+               bool bos;
+               int i;
+               skb_push(skb, new_header_size);
+               skb_reset_network_header(skb);
+               /* Push the new labels */
+               hdr = mpls_hdr(skb);
+               bos = dec.bos;
+               for (i = rt->rt_labels - 1; i >= 0; i--) {
+                       hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
+                       bos = false;
+               }
+       }
+
+       err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
+       if (err)
+               net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+                                   __func__, err);
+       return 0;
+
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
+}
+
+static struct packet_type mpls_packet_type __read_mostly = {
+       .type = cpu_to_be16(ETH_P_MPLS_UC),
+       .func = mpls_forward,
+};
+
+static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
+       [RTA_DST]               = { .type = NLA_U32 },
+       [RTA_OIF]               = { .type = NLA_U32 },
+};
+
+struct mpls_route_config {
+       u32             rc_protocol;
+       u32             rc_ifindex;
+       u16             rc_via_table;
+       u16             rc_via_alen;
+       u8              rc_via[MAX_VIA_ALEN];
+       u32             rc_label;
+       u32             rc_output_labels;
+       u32             rc_output_label[MAX_NEW_LABELS];
+       u32             rc_nlflags;
+       struct nl_info  rc_nlinfo;
+};
+
+static struct mpls_route *mpls_rt_alloc(size_t alen)
+{
+       struct mpls_route *rt;
+
+       rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL);
+       if (rt)
+               rt->rt_via_alen = alen;
+       return rt;
+}
+
+static void mpls_rt_free(struct mpls_route *rt)
+{
+       if (rt)
+               kfree_rcu(rt, rt_rcu);
+}
+
+static void mpls_notify_route(struct net *net, unsigned index,
+                             struct mpls_route *old, struct mpls_route *new,
+                             const struct nl_info *info)
+{
+       struct nlmsghdr *nlh = info ? info->nlh : NULL;
+       unsigned portid = info ? info->portid : 0;
+       int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
+       struct mpls_route *rt = new ? new : old;
+       unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
+       /* Ignore reserved labels for now */
+       if (rt && (index >= 16))
+               rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
+}
+
+static void mpls_route_update(struct net *net, unsigned index,
+                             struct net_device *dev, struct mpls_route *new,
+                             const struct nl_info *info)
+{
+       struct mpls_route __rcu **platform_label;
+       struct mpls_route *rt, *old = NULL;
+
+       ASSERT_RTNL();
+
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       rt = rtnl_dereference(platform_label[index]);
+       if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) {
+               rcu_assign_pointer(platform_label[index], new);
+               old = rt;
+       }
+
+       mpls_notify_route(net, index, old, new, info);
+
+       /* If we removed a route free it now */
+       mpls_rt_free(old);
+}
+
+static unsigned find_free_label(struct net *net)
+{
+       struct mpls_route __rcu **platform_label;
+       size_t platform_labels;
+       unsigned index;
+
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       platform_labels = net->mpls.platform_labels;
+       for (index = 16; index < platform_labels; index++) {
+               if (!rtnl_dereference(platform_label[index]))
+                       return index;
+       }
+       return LABEL_NOT_SPECIFIED;
+}
+
+static int mpls_route_add(struct mpls_route_config *cfg)
+{
+       struct mpls_route __rcu **platform_label;
+       struct net *net = cfg->rc_nlinfo.nl_net;
+       struct net_device *dev = NULL;
+       struct mpls_route *rt, *old;
+       unsigned index;
+       int i;
+       int err = -EINVAL;
+
+       index = cfg->rc_label;
+
+       /* If a label was not specified during insert pick one */
+       if ((index == LABEL_NOT_SPECIFIED) &&
+           (cfg->rc_nlflags & NLM_F_CREATE)) {
+               index = find_free_label(net);
+       }
+
+       /* The first 16 labels are reserved, and may not be set */
+       if (index < 16)
+               goto errout;
+
+       /* The full 20 bit range may not be supported. */
+       if (index >= net->mpls.platform_labels)
+               goto errout;
+
+       /* Ensure only a supported number of labels are present */
+       if (cfg->rc_output_labels > MAX_NEW_LABELS)
+               goto errout;
+
+       err = -ENODEV;
+       dev = dev_get_by_index(net, cfg->rc_ifindex);
+       if (!dev)
+               goto errout;
+
+       /* For now just support ethernet devices */
+       err = -EINVAL;
+       if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
+               goto errout;
+
+       err = -EINVAL;
+       if ((cfg->rc_via_table == NEIGH_LINK_TABLE) &&
+           (dev->addr_len != cfg->rc_via_alen))
+               goto errout;
+
+       /* Append makes no sense with mpls */
+       err = -EOPNOTSUPP;
+       if (cfg->rc_nlflags & NLM_F_APPEND)
+               goto errout;
+
+       err = -EEXIST;
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       old = rtnl_dereference(platform_label[index]);
+       if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
+               goto errout;
+
+       err = -EEXIST;
+       if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
+               goto errout;
+
+       err = -ENOENT;
+       if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
+               goto errout;
+
+       err = -ENOMEM;
+       rt = mpls_rt_alloc(cfg->rc_via_alen);
+       if (!rt)
+               goto errout;
+
+       rt->rt_labels = cfg->rc_output_labels;
+       for (i = 0; i < rt->rt_labels; i++)
+               rt->rt_label[i] = cfg->rc_output_label[i];
+       rt->rt_protocol = cfg->rc_protocol;
+       RCU_INIT_POINTER(rt->rt_dev, dev);
+       rt->rt_via_table = cfg->rc_via_table;
+       memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
+
+       mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo);
+
+       dev_put(dev);
+       return 0;
+
+errout:
+       if (dev)
+               dev_put(dev);
+       return err;
+}
+
+static int mpls_route_del(struct mpls_route_config *cfg)
+{
+       struct net *net = cfg->rc_nlinfo.nl_net;
+       unsigned index;
+       int err = -EINVAL;
+
+       index = cfg->rc_label;
+
+       /* The first 16 labels are reserved, and may not be removed */
+       if (index < 16)
+               goto errout;
+
+       /* The full 20 bit range may not be supported */
+       if (index >= net->mpls.platform_labels)
+               goto errout;
+
+       mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo);
+
+       err = 0;
+errout:
+       return err;
+}
+
+static void mpls_ifdown(struct net_device *dev)
+{
+       struct mpls_route __rcu **platform_label;
+       struct net *net = dev_net(dev);
+       unsigned index;
+
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       for (index = 0; index < net->mpls.platform_labels; index++) {
+               struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+               if (!rt)
+                       continue;
+               if (rtnl_dereference(rt->rt_dev) != dev)
+                       continue;
+               rt->rt_dev = NULL;
+       }
+}
+
+static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       switch(event) {
+       case NETDEV_UNREGISTER:
+               mpls_ifdown(dev);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block mpls_dev_notifier = {
+       .notifier_call = mpls_dev_notify,
+};
+
+static int nla_put_via(struct sk_buff *skb,
+                      u8 table, const void *addr, int alen)
+{
+       static const int table_to_family[NEIGH_NR_TABLES + 1] = {
+               AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
+       };
+       struct nlattr *nla;
+       struct rtvia *via;
+       int family = AF_UNSPEC;
+
+       nla = nla_reserve(skb, RTA_VIA, alen + 2);
+       if (!nla)
+               return -EMSGSIZE;
+
+       if (table <= NEIGH_NR_TABLES)
+               family = table_to_family[table];
+
+       via = nla_data(nla);
+       via->rtvia_family = family;
+       memcpy(via->rtvia_addr, addr, alen);
+       return 0;
+}
+
+int nla_put_labels(struct sk_buff *skb, int attrtype,
+                  u8 labels, const u32 label[])
+{
+       struct nlattr *nla;
+       struct mpls_shim_hdr *nla_label;
+       bool bos;
+       int i;
+       nla = nla_reserve(skb, attrtype, labels*4);
+       if (!nla)
+               return -EMSGSIZE;
+
+       nla_label = nla_data(nla);
+       bos = true;
+       for (i = labels - 1; i >= 0; i--) {
+               nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
+               bos = false;
+       }
+
+       return 0;
+}
+
+int nla_get_labels(const struct nlattr *nla,
+                  u32 max_labels, u32 *labels, u32 label[])
+{
+       unsigned len = nla_len(nla);
+       unsigned nla_labels;
+       struct mpls_shim_hdr *nla_label;
+       bool bos;
+       int i;
+
+       /* len needs to be an even multiple of 4 (the label size) */
+       if (len & 3)
+               return -EINVAL;
+
+       /* Limit the number of new labels allowed */
+       nla_labels = len/4;
+       if (nla_labels > max_labels)
+               return -EINVAL;
+
+       nla_label = nla_data(nla);
+       bos = true;
+       for (i = nla_labels - 1; i >= 0; i--, bos = false) {
+               struct mpls_entry_decoded dec;
+               dec = mpls_entry_decode(nla_label + i);
+
+               /* Ensure the bottom of stack flag is properly set
+                * and ttl and tc are both clear.
+                */
+               if ((dec.bos != bos) || dec.ttl || dec.tc)
+                       return -EINVAL;
+
+               label[i] = dec.label;
+       }
+       *labels = nla_labels;
+       return 0;
+}
+
+static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
+                              struct mpls_route_config *cfg)
+{
+       struct rtmsg *rtm;
+       struct nlattr *tb[RTA_MAX+1];
+       int index;
+       int err;
+
+       err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy);
+       if (err < 0)
+               goto errout;
+
+       err = -EINVAL;
+       rtm = nlmsg_data(nlh);
+       memset(cfg, 0, sizeof(*cfg));
+
+       if (rtm->rtm_family != AF_MPLS)
+               goto errout;
+       if (rtm->rtm_dst_len != 20)
+               goto errout;
+       if (rtm->rtm_src_len != 0)
+               goto errout;
+       if (rtm->rtm_tos != 0)
+               goto errout;
+       if (rtm->rtm_table != RT_TABLE_MAIN)
+               goto errout;
+       /* Any value is acceptable for rtm_protocol */
+
+       /* As mpls uses destination specific addresses
+        * (or source specific address in the case of multicast)
+        * all addresses have universal scope.
+        */
+       if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
+               goto errout;
+       if (rtm->rtm_type != RTN_UNICAST)
+               goto errout;
+       if (rtm->rtm_flags != 0)
+               goto errout;
+
+       cfg->rc_label           = LABEL_NOT_SPECIFIED;
+       cfg->rc_protocol        = rtm->rtm_protocol;
+       cfg->rc_nlflags         = nlh->nlmsg_flags;
+       cfg->rc_nlinfo.portid   = NETLINK_CB(skb).portid;
+       cfg->rc_nlinfo.nlh      = nlh;
+       cfg->rc_nlinfo.nl_net   = sock_net(skb->sk);
+
+       for (index = 0; index <= RTA_MAX; index++) {
+               struct nlattr *nla = tb[index];
+               if (!nla)
+                       continue;
+
+               switch(index) {
+               case RTA_OIF:
+                       cfg->rc_ifindex = nla_get_u32(nla);
+                       break;
+               case RTA_NEWDST:
+                       if (nla_get_labels(nla, MAX_NEW_LABELS,
+                                          &cfg->rc_output_labels,
+                                          cfg->rc_output_label))
+                               goto errout;
+                       break;
+               case RTA_DST:
+               {
+                       u32 label_count;
+                       if (nla_get_labels(nla, 1, &label_count,
+                                          &cfg->rc_label))
+                               goto errout;
+
+                       /* The first 16 labels are reserved, and may not be set */
+                       if (cfg->rc_label < 16)
+                               goto errout;
+
+                       break;
+               }
+               case RTA_VIA:
+               {
+                       struct rtvia *via = nla_data(nla);
+                       if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+                               goto errout;
+                       cfg->rc_via_alen   = nla_len(nla) -
+                               offsetof(struct rtvia, rtvia_addr);
+                       if (cfg->rc_via_alen > MAX_VIA_ALEN)
+                               goto errout;
+
+                       /* Validate the address family */
+                       switch(via->rtvia_family) {
+                       case AF_PACKET:
+                               cfg->rc_via_table = NEIGH_LINK_TABLE;
+                               break;
+                       case AF_INET:
+                               cfg->rc_via_table = NEIGH_ARP_TABLE;
+                               if (cfg->rc_via_alen != 4)
+                                       goto errout;
+                               break;
+                       case AF_INET6:
+                               cfg->rc_via_table = NEIGH_ND_TABLE;
+                               if (cfg->rc_via_alen != 16)
+                                       goto errout;
+                               break;
+                       default:
+                               /* Unsupported address family */
+                               goto errout;
+                       }
+
+                       memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen);
+                       break;
+               }
+               default:
+                       /* Unsupported attribute */
+                       goto errout;
+               }
+       }
+
+       err = 0;
+errout:
+       return err;
+}
+
+static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct mpls_route_config cfg;
+       int err;
+
+       err = rtm_to_route_config(skb, nlh, &cfg);
+       if (err < 0)
+               return err;
+
+       return mpls_route_del(&cfg);
+}
+
+
+static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct mpls_route_config cfg;
+       int err;
+
+       err = rtm_to_route_config(skb, nlh, &cfg);
+       if (err < 0)
+               return err;
+
+       return mpls_route_add(&cfg);
+}
+
+static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
+                          u32 label, struct mpls_route *rt, int flags)
+{
+       struct net_device *dev;
+       struct nlmsghdr *nlh;
+       struct rtmsg *rtm;
+
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
+       if (nlh == NULL)
+               return -EMSGSIZE;
+
+       rtm = nlmsg_data(nlh);
+       rtm->rtm_family = AF_MPLS;
+       rtm->rtm_dst_len = 20;
+       rtm->rtm_src_len = 0;
+       rtm->rtm_tos = 0;
+       rtm->rtm_table = RT_TABLE_MAIN;
+       rtm->rtm_protocol = rt->rt_protocol;
+       rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+       rtm->rtm_type = RTN_UNICAST;
+       rtm->rtm_flags = 0;
+
+       if (rt->rt_labels &&
+           nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label))
+               goto nla_put_failure;
+       if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen))
+               goto nla_put_failure;
+       dev = rtnl_dereference(rt->rt_dev);
+       if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
+               goto nla_put_failure;
+       if (nla_put_labels(skb, RTA_DST, 1, &label))
+               goto nla_put_failure;
+
+       nlmsg_end(skb, nlh);
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct mpls_route __rcu **platform_label;
+       size_t platform_labels;
+       unsigned int index;
+
+       ASSERT_RTNL();
+
+       index = cb->args[0];
+       if (index < 16)
+               index = 16;
+
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       platform_labels = net->mpls.platform_labels;
+       for (; index < platform_labels; index++) {
+               struct mpls_route *rt;
+               rt = rtnl_dereference(platform_label[index]);
+               if (!rt)
+                       continue;
+
+               if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
+                                   cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+                                   index, rt, NLM_F_MULTI) < 0)
+                       break;
+       }
+       cb->args[0] = index;
+
+       return skb->len;
+}
+
+static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
+{
+       size_t payload =
+               NLMSG_ALIGN(sizeof(struct rtmsg))
+               + nla_total_size(2 + rt->rt_via_alen)   /* RTA_VIA */
+               + nla_total_size(4);                    /* RTA_DST */
+       if (rt->rt_labels)                              /* RTA_NEWDST */
+               payload += nla_total_size(rt->rt_labels * 4);
+       if (rt->rt_dev)                                 /* RTA_OIF */
+               payload += nla_total_size(4);
+       return payload;
+}
+
+static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
+                      struct nlmsghdr *nlh, struct net *net, u32 portid,
+                      unsigned int nlm_flags)
+{
+       struct sk_buff *skb;
+       u32 seq = nlh ? nlh->nlmsg_seq : 0;
+       int err = -ENOBUFS;
+
+       skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
+       if (skb == NULL)
+               goto errout;
+
+       err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
+       if (err < 0) {
+               /* -EMSGSIZE implies BUG in lfib_nlmsg_size */
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(skb);
+               goto errout;
+       }
+       rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
+
+       return;
+errout:
+       if (err < 0)
+               rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
+}
+
+static int resize_platform_label_table(struct net *net, size_t limit)
+{
+       size_t size = sizeof(struct mpls_route *) * limit;
+       size_t old_limit;
+       size_t cp_size;
+       struct mpls_route __rcu **labels = NULL, **old;
+       struct mpls_route *rt0 = NULL, *rt2 = NULL;
+       unsigned index;
+
+       if (size) {
+               labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+               if (!labels)
+                       labels = vzalloc(size);
+
+               if (!labels)
+                       goto nolabels;
+       }
+
+       /* In case the predefined labels need to be populated */
+       if (limit > LABEL_IPV4_EXPLICIT_NULL) {
+               struct net_device *lo = net->loopback_dev;
+               rt0 = mpls_rt_alloc(lo->addr_len);
+               if (!rt0)
+                       goto nort0;
+               RCU_INIT_POINTER(rt0->rt_dev, lo);
+               rt0->rt_protocol = RTPROT_KERNEL;
+               rt0->rt_via_table = NEIGH_LINK_TABLE;
+               memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
+       }
+       if (limit > LABEL_IPV6_EXPLICIT_NULL) {
+               struct net_device *lo = net->loopback_dev;
+               rt2 = mpls_rt_alloc(lo->addr_len);
+               if (!rt2)
+                       goto nort2;
+               RCU_INIT_POINTER(rt2->rt_dev, lo);
+               rt2->rt_protocol = RTPROT_KERNEL;
+               rt2->rt_via_table = NEIGH_LINK_TABLE;
+               memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
+       }
+
+       rtnl_lock();
+       /* Remember the original table */
+       old = rtnl_dereference(net->mpls.platform_label);
+       old_limit = net->mpls.platform_labels;
+
+       /* Free any labels beyond the new table */
+       for (index = limit; index < old_limit; index++)
+               mpls_route_update(net, index, NULL, NULL, NULL);
+
+       /* Copy over the old labels */
+       cp_size = size;
+       if (old_limit < limit)
+               cp_size = old_limit * sizeof(struct mpls_route *);
+
+       memcpy(labels, old, cp_size);
+
+       /* If needed set the predefined labels */
+       if ((old_limit <= LABEL_IPV6_EXPLICIT_NULL) &&
+           (limit > LABEL_IPV6_EXPLICIT_NULL)) {
+               RCU_INIT_POINTER(labels[LABEL_IPV6_EXPLICIT_NULL], rt2);
+               rt2 = NULL;
+       }
+
+       if ((old_limit <= LABEL_IPV4_EXPLICIT_NULL) &&
+           (limit > LABEL_IPV4_EXPLICIT_NULL)) {
+               RCU_INIT_POINTER(labels[LABEL_IPV4_EXPLICIT_NULL], rt0);
+               rt0 = NULL;
+       }
+
+       /* Update the global pointers */
+       net->mpls.platform_labels = limit;
+       rcu_assign_pointer(net->mpls.platform_label, labels);
+
+       rtnl_unlock();
+
+       mpls_rt_free(rt2);
+       mpls_rt_free(rt0);
+
+       if (old) {
+               synchronize_rcu();
+               kvfree(old);
+       }
+       return 0;
+
+nort2:
+       mpls_rt_free(rt0);
+nort0:
+       kvfree(labels);
+nolabels:
+       return -ENOMEM;
+}
+
+static int mpls_platform_labels(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct net *net = table->data;
+       int platform_labels = net->mpls.platform_labels;
+       int ret;
+       struct ctl_table tmp = {
+               .procname       = table->procname,
+               .data           = &platform_labels,
+               .maxlen         = sizeof(int),
+               .mode           = table->mode,
+               .extra1         = &zero,
+               .extra2         = &label_limit,
+       };
+
+       ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+       if (write && ret == 0)
+               ret = resize_platform_label_table(net, platform_labels);
+
+       return ret;
+}
+
+static struct ctl_table mpls_table[] = {
+       {
+               .procname       = "platform_labels",
+               .data           = NULL,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = mpls_platform_labels,
+       },
+       { }
+};
+
+static int mpls_net_init(struct net *net)
+{
+       struct ctl_table *table;
+
+       net->mpls.platform_labels = 0;
+       net->mpls.platform_label = NULL;
+
+       table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
+       if (table == NULL)
+               return -ENOMEM;
+
+       table[0].data = net;
+       net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
+       if (net->mpls.ctl == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void mpls_net_exit(struct net *net)
+{
+       struct mpls_route __rcu **platform_label;
+       size_t platform_labels;
+       struct ctl_table *table;
+       unsigned int index;
+
+       table = net->mpls.ctl->ctl_table_arg;
+       unregister_net_sysctl_table(net->mpls.ctl);
+       kfree(table);
+
+       /* An rcu grace period has passed since there was a device in
+        * the network namespace (and thus the last in flight packet)
+        * left this network namespace.  This is because
+        * unregister_netdevice_many and netdev_run_todo has completed
+        * for each network device that was in this network namespace.
+        *
+        * As such no additional rcu synchronization is necessary when
+        * freeing the platform_label table.
+        */
+       rtnl_lock();
+       platform_label = rtnl_dereference(net->mpls.platform_label);
+       platform_labels = net->mpls.platform_labels;
+       for (index = 0; index < platform_labels; index++) {
+               struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+               RCU_INIT_POINTER(platform_label[index], NULL);
+               mpls_rt_free(rt);
+       }
+       rtnl_unlock();
+
+       kvfree(platform_label);
+}
+
+static struct pernet_operations mpls_net_ops = {
+       .init = mpls_net_init,
+       .exit = mpls_net_exit,
+};
+
+static int __init mpls_init(void)
+{
+       int err;
+
+       BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
+
+       err = register_pernet_subsys(&mpls_net_ops);
+       if (err)
+               goto out;
+
+       err = register_netdevice_notifier(&mpls_dev_notifier);
+       if (err)
+               goto out_unregister_pernet;
+
+       dev_add_pack(&mpls_packet_type);
+
+       rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
+       rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
+       rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
+       err = 0;
+out:
+       return err;
+
+out_unregister_pernet:
+       unregister_pernet_subsys(&mpls_net_ops);
+       goto out;
+}
+module_init(mpls_init);
+
+static void __exit mpls_exit(void)
+{
+       rtnl_unregister_all(PF_MPLS);
+       dev_remove_pack(&mpls_packet_type);
+       unregister_netdevice_notifier(&mpls_dev_notifier);
+       unregister_pernet_subsys(&mpls_net_ops);
+}
+module_exit(mpls_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_NETPROTO(PF_MPLS);
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
new file mode 100644 (file)
index 0000000..fb6de92
--- /dev/null
@@ -0,0 +1,59 @@
+#ifndef MPLS_INTERNAL_H
+#define MPLS_INTERNAL_H
+
+#define LABEL_IPV4_EXPLICIT_NULL       0 /* RFC3032 */
+#define LABEL_ROUTER_ALERT_LABEL       1 /* RFC3032 */
+#define LABEL_IPV6_EXPLICIT_NULL       2 /* RFC3032 */
+#define LABEL_IMPLICIT_NULL            3 /* RFC3032 */
+#define LABEL_ENTROPY_INDICATOR                7 /* RFC6790 */
+#define LABEL_GAL                      13 /* RFC5586 */
+#define LABEL_OAM_ALERT                        14 /* RFC3429 */
+#define LABEL_EXTENSION                        15 /* RFC7274 */
+
+
+struct mpls_shim_hdr {
+       __be32 label_stack_entry;
+};
+
+struct mpls_entry_decoded {
+       u32 label;
+       u8 ttl;
+       u8 tc;
+       u8 bos;
+};
+
+struct sk_buff;
+
+static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
+{
+       return (struct mpls_shim_hdr *)skb_network_header(skb);
+}
+
+static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)
+{
+       struct mpls_shim_hdr result;
+       result.label_stack_entry =
+               cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
+                           (tc << MPLS_LS_TC_SHIFT) |
+                           (bos ? (1 << MPLS_LS_S_SHIFT) : 0) |
+                           (ttl << MPLS_LS_TTL_SHIFT));
+       return result;
+}
+
+static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *hdr)
+{
+       struct mpls_entry_decoded result;
+       unsigned entry = be32_to_cpu(hdr->label_stack_entry);
+
+       result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
+       result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
+       result.tc =  (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
+       result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
+
+       return result;
+}
+
+int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels, const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+
+#endif /* MPLS_INTERNAL_H */
index 76cc9ffd87fa80c594c685fb81f105ee7ca90ae4..49532672f66dad0c3bae1b923993b0d1f518b25b 100644 (file)
@@ -3466,7 +3466,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
                if (udest.af == 0)
                        udest.af = svc->af;
 
-               if (udest.af != svc->af) {
+               if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
                        /* The synchronization protocol is incompatible
                         * with mixed family services
                         */
index a990df2f3f7100d1e47acfb188e6ba6cd907d833..29fbcf25f88f2b4447656b23d99a3356026f84d5 100644 (file)
@@ -634,8 +634,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
                struct xt_match *match = nft_match->ops.data;
 
                if (strcmp(match->name, mt_name) == 0 &&
-                   match->revision == rev && match->family == family)
+                   match->revision == rev && match->family == family) {
+                       if (!try_module_get(match->me))
+                               return ERR_PTR(-ENOENT);
+
                        return &nft_match->ops;
+               }
        }
 
        match = xt_request_find_match(family, mt_name, rev);
@@ -704,8 +708,12 @@ nft_target_select_ops(const struct nft_ctx *ctx,
                struct xt_target *target = nft_target->ops.data;
 
                if (strcmp(target->name, tg_name) == 0 &&
-                   target->revision == rev && target->family == family)
+                   target->revision == rev && target->family == family) {
+                       if (!try_module_get(target->me))
+                               return ERR_PTR(-ENOENT);
+
                        return &nft_target->ops;
+               }
        }
 
        target = xt_request_find_target(family, tg_name, rev);
index 61e6c407476a618df386c2f14839033398aae14b..c82df0a48fcd8a649921b3fcb6b5d1edd39c6295 100644 (file)
@@ -192,8 +192,6 @@ static int nft_hash_init(const struct nft_set *set,
                .key_offset = offsetof(struct nft_hash_elem, key),
                .key_len = set->klen,
                .hashfn = jhash,
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
        };
 
        return rhashtable_init(priv, &params);
index 30dbe34915ae2b1fcf4d0ca3149369bdf3913f0f..45e1b30e4fb214f850af2590425ab2a9748c6476 100644 (file)
@@ -378,12 +378,11 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
        mutex_lock(&recent_mutex);
        t = recent_table_lookup(recent_net, info->name);
        if (t != NULL) {
-               if (info->hit_count > t->nstamps_max_mask) {
-                       pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n",
-                               info->hit_count, t->nstamps_max_mask + 1,
-                               info->name);
-                       ret = -EINVAL;
-                       goto out;
+               if (nstamp_mask > t->nstamps_max_mask) {
+                       spin_lock_bh(&recent_lock);
+                       recent_table_flush(t);
+                       t->nstamps_max_mask = nstamp_mask;
+                       spin_unlock_bh(&recent_lock);
                }
 
                t->refcnt++;
index 1ba67931eb1b168fabfa78790f5ed53713188f9d..13332dbf291d6e530b77c3c8a7d155a07788ebc3 100644 (file)
@@ -243,12 +243,13 @@ static int
 extract_icmp6_fields(const struct sk_buff *skb,
                     unsigned int outside_hdrlen,
                     int *protocol,
-                    struct in6_addr **raddr,
-                    struct in6_addr **laddr,
+                    const struct in6_addr **raddr,
+                    const struct in6_addr **laddr,
                     __be16 *rport,
-                    __be16 *lport)
+                    __be16 *lport,
+                    struct ipv6hdr *ipv6_var)
 {
-       struct ipv6hdr *inside_iph, _inside_iph;
+       const struct ipv6hdr *inside_iph;
        struct icmp6hdr *icmph, _icmph;
        __be16 *ports, _ports[2];
        u8 inside_nexthdr;
@@ -263,12 +264,14 @@ extract_icmp6_fields(const struct sk_buff *skb,
        if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK)
                return 1;
 
-       inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph);
+       inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
+                                       sizeof(*ipv6_var), ipv6_var);
        if (inside_iph == NULL)
                return 1;
        inside_nexthdr = inside_iph->nexthdr;
 
-       inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
+       inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
+                                             sizeof(*ipv6_var),
                                         &inside_nexthdr, &inside_fragoff);
        if (inside_hdrlen < 0)
                return 1; /* hjm: Packet has no/incomplete transport layer headers. */
@@ -315,10 +318,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
 static bool
 socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
 {
-       struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
        struct sock *sk = skb->sk;
-       struct in6_addr *daddr = NULL, *saddr = NULL;
+       const struct in6_addr *daddr = NULL, *saddr = NULL;
        __be16 uninitialized_var(dport), uninitialized_var(sport);
        int thoff = 0, uninitialized_var(tproto);
        const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
@@ -342,7 +345,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
 
        } else if (tproto == IPPROTO_ICMPV6) {
                if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
-                                        &sport, &dport))
+                                        &sport, &dport, &ipv6_var))
                        return false;
        } else {
                return false;
index a96025c0583fcdd98acc63e30beb057ebc299dfb..6b0f21950e09d41969d3922220dab2eb3a3d63ae 100644 (file)
@@ -3124,8 +3124,6 @@ static int __init netlink_proto_init(void)
                .key_len = sizeof(u32), /* portid */
                .hashfn = jhash,
                .max_shift = 16, /* 64K */
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
        };
 
        if (err != 0)
index b7d818c594234bae63e2b603cc2ee3afe10fa1c2..ed6b0f8dd1bbdfa0876c3425b24c83cf4aa315a6 100644 (file)
@@ -6,6 +6,7 @@ config OPENVSWITCH
        tristate "Open vSwitch"
        depends on INET
        select LIBCRC32C
+       select MPLS
        select NET_MPLS_GSO
        ---help---
          Open vSwitch is a multilayer Ethernet switch targeted at virtualized
index ae5e77cdc0ca1f34ff7f9c99d65ba0c8bda9ace6..5bae7243c5777e38df7be95454b8164724c769cf 100644 (file)
@@ -2194,14 +2194,55 @@ static int __net_init ovs_init_net(struct net *net)
        return 0;
 }
 
-static void __net_exit ovs_exit_net(struct net *net)
+static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
+                                           struct list_head *head)
 {
-       struct datapath *dp, *dp_next;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+       struct datapath *dp;
+
+       list_for_each_entry(dp, &ovs_net->dps, list_node) {
+               int i;
+
+               for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+                       struct vport *vport;
+
+                       hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
+                               struct netdev_vport *netdev_vport;
+
+                               if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
+                                       continue;
+
+                               netdev_vport = netdev_vport_priv(vport);
+                               if (dev_net(netdev_vport->dev) == dnet)
+                                       list_add(&vport->detach_list, head);
+                       }
+               }
+       }
+}
+
+static void __net_exit ovs_exit_net(struct net *dnet)
+{
+       struct datapath *dp, *dp_next;
+       struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
+       struct vport *vport, *vport_next;
+       struct net *net;
+       LIST_HEAD(head);
 
        ovs_lock();
        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);
+
+       rtnl_lock();
+       for_each_net(net)
+               list_vports_from_net(net, dnet, &head);
+       rtnl_unlock();
+
+       /* Detach all vports from given namespace. */
+       list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
+               list_del(&vport->detach_list);
+               ovs_dp_detach_port(vport);
+       }
+
        ovs_unlock();
 
        cancel_work_sync(&ovs_net->dp_notify_work);
index 216f20b90aa596b49592beee89a996cbe868d8ba..22b18c145c9221675e031de2617fcdd800405170 100644 (file)
@@ -2253,14 +2253,20 @@ static int masked_set_action_to_set_action_attr(const struct nlattr *a,
                                                struct sk_buff *skb)
 {
        const struct nlattr *ovs_key = nla_data(a);
+       struct nlattr *nla;
        size_t key_len = nla_len(ovs_key) / 2;
 
        /* Revert the conversion we did from a non-masked set action to
         * masked set action.
         */
-       if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key))
+       nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
+       if (!nla)
                return -EMSGSIZE;
 
+       if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
+               return -EMSGSIZE;
+
+       nla_nest_end(skb, nla);
        return 0;
 }
 
index f8ae295fb0011f7cc5dea75737833b7086641c77..bc85331a6c60cae9182bd1348d35d81117cf2943 100644 (file)
@@ -103,6 +103,7 @@ struct vport_portids {
  * @ops: Class structure.
  * @percpu_stats: Points to per-CPU statistics used and maintained by vport
  * @err_stats: Points to error statistics used and maintained by vport
+ * @detach_list: list used for detaching vport in net-exit call.
  */
 struct vport {
        struct rcu_head rcu;
@@ -117,6 +118,7 @@ struct vport {
        struct pcpu_sw_netstats __percpu *percpu_stats;
 
        struct vport_err_stats err_stats;
+       struct list_head detach_list;
 };
 
 /**
index 404c9735aee90e5ebd6a7d3506762ec1358eadaa..8167aecc1594cef48d43cc32fff3fa021befcd07 100644 (file)
@@ -704,6 +704,10 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
 
        if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
                if (!frozen) {
+                       if (!BLOCK_NUM_PKTS(pbd)) {
+                               /* An empty block. Just refresh the timer. */
+                               goto refresh_timer;
+                       }
                        prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
                        if (!prb_dispatch_next_block(pkc, po))
                                goto refresh_timer;
@@ -804,7 +808,11 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
                h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
                h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
        } else {
-               /* Ok, we tmo'd - so get the current time */
+               /* Ok, we tmo'd - so get the current time.
+                *
+                * It shouldn't really happen as we don't close empty
+                * blocks. See prb_retire_rx_blk_timer_expired().
+                */
                struct timespec ts;
                getnstimeofday(&ts);
                h1->ts_last_pkt.ts_sec = ts.tv_sec;
@@ -1355,14 +1363,14 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
                return 0;
        }
 
+       if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
+               skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
+               if (!skb)
+                       return 0;
+       }
        switch (f->type) {
        case PACKET_FANOUT_HASH:
        default:
-               if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
-                       skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
-                       if (!skb)
-                               return 0;
-               }
                idx = fanout_demux_hash(f, skb, num);
                break;
        case PACKET_FANOUT_LB:
index c6be17a959a6e4981ecfff38af85805df6d8b26e..e0547f521f20d79c688c773286d609066c990a1d 100644 (file)
@@ -218,7 +218,8 @@ static void rxrpc_resend(struct rxrpc_call *call)
        struct rxrpc_header *hdr;
        struct sk_buff *txb;
        unsigned long *p_txb, resend_at;
-       int loop, stop;
+       bool stop;
+       int loop;
        u8 resend;
 
        _enter("{%d,%d,%d,%d},",
@@ -226,7 +227,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
               atomic_read(&call->sequence),
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
 
-       stop = 0;
+       stop = false;
        resend = 0;
        resend_at = 0;
 
@@ -255,11 +256,11 @@ static void rxrpc_resend(struct rxrpc_call *call)
                        _proto("Tx DATA %%%u { #%d }",
                               ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
                        if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
-                               stop = 0;
+                               stop = true;
                                sp->resend_at = jiffies + 3;
                        } else {
                                sp->resend_at =
-                                       jiffies + rxrpc_resend_timeout * HZ;
+                                       jiffies + rxrpc_resend_timeout;
                        }
                }
 
index 899d0319f2b273e47a73efe4caac7c14bfdfe219..2274e723a3df6fdf393543281cd56bcb6284b41c 100644 (file)
@@ -348,7 +348,7 @@ config NET_SCH_PLUG
 comment "Classification"
 
 config NET_CLS
-       boolean
+       bool
 
 config NET_CLS_BASIC
        tristate "Elementary classification (BASIC)"
index baef987fe2c036ae61f7108455ce1d828ec40e6c..8b0470e418dc6e9475464768d629969087e66b37 100644 (file)
@@ -286,7 +286,7 @@ replay:
                        RCU_INIT_POINTER(*back, next);
 
                        tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
-                       tcf_destroy(tp);
+                       tcf_destroy(tp, true);
                        err = 0;
                        goto errout;
                }
@@ -301,14 +301,20 @@ replay:
                        err = -EEXIST;
                        if (n->nlmsg_flags & NLM_F_EXCL) {
                                if (tp_created)
-                                       tcf_destroy(tp);
+                                       tcf_destroy(tp, true);
                                goto errout;
                        }
                        break;
                case RTM_DELTFILTER:
                        err = tp->ops->delete(tp, fh);
-                       if (err == 0)
+                       if (err == 0) {
                                tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+                               if (tcf_destroy(tp, false)) {
+                                       struct tcf_proto *next = rtnl_dereference(tp->next);
+
+                                       RCU_INIT_POINTER(*back, next);
+                               }
+                       }
                        goto errout;
                case RTM_GETTFILTER:
                        err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
@@ -329,7 +335,7 @@ replay:
                tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
        } else {
                if (tp_created)
-                       tcf_destroy(tp);
+                       tcf_destroy(tp, true);
        }
 
 errout:
index fc399db86f11b17cb05536df8f211fe79c7a3232..0b8c3ace671f1fff47cf2a12f7e6428bb5704b9f 100644 (file)
@@ -96,11 +96,14 @@ static void basic_delete_filter(struct rcu_head *head)
        kfree(f);
 }
 
-static void basic_destroy(struct tcf_proto *tp)
+static bool basic_destroy(struct tcf_proto *tp, bool force)
 {
        struct basic_head *head = rtnl_dereference(tp->root);
        struct basic_filter *f, *n;
 
+       if (!force && !list_empty(&head->flist))
+               return false;
+
        list_for_each_entry_safe(f, n, &head->flist, link) {
                list_del_rcu(&f->link);
                tcf_unbind_filter(tp, &f->res);
@@ -108,6 +111,7 @@ static void basic_destroy(struct tcf_proto *tp)
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static int basic_delete(struct tcf_proto *tp, unsigned long arg)
index 6f7ed8f8e6ee76379381955c0fff5a67599254bd..243c9f225a734799cde949a1ff91c1a554e5d584 100644 (file)
@@ -137,11 +137,14 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
        return 0;
 }
 
-static void cls_bpf_destroy(struct tcf_proto *tp)
+static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;
 
+       if (!force && !list_empty(&head->plist))
+               return false;
+
        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
                list_del_rcu(&prog->link);
                tcf_unbind_filter(tp, &prog->res);
@@ -150,6 +153,7 @@ static void cls_bpf_destroy(struct tcf_proto *tp)
 
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
index 221697ab0247c5e786c70e181a43c1145a30748b..ea611b21641241737223f34334c0189df00d11e7 100644 (file)
@@ -143,14 +143,18 @@ errout:
        return err;
 }
 
-static void cls_cgroup_destroy(struct tcf_proto *tp)
+static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);
 
+       if (!force)
+               return false;
+
        if (head) {
                RCU_INIT_POINTER(tp->root, NULL);
                call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
        }
+       return true;
 }
 
 static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
index 461410394d085917ee8b8039ab5cfd5f5a6cf7c3..a620c4e288a51f55771399f6c1f81328bab9f7c7 100644 (file)
@@ -557,17 +557,21 @@ static int flow_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void flow_destroy(struct tcf_proto *tp)
+static bool flow_destroy(struct tcf_proto *tp, bool force)
 {
        struct flow_head *head = rtnl_dereference(tp->root);
        struct flow_filter *f, *next;
 
+       if (!force && !list_empty(&head->filters))
+               return false;
+
        list_for_each_entry_safe(f, next, &head->filters, list) {
                list_del_rcu(&f->list);
                call_rcu(&f->rcu, flow_destroy_filter);
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
index a5269f76004c2974a2e1e650433a7ba394710965..715e01e5910a94a9af40534ec5c0e820b96adc99 100644 (file)
@@ -33,6 +33,7 @@
 
 struct fw_head {
        u32                     mask;
+       bool                    mask_set;
        struct fw_filter __rcu  *ht[HTSIZE];
        struct rcu_head         rcu;
 };
@@ -113,6 +114,14 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
 
 static int fw_init(struct tcf_proto *tp)
 {
+       struct fw_head *head;
+
+       head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
+       if (head == NULL)
+               return -ENOBUFS;
+
+       head->mask_set = false;
+       rcu_assign_pointer(tp->root, head);
        return 0;
 }
 
@@ -124,14 +133,20 @@ static void fw_delete_filter(struct rcu_head *head)
        kfree(f);
 }
 
-static void fw_destroy(struct tcf_proto *tp)
+static bool fw_destroy(struct tcf_proto *tp, bool force)
 {
        struct fw_head *head = rtnl_dereference(tp->root);
        struct fw_filter *f;
        int h;
 
        if (head == NULL)
-               return;
+               return true;
+
+       if (!force) {
+               for (h = 0; h < HTSIZE; h++)
+                       if (rcu_access_pointer(head->ht[h]))
+                               return false;
+       }
 
        for (h = 0; h < HTSIZE; h++) {
                while ((f = rtnl_dereference(head->ht[h])) != NULL) {
@@ -143,6 +158,7 @@ static void fw_destroy(struct tcf_proto *tp)
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static int fw_delete(struct tcf_proto *tp, unsigned long arg)
@@ -286,17 +302,11 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
        if (!handle)
                return -EINVAL;
 
-       if (head == NULL) {
-               u32 mask = 0xFFFFFFFF;
+       if (!head->mask_set) {
+               head->mask = 0xFFFFFFFF;
                if (tb[TCA_FW_MASK])
-                       mask = nla_get_u32(tb[TCA_FW_MASK]);
-
-               head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
-               if (head == NULL)
-                       return -ENOBUFS;
-               head->mask = mask;
-
-               rcu_assign_pointer(tp->root, head);
+                       head->mask = nla_get_u32(tb[TCA_FW_MASK]);
+               head->mask_set = true;
        }
 
        f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
index 2ecd24688554e76e38d98b7338072a37d4c0cea1..08a3b0a6f5abd3fd674d7bca32c62c2626608b15 100644 (file)
@@ -258,6 +258,13 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
 
 static int route4_init(struct tcf_proto *tp)
 {
+       struct route4_head *head;
+
+       head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
+       if (head == NULL)
+               return -ENOBUFS;
+
+       rcu_assign_pointer(tp->root, head);
        return 0;
 }
 
@@ -270,13 +277,20 @@ route4_delete_filter(struct rcu_head *head)
        kfree(f);
 }
 
-static void route4_destroy(struct tcf_proto *tp)
+static bool route4_destroy(struct tcf_proto *tp, bool force)
 {
        struct route4_head *head = rtnl_dereference(tp->root);
        int h1, h2;
 
        if (head == NULL)
-               return;
+               return true;
+
+       if (!force) {
+               for (h1 = 0; h1 <= 256; h1++) {
+                       if (rcu_access_pointer(head->table[h1]))
+                               return false;
+               }
+       }
 
        for (h1 = 0; h1 <= 256; h1++) {
                struct route4_bucket *b;
@@ -301,6 +315,7 @@ static void route4_destroy(struct tcf_proto *tp)
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
+       return true;
 }
 
 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
@@ -484,13 +499,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
                        return -EINVAL;
 
        err = -ENOBUFS;
-       if (head == NULL) {
-               head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
-               if (head == NULL)
-                       goto errout;
-               rcu_assign_pointer(tp->root, head);
-       }
-
        f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
        if (!f)
                goto errout;
index edd8ade3fbc1f4358b4275940e6f62b3d814b3dd..02fa82792dab8334d1dc14408f7ed42a4db0c141 100644 (file)
@@ -291,13 +291,20 @@ rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
        kfree_rcu(f, rcu);
 }
 
-static void rsvp_destroy(struct tcf_proto *tp)
+static bool rsvp_destroy(struct tcf_proto *tp, bool force)
 {
        struct rsvp_head *data = rtnl_dereference(tp->root);
        int h1, h2;
 
        if (data == NULL)
-               return;
+               return true;
+
+       if (!force) {
+               for (h1 = 0; h1 < 256; h1++) {
+                       if (rcu_access_pointer(data->ht[h1]))
+                               return false;
+               }
+       }
 
        RCU_INIT_POINTER(tp->root, NULL);
 
@@ -319,6 +326,7 @@ static void rsvp_destroy(struct tcf_proto *tp)
                }
        }
        kfree_rcu(data, rcu);
+       return true;
 }
 
 static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
index bd49bf547a479f139b25e0507b090d51c137c519..a557dbaf5afedaa7a3a3a18c53a83238f6d32420 100644 (file)
@@ -468,11 +468,14 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
        }
 }
 
-static void tcindex_destroy(struct tcf_proto *tp)
+static bool tcindex_destroy(struct tcf_proto *tp, bool force)
 {
        struct tcindex_data *p = rtnl_dereference(tp->root);
        struct tcf_walker walker;
 
+       if (!force)
+               return false;
+
        pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
        walker.count = 0;
        walker.skip = 0;
@@ -481,6 +484,7 @@ static void tcindex_destroy(struct tcf_proto *tp)
 
        RCU_INIT_POINTER(tp->root, NULL);
        call_rcu(&p->rcu, __tcindex_destroy);
+       return true;
 }
 
 
index 09487afbfd5187a312ab155df5da43548d0c326b..375e51b71c80560b8acf37079725e63d7786cdbf 100644 (file)
@@ -460,13 +460,35 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
        return -ENOENT;
 }
 
-static void u32_destroy(struct tcf_proto *tp)
+static bool ht_empty(struct tc_u_hnode *ht)
+{
+       unsigned int h;
+
+       for (h = 0; h <= ht->divisor; h++)
+               if (rcu_access_pointer(ht->ht[h]))
+                       return false;
+
+       return true;
+}
+
+static bool u32_destroy(struct tcf_proto *tp, bool force)
 {
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
 
        WARN_ON(root_ht == NULL);
 
+       if (!force) {
+               if (root_ht) {
+                       if (root_ht->refcnt > 1)
+                               return false;
+                       if (root_ht->refcnt == 1) {
+                               if (!ht_empty(root_ht))
+                                       return false;
+                       }
+               }
+       }
+
        if (root_ht && --root_ht->refcnt == 0)
                u32_destroy_hnode(tp, root_ht);
 
@@ -491,6 +513,7 @@ static void u32_destroy(struct tcf_proto *tp)
        }
 
        tp->data = NULL;
+       return true;
 }
 
 static int u32_delete(struct tcf_proto *tp, unsigned long arg)
index 6742200b13071b6e63c200767a77a653d2e10c06..fbb7ebfc58c6761f6afb58e62646908b10e2bf09 100644 (file)
@@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
                                 * to replay the request.
                                 */
                                module_put(em->ops->owner);
+                               em->ops = NULL;
                                err = -EAGAIN;
                        }
 #endif
index 243b7d169d6183f662ab7f30d0e93492b29e79e3..ad9eed70bc8f8e16c3118c6527374a952823e2c0 100644 (file)
@@ -1858,11 +1858,15 @@ reclassify:
 }
 EXPORT_SYMBOL(tc_classify);
 
-void tcf_destroy(struct tcf_proto *tp)
+bool tcf_destroy(struct tcf_proto *tp, bool force)
 {
-       tp->ops->destroy(tp);
-       module_put(tp->ops->owner);
-       kfree_rcu(tp, rcu);
+       if (tp->ops->destroy(tp, force)) {
+               module_put(tp->ops->owner);
+               kfree_rcu(tp, rcu);
+               return true;
+       }
+
+       return false;
 }
 
 void tcf_destroy_chain(struct tcf_proto __rcu **fl)
@@ -1871,7 +1875,7 @@ void tcf_destroy_chain(struct tcf_proto __rcu **fl)
 
        while ((tp = rtnl_dereference(*fl)) != NULL) {
                RCU_INIT_POINTER(*fl, tp->next);
-               tcf_destroy(tp);
+               tcf_destroy(tp, true);
        }
 }
 EXPORT_SYMBOL(tcf_destroy_chain);
index abbb7dcd16897125863098cb48f6a6411488225c..59eeed43eda2d2651916dcc8698c12c7d7249e4e 100644 (file)
@@ -217,6 +217,8 @@ static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
 
        for (i = 0; i < arg->npages && arg->pages[i]; i++)
                __free_page(arg->pages[i]);
+
+       kfree(arg->pages);
 }
 
 static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
index 224a82f24d3c75e60c702bd89215b7934ded1ea8..1095be9c80ab809900d2bf0afbde9c63b6034a9d 100644 (file)
@@ -463,6 +463,8 @@ static int rsc_parse(struct cache_detail *cd,
                /* number of additional gid's */
                if (get_int(&mesg, &N))
                        goto out;
+               if (N < 0 || N > NGROUPS_MAX)
+                       goto out;
                status = -ENOMEM;
                rsci.cred.cr_group_info = groups_alloc(N);
                if (rsci.cred.cr_group_info == NULL)
index 651f49ab601fbdd75eed30ddc0a29c9cb4369e54..9dd0ea8db463acc9daba0c51be89b1f17ec8f17d 100644 (file)
@@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;
 
+       spin_lock(&xprt->bc_pa_lock);
+       list_del(&req->rq_bc_pa_list);
+       spin_unlock(&xprt->bc_pa_lock);
+
        req->rq_private_buf.len = copied;
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
        dprintk("RPC:       add callback request to list\n");
        spin_lock(&bc_serv->sv_cb_lock);
-       list_del(&req->rq_bc_pa_list);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
        spin_unlock(&bc_serv->sv_cb_lock);
index 155754588fd65ab656736ab342a4851b4de0bb8b..86a47e17cfaf7cba058a0ed49512954789b9d9a0 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config NET_SWITCHDEV
-       boolean "Switch (and switch-ish) device support (EXPERIMENTAL)"
+       bool "Switch (and switch-ish) device support (EXPERIMENTAL)"
        depends on INET
        ---help---
          This module provides glue between core networking code and device
index 8c1e558db11893b7f70eaa5e5201da07a3df8812..aba6aa2656d87bb375f684018323bff1236f16d5 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
+#include <net/ip_fib.h>
 #include <net/switchdev.h>
 
 /**
@@ -32,7 +33,7 @@ int netdev_switch_parent_id_get(struct net_device *dev,
                return -EOPNOTSUPP;
        return ops->ndo_switch_parent_id_get(dev, psid);
 }
-EXPORT_SYMBOL(netdev_switch_parent_id_get);
+EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get);
 
 /**
  *     netdev_switch_port_stp_update - Notify switch device port of STP
@@ -51,7 +52,7 @@ int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
        WARN_ON(!ops->ndo_switch_parent_id_get);
        return ops->ndo_switch_port_stp_update(dev, state);
 }
-EXPORT_SYMBOL(netdev_switch_port_stp_update);
+EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
 
 static DEFINE_MUTEX(netdev_switch_mutex);
 static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
@@ -73,7 +74,7 @@ int register_netdev_switch_notifier(struct notifier_block *nb)
        mutex_unlock(&netdev_switch_mutex);
        return err;
 }
-EXPORT_SYMBOL(register_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(register_netdev_switch_notifier);
 
 /**
  *     unregister_netdev_switch_notifier - Unregister nofifier
@@ -91,7 +92,7 @@ int unregister_netdev_switch_notifier(struct notifier_block *nb)
        mutex_unlock(&netdev_switch_mutex);
        return err;
 }
-EXPORT_SYMBOL(unregister_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
 
 /**
  *     call_netdev_switch_notifiers - Call nofifiers
@@ -114,7 +115,7 @@ int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
        mutex_unlock(&netdev_switch_mutex);
        return err;
 }
-EXPORT_SYMBOL(call_netdev_switch_notifiers);
+EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers);
 
 /**
  *     netdev_switch_port_bridge_setlink - Notify switch device port of bridge
@@ -139,7 +140,7 @@ int netdev_switch_port_bridge_setlink(struct net_device *dev,
 
        return ops->ndo_bridge_setlink(dev, nlh, flags);
 }
-EXPORT_SYMBOL(netdev_switch_port_bridge_setlink);
+EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_setlink);
 
 /**
  *     netdev_switch_port_bridge_dellink - Notify switch device port of bridge
@@ -164,7 +165,7 @@ int netdev_switch_port_bridge_dellink(struct net_device *dev,
 
        return ops->ndo_bridge_dellink(dev, nlh, flags);
 }
-EXPORT_SYMBOL(netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_dellink);
 
 /**
  *     ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink
@@ -194,7 +195,7 @@ int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
 
        return ret;
 }
-EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_setlink);
+EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_setlink);
 
 /**
  *     ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink
@@ -224,4 +225,168 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
 
        return ret;
 }
-EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_dellink);
+
+static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+       struct net_device *lower_dev;
+       struct net_device *port_dev;
+       struct list_head *iter;
+
+       /* Recursively search down until we find a sw port dev.
+        * (A sw port dev supports ndo_switch_parent_id_get).
+        */
+
+       if (dev->features & NETIF_F_HW_SWITCH_OFFLOAD &&
+           ops->ndo_switch_parent_id_get)
+               return dev;
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               port_dev = netdev_switch_get_lowest_dev(lower_dev);
+               if (port_dev)
+                       return port_dev;
+       }
+
+       return NULL;
+}
+
+static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
+{
+       struct netdev_phys_item_id psid;
+       struct netdev_phys_item_id prev_psid;
+       struct net_device *dev = NULL;
+       int nhsel;
+
+       /* For this route, all nexthop devs must be on the same switch. */
+
+       for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+               const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+               if (!nh->nh_dev)
+                       return NULL;
+
+               dev = netdev_switch_get_lowest_dev(nh->nh_dev);
+               if (!dev)
+                       return NULL;
+
+               if (netdev_switch_parent_id_get(dev, &psid))
+                       return NULL;
+
+               if (nhsel > 0) {
+                       if (prev_psid.id_len != psid.id_len)
+                               return NULL;
+                       if (memcmp(prev_psid.id, psid.id, psid.id_len))
+                               return NULL;
+               }
+
+               prev_psid = psid;
+       }
+
+       return dev;
+}
+
+/**
+ *     netdev_switch_fib_ipv4_add - Add IPv4 route entry to switch
+ *
+ *     @dst: route's IPv4 destination address
+ *     @dst_len: destination address length (prefix length)
+ *     @fi: route FIB info structure
+ *     @tos: route TOS
+ *     @type: route type
+ *     @tb_id: route table ID
+ *
+ *     Add IPv4 route entry to switch device.
+ */
+int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+                              u8 tos, u8 type, u32 tb_id)
+{
+       struct net_device *dev;
+       const struct net_device_ops *ops;
+       int err = 0;
+
+       /* Don't offload route if using custom ip rules or if
+        * IPv4 FIB offloading has been disabled completely.
+        */
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+       if (fi->fib_net->ipv4.fib_has_custom_rules)
+               return 0;
+#endif
+
+       if (fi->fib_net->ipv4.fib_offload_disabled)
+               return 0;
+
+       dev = netdev_switch_get_dev_by_nhs(fi);
+       if (!dev)
+               return 0;
+       ops = dev->netdev_ops;
+
+       if (ops->ndo_switch_fib_ipv4_add) {
+               err = ops->ndo_switch_fib_ipv4_add(dev, htonl(dst), dst_len,
+                                                  fi, tos, type, tb_id);
+               if (!err)
+                       fi->fib_flags |= RTNH_F_EXTERNAL;
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add);
+
+/**
+ *     netdev_switch_fib_ipv4_del - Delete IPv4 route entry from switch
+ *
+ *     @dst: route's IPv4 destination address
+ *     @dst_len: destination address length (prefix length)
+ *     @fi: route FIB info structure
+ *     @tos: route TOS
+ *     @type: route type
+ *     @tb_id: route table ID
+ *
+ *     Delete IPv4 route entry from switch device.
+ */
+int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+                              u8 tos, u8 type, u32 tb_id)
+{
+       struct net_device *dev;
+       const struct net_device_ops *ops;
+       int err = 0;
+
+       if (!(fi->fib_flags & RTNH_F_EXTERNAL))
+               return 0;
+
+       dev = netdev_switch_get_dev_by_nhs(fi);
+       if (!dev)
+               return 0;
+       ops = dev->netdev_ops;
+
+       if (ops->ndo_switch_fib_ipv4_del) {
+               err = ops->ndo_switch_fib_ipv4_del(dev, htonl(dst), dst_len,
+                                                  fi, tos, type, tb_id);
+               if (!err)
+                       fi->fib_flags &= ~RTNH_F_EXTERNAL;
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_del);
+
+/**
+ *     netdev_switch_fib_ipv4_abort - Abort an IPv4 FIB operation
+ *
+ *     @fi: route FIB info structure
+ */
+void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+{
+       /* There was a problem installing this route to the offload
+        * device.  For now, until we come up with more refined
+        * policy handling, abruptly end IPv4 fib offloading
+        * for the entire net by flushing offload device(s) of all
+        * IPv4 routes, and mark IPv4 fib offloading broken from
+        * this point forward.
+        */
+
+       fib_flush_external(fi->fib_net);
+       fi->fib_net->ipv4.fib_offload_disabled = true;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_abort);
index 91c8a8e031db718a067fa2ed4ef9f2924ddb7c25..c25a3a149dc4e6d50b20f2ba3ffc5a77d213fdde 100644 (file)
@@ -26,3 +26,11 @@ config TIPC_MEDIA_IB
        help
          Saying Y here will enable support for running TIPC on
          IP-over-InfiniBand devices.
+config TIPC_MEDIA_UDP
+       bool "IP/UDP media type support"
+       depends on TIPC
+       select NET_UDP_TUNNEL
+       help
+         Saying Y here will enable support for running TIPC over IP/UDP
+       bool
+       default y
index 599b1a540d2b0390db6ecbcc8f71aea9c9ad5ca9..57e460be46920cb270c0ed4eddb8d957462eb767 100644 (file)
@@ -10,5 +10,6 @@ tipc-y        += addr.o bcast.o bearer.o \
           netlink.o netlink_compat.o node.o socket.o eth_media.o \
           server.o socket.o
 
+tipc-$(CONFIG_TIPC_MEDIA_UDP)  += udp_media.o
 tipc-$(CONFIG_TIPC_MEDIA_IB)   += ib_media.o
 tipc-$(CONFIG_SYSCTL)          += sysctl.o
index af6deeb397a891e7c110a8c1e61bfbdf893496b7..840db89e428308bbf2e51a51711319baa4a85a65 100644 (file)
@@ -47,6 +47,9 @@ static struct tipc_media * const media_info_array[] = {
        &eth_media_info,
 #ifdef CONFIG_TIPC_MEDIA_IB
        &ib_media_info,
+#endif
+#ifdef CONFIG_TIPC_MEDIA_UDP
+       &udp_media_info,
 #endif
        NULL
 };
@@ -216,7 +219,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
  * tipc_enable_bearer - enable bearer with the given name
  */
 static int tipc_enable_bearer(struct net *net, const char *name,
-                             u32 disc_domain, u32 priority)
+                             u32 disc_domain, u32 priority,
+                             struct nlattr *attr[])
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
@@ -304,7 +308,7 @@ restart:
 
        strcpy(b_ptr->name, name);
        b_ptr->media = m_ptr;
-       res = m_ptr->enable_media(net, b_ptr);
+       res = m_ptr->enable_media(net, b_ptr, attr);
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
@@ -372,7 +376,8 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
        kfree_rcu(b_ptr, rcu);
 }
 
-int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b)
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+                        struct nlattr *attr[])
 {
        struct net_device *dev;
        char *driver_name = strchr((const char *)b->name, ':') + 1;
@@ -791,7 +796,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
        }
 
        rtnl_lock();
-       err = tipc_enable_bearer(net, bearer, domain, prio);
+       err = tipc_enable_bearer(net, bearer, domain, prio, attrs);
        if (err) {
                rtnl_unlock();
                return err;
index 097aff08ad5b056f3fed74079d6ad8b030f98219..5cad243ee8fc646efccfe72f26ad49ebdfe42f4f 100644 (file)
@@ -41,7 +41,7 @@
 #include <net/genetlink.h>
 
 #define MAX_BEARERS    2
-#define MAX_MEDIA      2
+#define MAX_MEDIA      3
 #define MAX_NODES      4096
 #define WSIZE          32
 
@@ -59,6 +59,7 @@
  */
 #define TIPC_MEDIA_TYPE_ETH    1
 #define TIPC_MEDIA_TYPE_IB     2
+#define TIPC_MEDIA_TYPE_UDP    3
 
 /**
  * struct tipc_node_map - set of node identifiers
@@ -104,7 +105,8 @@ struct tipc_media {
        int (*send_msg)(struct net *net, struct sk_buff *buf,
                        struct tipc_bearer *b_ptr,
                        struct tipc_media_addr *dest);
-       int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr);
+       int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr,
+                           struct nlattr *attr[]);
        void (*disable_media)(struct tipc_bearer *b_ptr);
        int (*addr2str)(struct tipc_media_addr *addr,
                        char *strbuf,
@@ -183,6 +185,9 @@ extern struct tipc_media eth_media_info;
 #ifdef CONFIG_TIPC_MEDIA_IB
 extern struct tipc_media ib_media_info;
 #endif
+#ifdef CONFIG_TIPC_MEDIA_UDP
+extern struct tipc_media udp_media_info;
+#endif
 
 int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
@@ -197,7 +202,8 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
-int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b);
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+                        struct nlattr *attrs[]);
 void tipc_disable_l2_media(struct tipc_bearer *b);
 int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
                     struct tipc_bearer *b, struct tipc_media_addr *dest);
index feef3753615d24f9067f859fcf7b0476b2708775..5967506833ce9bf5ca93478424f46d88a9e607c3 100644 (file)
@@ -86,7 +86,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
 
        msg = buf_msg(buf);
        tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
-                     INT_H_SIZE, dest_domain);
+                     MAX_H_SIZE, dest_domain);
        msg_set_non_seq(msg, 1);
        msg_set_node_sig(msg, tn->random);
        msg_set_dest_domain(msg, dest_domain);
@@ -249,7 +249,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
 
        /* Send response, if necessary */
        if (respond && (mtyp == DSC_REQ_MSG)) {
-               rbuf = tipc_buf_acquire(INT_H_SIZE);
+               rbuf = tipc_buf_acquire(MAX_H_SIZE);
                if (rbuf) {
                        tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
                        tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
@@ -359,8 +359,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
        req = kmalloc(sizeof(*req), GFP_ATOMIC);
        if (!req)
                return -ENOMEM;
-
-       req->buf = tipc_buf_acquire(INT_H_SIZE);
+       req->buf = tipc_buf_acquire(MAX_H_SIZE);
        if (!req->buf) {
                kfree(req);
                return -ENOMEM;
index c1cc8d7a5d52b7cdb5963e1ab70323398faf0a14..fa167846d1ab50f411ef0ea73080f7c4db82207d 100644 (file)
@@ -87,7 +87,7 @@ struct plist;
  * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
  *       are word aligned for quicker access
  */
-#define BUF_HEADROOM LL_MAX_HEADER
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
 
 struct tipc_skb_cb {
        void *handle;
index dcb797c60806df5ccca610c2b0745efba2cbb3d5..934947f038b67738c8e5c475e6768d75e6cf7f5b 100644 (file)
@@ -1318,12 +1318,12 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
                err = 0;
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        break;
-               err = sock_intr_errno(timeo);
-               if (signal_pending(current))
-                       break;
                err = -EAGAIN;
                if (!timeo)
                        break;
+               err = sock_intr_errno(timeo);
+               if (signal_pending(current))
+                       break;
        }
        finish_wait(sk_sleep(sk), &wait);
        *timeop = timeo;
@@ -2026,12 +2026,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
                err = -EINVAL;
                if (sock->state != SS_LISTENING)
                        break;
-               err = sock_intr_errno(timeo);
-               if (signal_pending(current))
-                       break;
                err = -EAGAIN;
                if (!timeo)
                        break;
+               err = sock_intr_errno(timeo);
+               if (signal_pending(current))
+                       break;
        }
        finish_wait(sk_sleep(sk), &wait);
        return err;
@@ -2363,8 +2363,6 @@ int tipc_sk_rht_init(struct net *net)
                .hashfn = jhash,
                .max_shift = 20, /* 1M */
                .min_shift = 8,  /* 256 */
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
        };
 
        return rhashtable_init(&tn->sk_rht, &rht_params);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
new file mode 100644 (file)
index 0000000..fc2fb11
--- /dev/null
@@ -0,0 +1,442 @@
+/* net/tipc/udp_media.c: IP bearer support for TIPC
+ *
+ * Copyright (c) 2015, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/inet.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/udp_tunnel.h>
+#include <linux/tipc_netlink.h>
+#include "core.h"
+#include "bearer.h"
+
+/* IANA assigned UDP port */
+#define UDP_PORT_DEFAULT       6118
+
+/* Netlink attribute policy for TIPC UDP bearer options: the local and
+ * remote endpoints arrive as opaque sockaddr_storage blobs and are decoded
+ * later by parse_options().
+ */
+static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
+       [TIPC_NLA_UDP_UNSPEC]   = {.type = NLA_UNSPEC},
+       [TIPC_NLA_UDP_LOCAL]    = {.type = NLA_BINARY,
+                                  .len = sizeof(struct sockaddr_storage)},
+       [TIPC_NLA_UDP_REMOTE]   = {.type = NLA_BINARY,
+                                  .len = sizeof(struct sockaddr_storage)},
+};
+
+/**
+ * struct udp_media_addr - IP/UDP addressing information
+ * @proto:     htons(ETH_P_IP) or htons(ETH_P_IPV6); selects the valid
+ *             union member
+ * @udp_port:  UDP port, network byte order
+ * @ipv4:      IPv4 address, valid when @proto == htons(ETH_P_IP)
+ * @ipv6:      IPv6 address, valid when @proto == htons(ETH_P_IPV6)
+ *
+ * This is the bearer level originating address used in neighbor discovery
+ * messages, and all fields should be in network byte order
+ */
+struct udp_media_addr {
+       __be16  proto;
+       __be16  udp_port;
+       union {
+               struct in_addr ipv4;
+               struct in6_addr ipv6;
+       };
+};
+
+/**
+ * struct udp_bearer - ip/udp bearer data structure
+ * @bearer:    associated generic tipc bearer (RCU-protected back-pointer,
+ *             cleared on disable)
+ * @ubsock:    bearer associated socket
+ * @ifindex:   local address scope
+ * @work:      used to schedule deferred work on a bearer
+ */
+struct udp_bearer {
+       struct tipc_bearer __rcu *bearer;
+       struct socket *ubsock;
+       u32 ifindex;
+       struct work_struct work;
+};
+
+/* udp_media_addr_set - convert a ip/udp address to a TIPC media address
+ *
+ * Copies @ua into @addr->value and flags the result as broadcast when the
+ * IP address is multicast. An unrecognized protocol is only logged; the
+ * media address is still filled in with @ua's raw bytes.
+ */
+static void tipc_udp_media_addr_set(struct tipc_media_addr *addr,
+                                   struct udp_media_addr *ua)
+{
+       memset(addr, 0, sizeof(struct tipc_media_addr));
+       addr->media_id = TIPC_MEDIA_TYPE_UDP;
+       memcpy(addr->value, ua, sizeof(struct udp_media_addr));
+       if (ntohs(ua->proto) == ETH_P_IP) {
+               if (ipv4_is_multicast(ua->ipv4.s_addr))
+                       addr->broadcast = 1;
+       } else if (ntohs(ua->proto) == ETH_P_IPV6) {
+               if (ipv6_addr_type(&ua->ipv6) & IPV6_ADDR_MULTICAST)
+                       addr->broadcast = 1;
+       } else {
+               pr_err("Invalid UDP media address\n");
+       }
+}
+
+/* tipc_udp_addr2str - convert ip/udp address to string
+ *
+ * Always returns 0; an unrecognized protocol is logged and @buf is left
+ * untouched.
+ */
+static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
+{
+       struct udp_media_addr *ua = (struct udp_media_addr *)&a->value;
+
+       if (ntohs(ua->proto) == ETH_P_IP)
+               snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->udp_port));
+       else if (ntohs(ua->proto) == ETH_P_IPV6)
+               snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->udp_port));
+       else
+               pr_err("Invalid UDP media address\n");
+       return 0;
+}
+
+/* tipc_udp_msg2addr - extract an ip/udp address from a TIPC ndisc message
+ *
+ * Returns -EINVAL if the message does not carry a UDP media type;
+ * otherwise fills @a from the address bytes embedded in @msg.
+ */
+static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a,
+                            char *msg)
+{
+       struct udp_media_addr *ua;
+
+       ua = (struct udp_media_addr *) (msg + TIPC_MEDIA_ADDR_OFFSET);
+       if (msg[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_UDP)
+               return -EINVAL;
+       tipc_udp_media_addr_set(a, ua);
+       return 0;
+}
+
+/* tipc_udp_addr2msg - write an ip/udp address to a TIPC ndisc message
+ *
+ * Zeroes the media info area, tags it as UDP and copies the raw
+ * udp_media_addr bytes in. Always returns 0.
+ */
+static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a)
+{
+       memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
+       msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_UDP;
+       memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, a->value,
+              sizeof(struct udp_media_addr));
+       return 0;
+}
+
+/* tipc_send_msg - enqueue a send request */
+static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
+                            struct tipc_bearer *b,
+                            struct tipc_media_addr *dest)
+{
+       int ttl, err = 0;
+       struct udp_bearer *ub;
+       struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
+       struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
+       struct sk_buff *clone;
+       struct rtable *rt;
+
+       clone = skb_clone(skb, GFP_ATOMIC);
+       skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
+       ub = rcu_dereference_rtnl(b->media_ptr);
+       if (!ub) {
+               err = -ENODEV;
+               goto tx_error;
+       }
+       if (dst->proto == htons(ETH_P_IP)) {
+               struct flowi4 fl = {
+                       .daddr = dst->ipv4.s_addr,
+                       .saddr = src->ipv4.s_addr,
+                       .flowi4_mark = clone->mark,
+                       .flowi4_proto = IPPROTO_UDP
+               };
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt)) {
+                       err = PTR_ERR(rt);
+                       goto tx_error;
+               }
+               ttl = ip4_dst_hoplimit(&rt->dst);
+               err = udp_tunnel_xmit_skb(rt, clone, src->ipv4.s_addr,
+                                         dst->ipv4.s_addr, 0, ttl, 0,
+                                         src->udp_port, dst->udp_port,
+                                         false, true);
+               if (err < 0) {
+                       ip_rt_put(rt);
+                       goto tx_error;
+               }
+#if IS_ENABLED(CONFIG_IPV6)
+       } else {
+               struct dst_entry *ndst;
+               struct flowi6 fl6 = {
+                       .flowi6_oif = ub->ifindex,
+                       .daddr = dst->ipv6,
+                       .saddr = src->ipv6,
+                       .flowi6_proto = IPPROTO_UDP
+               };
+               err = ip6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+               if (err)
+                       goto tx_error;
+               ttl = ip6_dst_hoplimit(ndst);
+               err = udp_tunnel6_xmit_skb(ndst, clone, ndst->dev, &src->ipv6,
+                                          &dst->ipv6, 0, ttl, src->udp_port,
+                                          dst->udp_port, false);
+#endif
+       }
+       return err;
+
+tx_error:
+       kfree_skb(clone);
+       return err;
+}
+
+/* tipc_udp_recv - read data from bearer socket
+ *
+ * encap_rcv callback; always returns 0 so the UDP stack never processes
+ * the packet again. The skb is consumed: handed to tipc_rcv() when the
+ * bearer is still attached, freed otherwise.
+ */
+static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
+{
+       struct udp_bearer *ub;
+       struct tipc_bearer *b;
+
+       ub = rcu_dereference_sk_user_data(sk);
+       if (!ub) {
+               pr_err_ratelimited("Failed to get UDP bearer reference");
+               kfree_skb(skb);
+               return 0;
+       }
+
+       /* strip the UDP header; the TIPC message starts right after it */
+       skb_pull(skb, sizeof(struct udphdr));
+       rcu_read_lock();
+       b = rcu_dereference_rtnl(ub->bearer);
+
+       if (b) {
+               tipc_rcv(sock_net(sk), skb, b);
+               rcu_read_unlock();
+               return 0;
+       }
+       rcu_read_unlock();
+       /* bearer is being torn down (see tipc_udp_disable); drop packet */
+       kfree_skb(skb);
+       return 0;
+}
+
+/* enable_mcast - join the configured multicast group, if any
+ *
+ * Returns 0 without side effects when @remote is a unicast address;
+ * otherwise joins the IPv4/IPv6 group on the bearer's interface and
+ * returns the join result.
+ */
+static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
+{
+       int err = 0;
+       struct ip_mreqn mreqn;
+       struct sock *sk = ub->ubsock->sk;
+
+       if (ntohs(remote->proto) == ETH_P_IP) {
+               if (!ipv4_is_multicast(remote->ipv4.s_addr))
+                       return 0;
+               mreqn.imr_multiaddr = remote->ipv4;
+               mreqn.imr_ifindex = ub->ifindex;
+               err = __ip_mc_join_group(sk, &mreqn);
+       } else {
+               if (!ipv6_addr_is_multicast(&remote->ipv6))
+                       return 0;
+               err = __ipv6_sock_mc_join(sk, ub->ifindex, &remote->ipv6);
+       }
+       return err;
+}
+
+/**
+ * parse_options - build local/remote addresses from configuration
+ * @attrs:     netlink config data
+ * @ub:                UDP bearer instance
+ * @local:     local bearer IP address/port
+ * @remote:    peer or multicast IP/port
+ */
+static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub,
+                        struct udp_media_addr *local,
+                        struct udp_media_addr *remote)
+{
+       struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
+       struct sockaddr_storage *sa_local, *sa_remote;
+
+       if (!attrs[TIPC_NLA_BEARER_UDP_OPTS])
+               goto err;
+       if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX,
+                            attrs[TIPC_NLA_BEARER_UDP_OPTS],
+                            tipc_nl_udp_policy))
+               goto err;
+       if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) {
+               sa_local = nla_data(opts[TIPC_NLA_UDP_LOCAL]);
+               sa_remote = nla_data(opts[TIPC_NLA_UDP_REMOTE]);
+       } else {
+err:
+               pr_err("Invalid UDP bearer configuration");
+               return -EINVAL;
+       }
+       if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET) {
+               struct sockaddr_in *ip4;
+
+               ip4 = (struct sockaddr_in *)sa_local;
+               local->proto = htons(ETH_P_IP);
+               local->udp_port = ip4->sin_port;
+               local->ipv4.s_addr = ip4->sin_addr.s_addr;
+
+               ip4 = (struct sockaddr_in *)sa_remote;
+               remote->proto = htons(ETH_P_IP);
+               remote->udp_port = ip4->sin_port;
+               remote->ipv4.s_addr = ip4->sin_addr.s_addr;
+               return 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET6) {
+               struct sockaddr_in6 *ip6;
+
+               ip6 = (struct sockaddr_in6 *)sa_local;
+               local->proto = htons(ETH_P_IPV6);
+               local->udp_port = ip6->sin6_port;
+               local->ipv6 = ip6->sin6_addr;
+               ub->ifindex = ip6->sin6_scope_id;
+
+               ip6 = (struct sockaddr_in6 *)sa_remote;
+               remote->proto = htons(ETH_P_IPV6);
+               remote->udp_port = ip6->sin6_port;
+               remote->ipv6 = ip6->sin6_addr;
+               return 0;
+#endif
+       }
+       return -EADDRNOTAVAIL;
+}
+
+/**
+ * tipc_udp_enable - callback to create a new udp bearer instance
+ * @net:       network namespace
+ * @b:         pointer to generic tipc_bearer
+ * @attrs:     netlink bearer configuration
+ *
+ * validate the bearer parameters and initialize the udp bearer
+ * rtnl_lock should be held
+ */
+static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
+                          struct nlattr *attrs[])
+{
+       int err = -EINVAL;
+       struct udp_bearer *ub;
+       struct udp_media_addr *remote;
+       struct udp_media_addr local = {0};
+       struct udp_port_cfg udp_conf = {0};
+       struct udp_tunnel_sock_cfg tuncfg = {NULL};
+
+       ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
+       if (!ub)
+               return -ENOMEM;
+
+       remote = (struct udp_media_addr *)&b->bcast_addr.value;
+       memset(remote, 0, sizeof(struct udp_media_addr));
+       err = parse_options(attrs, ub, &local, remote);
+       if (err)
+               goto err;
+
+       b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
+       b->bcast_addr.broadcast = 1;
+       rcu_assign_pointer(b->media_ptr, ub);
+       rcu_assign_pointer(ub->bearer, b);
+       tipc_udp_media_addr_set(&b->addr, &local);
+       if (local.proto == htons(ETH_P_IP)) {
+               struct net_device *dev;
+
+               dev = __ip_dev_find(net, local.ipv4.s_addr, false);
+               if (!dev) {
+                       err = -ENODEV;
+                       goto err;
+               }
+               udp_conf.family = AF_INET;
+               udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+               udp_conf.use_udp_checksums = false;
+               ub->ifindex = dev->ifindex;
+               b->mtu = dev->mtu - sizeof(struct iphdr)
+                       - sizeof(struct udphdr);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (local.proto == htons(ETH_P_IPV6)) {
+               udp_conf.family = AF_INET6;
+               udp_conf.use_udp6_tx_checksums = true;
+               udp_conf.use_udp6_rx_checksums = true;
+               udp_conf.local_ip6 = in6addr_any;
+               b->mtu = 1280;
+#endif
+       } else {
+               err = -EAFNOSUPPORT;
+               goto err;
+       }
+       udp_conf.local_udp_port = local.udp_port;
+       err = udp_sock_create(net, &udp_conf, &ub->ubsock);
+       if (err)
+               goto err;
+       tuncfg.sk_user_data = ub;
+       tuncfg.encap_type = 1;
+       tuncfg.encap_rcv = tipc_udp_recv;
+       tuncfg.encap_destroy = NULL;
+       setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
+
+       if (enable_mcast(ub, remote))
+               goto err;
+       return 0;
+err:
+       kfree(ub);
+       return err;
+}
+
+/* cleanup_bearer - break the socket/bearer association
+ *
+ * Runs from the workqueue (scheduled by tipc_udp_disable) so that the
+ * socket release happens outside the rtnl lock; synchronize_net() ensures
+ * no RCU reader still references ub before it is freed.
+ */
+static void cleanup_bearer(struct work_struct *work)
+{
+       struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
+
+       if (ub->ubsock)
+               udp_tunnel_sock_release(ub->ubsock);
+       synchronize_net();
+       kfree(ub);
+}
+
+/* tipc_udp_disable - detach bearer from socket
+ *
+ * Called under rtnl lock. Clears both RCU pointers so the rx path stops
+ * delivering to this bearer, then defers the actual socket release to a
+ * workqueue (see cleanup_bearer).
+ */
+static void tipc_udp_disable(struct tipc_bearer *b)
+{
+       struct udp_bearer *ub;
+
+       ub = rcu_dereference_rtnl(b->media_ptr);
+       if (!ub) {
+               pr_err("UDP bearer instance not found\n");
+               return;
+       }
+       if (ub->ubsock)
+               sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
+       RCU_INIT_POINTER(b->media_ptr, NULL);
+       RCU_INIT_POINTER(ub->bearer, NULL);
+
+       /* sock_release need to be done outside of rtnl lock */
+       INIT_WORK(&ub->work, cleanup_bearer);
+       schedule_work(&ub->work);
+}
+
+/* Media registration record for the UDP pseudo-media; hwaddr_len is 0
+ * because UDP bearers carry addressing in udp_media_addr, not in a
+ * hardware address.
+ */
+struct tipc_media udp_media_info = {
+       .send_msg       = tipc_udp_send_msg,
+       .enable_media   = tipc_udp_enable,
+       .disable_media  = tipc_udp_disable,
+       .addr2str       = tipc_udp_addr2str,
+       .addr2msg       = tipc_udp_addr2msg,
+       .msg2addr       = tipc_udp_msg2addr,
+       .priority       = TIPC_DEF_LINK_PRI,
+       .tolerance      = TIPC_DEF_LINK_TOL,
+       .window         = TIPC_DEF_LINK_WIN,
+       .type_id        = TIPC_MEDIA_TYPE_UDP,
+       .hwaddr_len     = 0,
+       .name           = "udp"
+};
index 3af0ecf1cc16859abecb7451df5cabe35d6dbf08..2a0bbd22854bd97b377139200f9e6b5e0ec2662f 100644 (file)
@@ -1199,6 +1199,7 @@ out_fail_wq:
        regulatory_exit();
 out_fail_reg:
        debugfs_remove(ieee80211_debugfs_dir);
+       nl80211_exit();
 out_fail_nl80211:
        unregister_netdevice_notifier(&cfg80211_netdev_notifier);
 out_fail_notifier:
index e24fc585c8834782295558481f0f592be743e6dc..6309b9c0bcd5ac821f141ade56a2f8f3c089c486 100644 (file)
@@ -533,7 +533,7 @@ int cfg80211_ibss_wext_giwap(struct net_device *dev,
        else if (wdev->wext.ibss.bssid)
                memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN);
        else
-               memset(ap_addr->sa_data, 0, ETH_ALEN);
+               eth_zero_addr(ap_addr->sa_data);
 
        wdev_unlock(wdev);
 
index d78fd8b54515e630b67bf38d710b2b698f703c4c..864b782c0202562e234e18f359b9eec8bbe9b216 100644 (file)
@@ -2654,10 +2654,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                        return err;
        }
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg)
-               return -ENOMEM;
-
        err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
                                  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
                                  &flags);
@@ -2666,6 +2662,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
            !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
                return -EOPNOTSUPP;
 
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
        wdev = rdev_add_virtual_intf(rdev,
                                nla_data(info->attrs[NL80211_ATTR_IFNAME]),
                                type, err ? NULL : &flags, &params);
@@ -5683,8 +5683,8 @@ static int nl80211_parse_random_mac(struct nlattr **attrs,
        int i;
 
        if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) {
-               memset(mac_addr, 0, ETH_ALEN);
-               memset(mac_addr_mask, 0, ETH_ALEN);
+               eth_zero_addr(mac_addr);
+               eth_zero_addr(mac_addr_mask);
                mac_addr[0] = 0x2;
                mac_addr_mask[0] = 0x3;
 
@@ -12528,9 +12528,7 @@ static int cfg80211_net_detect_results(struct sk_buff *msg,
                        }
 
                        for (j = 0; j < match->n_channels; j++) {
-                               if (nla_put_u32(msg,
-                                               NL80211_ATTR_WIPHY_FREQ,
-                                               match->channels[j])) {
+                               if (nla_put_u32(msg, j, match->channels[j])) {
                                        nla_nest_cancel(msg, nl_freqs);
                                        nla_nest_cancel(msg, nl_match);
                                        goto out;
index b586d0dcb09ebc9382fa0bd22016264e5bdd21c2..48dfc7b4e98130e4d8d5b265fceadd9004ed4f5e 100644 (file)
@@ -228,7 +228,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
 
 /* We keep a static world regulatory domain in case of the absence of CRDA */
 static const struct ieee80211_regdomain world_regdom = {
-       .n_reg_rules = 6,
+       .n_reg_rules = 8,
        .alpha2 =  "00",
        .reg_rules = {
                /* IEEE 802.11b/g, channels 1..11 */
index b17b3692f8c239d918a072d2fab9031eb91073b4..a00ee8897dc67047f5d2a295d1eb3dcc42c33673 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/tracepoint.h>
 
 #include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
 #include <net/cfg80211.h>
 #include "core.h"
 
@@ -15,7 +16,7 @@
        if (given_mac)                                               \
                memcpy(__entry->entry_mac, given_mac, ETH_ALEN);     \
        else                                                         \
-               memset(__entry->entry_mac, 0, ETH_ALEN);             \
+               eth_zero_addr(__entry->entry_mac);                   \
        } while (0)
 #define MAC_PR_FMT "%pM"
 #define MAC_PR_ARG(entry_mac) (__entry->entry_mac)
@@ -1077,7 +1078,7 @@ TRACE_EVENT(rdev_auth,
                if (req->bss)
                        MAC_ASSIGN(bssid, req->bss->bssid);
                else
-                       memset(__entry->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(__entry->bssid);
                __entry->auth_type = req->auth_type;
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT,
@@ -1103,7 +1104,7 @@ TRACE_EVENT(rdev_assoc,
                if (req->bss)
                        MAC_ASSIGN(bssid, req->bss->bssid);
                else
-                       memset(__entry->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(__entry->bssid);
                MAC_ASSIGN(prev_bssid, req->prev_bssid);
                __entry->use_mfp = req->use_mfp;
                __entry->flags = req->flags;
@@ -1153,7 +1154,7 @@ TRACE_EVENT(rdev_disassoc,
                if (req->bss)
                        MAC_ASSIGN(bssid, req->bss->bssid);
                else
-                       memset(__entry->bssid, 0, ETH_ALEN);
+                       eth_zero_addr(__entry->bssid);
                __entry->reason_code = req->reason_code;
                __entry->local_state_change = req->local_state_change;
        ),
index 368611c0573997e45f433c68a0a2c0e9de17270b..a4e8af3321d2ba5e07cd316b3ea8b65d1aecb204 100644 (file)
@@ -322,7 +322,7 @@ int cfg80211_mgd_wext_giwap(struct net_device *dev,
        if (wdev->current_bss)
                memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
        else
-               memset(ap_addr->sa_data, 0, ETH_ALEN);
+               eth_zero_addr(ap_addr->sa_data);
        wdev_unlock(wdev);
 
        return 0;
index edd2794569db96a052579b3700b30ac9335510a4..d3437b82ac256cb7bca2527f0cfe43e07f66a1cb 100644 (file)
@@ -129,17 +129,15 @@ cc-disable-warning = $(call try-run,\
        $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-version
-# Usage gcc-ver := $(call cc-version)
 cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
 
 # cc-fullversion
-# Usage gcc-ver := $(call cc-fullversion)
 cc-fullversion = $(shell $(CONFIG_SHELL) \
        $(srctree)/scripts/gcc-version.sh -p $(CC))
 
 # cc-ifversion
 # Usage:  EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
-cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
+cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
 
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
@@ -157,13 +155,12 @@ ld-option = $(call try-run,\
 ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
 
 # ld-version
-# Usage: $(call ld-version)
 # Note this is mainly for HJ Lu's 3 number binutil versions
 ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
 
 # ld-ifversion
 # Usage:  $(call ld-ifversion, -ge, 22252, y)
-ld-ifversion = $(shell [ $(call ld-version) $(1) $(2) ] && echo $(3))
+ld-ifversion = $(shell [ $(ld-version) $(1) $(2) ] && echo $(3) || echo $(4))
 
 ######
 
index 627f8cbbedb88ca29667bbf1f88eb2004d5ee461..55c96cb8070f1130352ab85f77440dfd497d5677 100644 (file)
@@ -70,9 +70,6 @@ ifneq ($(strip $(__clean-files)),)
 endif
 ifneq ($(strip $(__clean-dirs)),)
        +$(call cmd,cleandir)
-endif
-ifneq ($(strip $(clean-rule)),)
-       +$(clean-rule)
 endif
        @:
 
diff --git a/scripts/gdb/linux/__init__.py b/scripts/gdb/linux/__init__.py
new file mode 100644 (file)
index 0000000..4680fb1
--- /dev/null
@@ -0,0 +1 @@
+# nothing to do for the initialization of this package
index f88d90f20228e8783b5ca39accc436140af7b3ff..28df18dd1147f60b555fea428afe42513b592410 100644 (file)
@@ -59,6 +59,7 @@ static void conf_message(const char *fmt, ...)
        va_start(ap, fmt);
        if (conf_message_callback)
                conf_message_callback(fmt, ap);
+       va_end(ap);
 }
 
 const char *conf_get_configname(void)
index 81b0c61bb9e2060c18fb9dd315e8a9e0ae8a195a..2ab91b9b100dc6f6cfba663d05580e14930a3def 100755 (executable)
@@ -77,6 +77,11 @@ while true; do
        esac
 done
 
+if [ "$#" -lt 2 ] ; then
+       usage
+       exit
+fi
+
 INITFILE=$1
 shift;
 
index 59726243c2ebab1a9263019a5ff8eb759b2fdda7..88dbf23b697082aa899c6b414b65cf0e4ecbf67d 100755 (executable)
@@ -217,9 +217,20 @@ else
 fi
 maintainer="$name <$email>"
 
+# Try to determine distribution
+if [ -n "$KDEB_CHANGELOG_DIST" ]; then
+        distribution=$KDEB_CHANGELOG_DIST
+elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ]; then
+        : # nothing to do in this case
+else
+        distribution="unstable"
+        echo >&2 "Using default distribution of 'unstable' in the changelog"
+        echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly"
+fi
+
 # Generate a simple changelog template
 cat <<EOF > debian/changelog
-linux-upstream ($packageversion) unstable; urgency=low
+linux-upstream ($packageversion) $distribution; urgency=low
 
   * Custom built Linux kernel.
 
@@ -233,10 +244,10 @@ This is a packacked upstream version of the Linux kernel.
 The sources may be found at most Linux ftp sites, including:
 ftp://ftp.kernel.org/pub/linux/kernel
 
-Copyright: 1991 - 2009 Linus Torvalds and others.
+Copyright: 1991 - 2015 Linus Torvalds and others.
 
 The git repository for mainline kernel development is at:
-git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
+git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
index 97130f88838bc2ad385b5ccd69bf0dfc51acae95..e4ea6266386662c2c88445743ff2e107b665230a 100644 (file)
@@ -112,9 +112,9 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
        return aa_dfa_next(dfa, start, 0);
 }
 
-static inline bool mediated_filesystem(struct inode *inode)
+static inline bool mediated_filesystem(struct dentry *dentry)
 {
-       return !(inode->i_sb->s_flags & MS_NOUSER);
+       return !(dentry->d_sb->s_flags & MS_NOUSER);
 }
 
 #endif /* __APPARMOR_H */
index 65ca451a764db1a38db4c8a68be1acd015f14d4f..107db88b1d5f9d1d5dda20c0636f229738fec8bd 100644 (file)
@@ -226,7 +226,7 @@ static int common_perm_rm(int op, struct path *dir,
        struct inode *inode = dentry->d_inode;
        struct path_cond cond = { };
 
-       if (!inode || !dir->mnt || !mediated_filesystem(inode))
+       if (!inode || !dir->mnt || !mediated_filesystem(dentry))
                return 0;
 
        cond.uid = inode->i_uid;
@@ -250,7 +250,7 @@ static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
 {
        struct path_cond cond = { current_fsuid(), mode };
 
-       if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode))
+       if (!dir->mnt || !mediated_filesystem(dir->dentry))
                return 0;
 
        return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
@@ -285,7 +285,7 @@ static int apparmor_path_truncate(struct path *path)
                                  path->dentry->d_inode->i_mode
        };
 
-       if (!path->mnt || !mediated_filesystem(path->dentry->d_inode))
+       if (!path->mnt || !mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
@@ -305,7 +305,7 @@ static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(old_dentry->d_inode))
+       if (!mediated_filesystem(old_dentry))
                return 0;
 
        profile = aa_current_profile();
@@ -320,7 +320,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(old_dentry->d_inode))
+       if (!mediated_filesystem(old_dentry))
                return 0;
 
        profile = aa_current_profile();
@@ -346,7 +346,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
 
 static int apparmor_path_chmod(struct path *path, umode_t mode)
 {
-       if (!mediated_filesystem(path->dentry->d_inode))
+       if (!mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
@@ -358,7 +358,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
                                   path->dentry->d_inode->i_mode
        };
 
-       if (!mediated_filesystem(path->dentry->d_inode))
+       if (!mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
@@ -366,7 +366,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 
 static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
 {
-       if (!mediated_filesystem(dentry->d_inode))
+       if (!mediated_filesystem(dentry))
                return 0;
 
        return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
@@ -379,7 +379,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(file_inode(file)))
+       if (!mediated_filesystem(file->f_path.dentry))
                return 0;
 
        /* If in exec, permission is handled by bprm hooks.
@@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask)
        BUG_ON(!fprofile);
 
        if (!file->f_path.mnt ||
-           !mediated_filesystem(file_inode(file)))
+           !mediated_filesystem(file->f_path.dentry))
                return 0;
 
        profile = __aa_current_profile();
index 35b394a75d762dd6a4e935f3ffe1d5b4566a2885..71e0e3a15b9dc3bbae6b73cd1d8134768f67d2c5 100644 (file)
@@ -114,7 +114,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *    security_path hooks as a deleted dentry except without an inode
         *    allocated.
         */
-       if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+       if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
            !(flags & PATH_MEDIATE_DELETED)) {
                        error = -ENOENT;
                        goto out;
index 8e7ca62078abe85f988a90b796f265fc2172d526..131a3c49f766444f167f88d19b712ef80ee47a66 100644 (file)
@@ -203,7 +203,7 @@ void securityfs_remove(struct dentry *dentry)
        mutex_lock(&parent->d_inode->i_mutex);
        if (positive(dentry)) {
                if (dentry->d_inode) {
-                       if (S_ISDIR(dentry->d_inode->i_mode))
+                       if (d_is_dir(dentry))
                                simple_rmdir(parent->d_inode, dentry);
                        else
                                simple_unlink(parent->d_inode, dentry);
index b76235ae4786f2c34bc26072fad69bf457b124ce..73c457bf5a4aea01eb56b36613ee69f929428765 100644 (file)
@@ -16,7 +16,7 @@ config INTEGRITY
 if INTEGRITY
 
 config INTEGRITY_SIGNATURE
-       boolean "Digital signature verification using multiple keyrings"
+       bool "Digital signature verification using multiple keyrings"
        depends on KEYS
        default n
        select SIGNATURE
@@ -30,7 +30,7 @@ config INTEGRITY_SIGNATURE
          usually only added from initramfs.
 
 config INTEGRITY_ASYMMETRIC_KEYS
-       boolean "Enable asymmetric keys support"
+       bool "Enable asymmetric keys support"
        depends on INTEGRITY_SIGNATURE
        default n
         select ASYMMETRIC_KEY_TYPE
index df586fa00ef1e9891efae42d629e952c05eaec4d..bf19723cf1178959f82eb9782dd3ee49ea46acaf 100644 (file)
@@ -1,5 +1,5 @@
 config EVM
-       boolean "EVM support"
+       bool "EVM support"
        select KEYS
        select ENCRYPTED_KEYS
        select CRYPTO_HMAC
index 29c39e0b03ed7e3048d5fed858f31d40dc2f8d3d..4d1a54190388df96dddb7ff951c681dc28bab866 100644 (file)
@@ -1799,7 +1799,7 @@ static inline int may_rename(struct inode *old_dir,
 
        old_dsec = old_dir->i_security;
        old_isec = old_dentry->d_inode->i_security;
-       old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
+       old_is_dir = d_is_dir(old_dentry);
        new_dsec = new_dir->i_security;
 
        ad.type = LSM_AUDIT_DATA_DENTRY;
@@ -1822,14 +1822,14 @@ static inline int may_rename(struct inode *old_dir,
 
        ad.u.dentry = new_dentry;
        av = DIR__ADD_NAME | DIR__SEARCH;
-       if (new_dentry->d_inode)
+       if (d_is_positive(new_dentry))
                av |= DIR__REMOVE_NAME;
        rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
        if (rc)
                return rc;
-       if (new_dentry->d_inode) {
+       if (d_is_positive(new_dentry)) {
                new_isec = new_dentry->d_inode->i_security;
-               new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
+               new_is_dir = d_is_dir(new_dentry);
                rc = avc_has_perm(sid, new_isec->sid,
                                  new_isec->sclass,
                                  (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
index ed94f6f836e75baf9086f553c78a2d325ea99285..c934311812f1a777093c44a89543dcae924b8568 100644 (file)
@@ -855,7 +855,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
        rc = smk_curacc(isp, MAY_WRITE, &ad);
        rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc);
 
-       if (rc == 0 && new_dentry->d_inode != NULL) {
+       if (rc == 0 && d_is_positive(new_dentry)) {
                isp = smk_of_inode(new_dentry->d_inode);
                smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
                rc = smk_curacc(isp, MAY_WRITE, &ad);
@@ -961,7 +961,7 @@ static int smack_inode_rename(struct inode *old_inode,
        rc = smk_curacc(isp, MAY_READWRITE, &ad);
        rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc);
 
-       if (rc == 0 && new_dentry->d_inode != NULL) {
+       if (rc == 0 && d_is_positive(new_dentry)) {
                isp = smk_of_inode(new_dentry->d_inode);
                smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
                rc = smk_curacc(isp, MAY_READWRITE, &ad);
index 400390790745212764bd99c9178dceea031382ef..c151a1869597f8155a0296f89fafa61cc65f447d 100644 (file)
@@ -905,11 +905,9 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
            !tomoyo_get_realpath(&buf2, path2))
                goto out;
        switch (operation) {
-               struct dentry *dentry;
        case TOMOYO_TYPE_RENAME:
        case TOMOYO_TYPE_LINK:
-               dentry = path1->dentry;
-               if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
+               if (!d_is_dir(path1->dentry))
                        break;
                /* fall through */
        case TOMOYO_TYPE_PIVOT_ROOT:
index b03a638b420c18243776c45fda5884b422392c46..279e24f613051fddb8ca16375ab9031e6a703b03 100644 (file)
@@ -1552,6 +1552,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
                        if (! snd_pcm_playback_empty(substream)) {
                                snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
                                snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
+                       } else {
+                               runtime->status->state = SNDRV_PCM_STATE_SETUP;
                        }
                        break;
                case SNDRV_PCM_STATE_RUNNING:
index 9b6470cdcf246f6e89ecf428d84dee0a25ad361e..7ba937399ac783c5cbf3c92f7497227d2feaa3cc 100644 (file)
@@ -269,6 +269,9 @@ do_control(struct snd_midi_op *ops, void *drv, struct snd_midi_channel_set *chse
 {
        int  i;
 
+       if (control >= ARRAY_SIZE(chan->control))
+               return;
+
        /* Switches */
        if ((control >=64 && control <=69) || (control >= 80 && control <= 83)) {
                /* These are all switches; either off or on so set to 0 or 127 */
index 0d580186ef1ac379bcd2cb699ac2f33baeac9029..5cc356db5351d903a7199b233fdfffc2bd8a674e 100644 (file)
@@ -33,7 +33,7 @@
  */
 #define MAX_MIDI_RX_BLOCKS     8
 
-#define TRANSFER_DELAY_TICKS   0x2e00 /* 479.17 Âµs */
+#define TRANSFER_DELAY_TICKS   0x2e00 /* 479.17 microseconds */
 
 /* isochronous header parameters */
 #define ISO_DATA_LENGTH_SHIFT  16
@@ -78,7 +78,7 @@ static void pcm_period_tasklet(unsigned long data);
 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
                      enum amdtp_stream_direction dir, enum cip_flags flags)
 {
-       s->unit = fw_unit_get(unit);
+       s->unit = unit;
        s->direction = dir;
        s->flags = flags;
        s->context = ERR_PTR(-1);
@@ -102,7 +102,6 @@ void amdtp_stream_destroy(struct amdtp_stream *s)
 {
        WARN_ON(amdtp_stream_running(s));
        mutex_destroy(&s->mutex);
-       fw_unit_put(s->unit);
 }
 EXPORT_SYMBOL(amdtp_stream_destroy);
 
index fc19c99654aa0284d400e58c51abc1a0c35495ee..611b7dae7ee54c932394c713022fd4501c84f7ca 100644 (file)
@@ -116,11 +116,22 @@ end:
        return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, at return from .remove(), this module still keeps
+ * references to the unit.
+ */
 static void
 bebob_card_free(struct snd_card *card)
 {
        struct snd_bebob *bebob = card->private_data;
 
+       snd_bebob_stream_destroy_duplex(bebob);
+       fw_unit_put(bebob->unit);
+
+       kfree(bebob->maudio_special_quirk);
+
        if (bebob->card_index >= 0) {
                mutex_lock(&devices_mutex);
                clear_bit(bebob->card_index, devices_used);
@@ -205,7 +216,7 @@ bebob_probe(struct fw_unit *unit,
        card->private_free = bebob_card_free;
 
        bebob->card = card;
-       bebob->unit = unit;
+       bebob->unit = fw_unit_get(unit);
        bebob->spec = spec;
        mutex_init(&bebob->mutex);
        spin_lock_init(&bebob->lock);
@@ -306,10 +317,11 @@ static void bebob_remove(struct fw_unit *unit)
        if (bebob == NULL)
                return;
 
-       kfree(bebob->maudio_special_quirk);
+       /* Awake bus-reset waiters. */
+       if (!completion_done(&bebob->bus_reset))
+               complete_all(&bebob->bus_reset);
 
-       snd_bebob_stream_destroy_duplex(bebob);
-       snd_card_disconnect(bebob->card);
+       /* No need to wait for releasing card object in this context. */
        snd_card_free_when_closed(bebob->card);
 }
 
index 0ebcabfdc7ce0162c9a77ed30ca038e6588cae63..98e4fc8121a1f4bdad82d892d79fa5c8241086af 100644 (file)
@@ -410,8 +410,6 @@ break_both_connections(struct snd_bebob *bebob)
 static void
 destroy_both_connections(struct snd_bebob *bebob)
 {
-       break_both_connections(bebob);
-
        cmp_connection_destroy(&bebob->in_conn);
        cmp_connection_destroy(&bebob->out_conn);
 }
@@ -712,22 +710,16 @@ void snd_bebob_stream_update_duplex(struct snd_bebob *bebob)
        mutex_unlock(&bebob->mutex);
 }
 
+/*
+ * This function should be called before starting streams or after stopping
+ * streams.
+ */
 void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob)
 {
-       mutex_lock(&bebob->mutex);
-
-       amdtp_stream_pcm_abort(&bebob->rx_stream);
-       amdtp_stream_pcm_abort(&bebob->tx_stream);
-
-       amdtp_stream_stop(&bebob->rx_stream);
-       amdtp_stream_stop(&bebob->tx_stream);
-
        amdtp_stream_destroy(&bebob->rx_stream);
        amdtp_stream_destroy(&bebob->tx_stream);
 
        destroy_both_connections(bebob);
-
-       mutex_unlock(&bebob->mutex);
 }
 
 /*
index fa9cf761b610ad81e537634590f6f33cff7e674e..07dbd01d7a6bd336d901fa78b83365b44307b1a0 100644 (file)
@@ -311,14 +311,21 @@ end:
        return err;
 }
 
+/*
+ * This function should be called before starting streams or after stopping
+ * streams.
+ */
 static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream)
 {
-       amdtp_stream_destroy(stream);
+       struct fw_iso_resources *resources;
 
        if (stream == &dice->tx_stream)
-               fw_iso_resources_destroy(&dice->tx_resources);
+               resources = &dice->tx_resources;
        else
-               fw_iso_resources_destroy(&dice->rx_resources);
+               resources = &dice->rx_resources;
+
+       amdtp_stream_destroy(stream);
+       fw_iso_resources_destroy(resources);
 }
 
 int snd_dice_stream_init_duplex(struct snd_dice *dice)
@@ -332,6 +339,8 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
                goto end;
 
        err = init_stream(dice, &dice->rx_stream);
+       if (err < 0)
+               destroy_stream(dice, &dice->tx_stream);
 end:
        return err;
 }
@@ -340,10 +349,7 @@ void snd_dice_stream_destroy_duplex(struct snd_dice *dice)
 {
        snd_dice_transaction_clear_enable(dice);
 
-       stop_stream(dice, &dice->tx_stream);
        destroy_stream(dice, &dice->tx_stream);
-
-       stop_stream(dice, &dice->rx_stream);
        destroy_stream(dice, &dice->rx_stream);
 
        dice->substreams_counter = 0;
index 90d8f40ff72712f2ac67dc8f978855d59fd5f8d6..70a111d7f428af4a0487f80cf350767935c8cac0 100644 (file)
@@ -226,11 +226,20 @@ static void dice_card_strings(struct snd_dice *dice)
        strcpy(card->mixername, "DICE");
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, at return from .remove(), this module still keeps
+ * references to the unit.
+ */
 static void dice_card_free(struct snd_card *card)
 {
        struct snd_dice *dice = card->private_data;
 
+       snd_dice_stream_destroy_duplex(dice);
        snd_dice_transaction_destroy(dice);
+       fw_unit_put(dice->unit);
+
        mutex_destroy(&dice->mutex);
 }
 
@@ -251,7 +260,7 @@ static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 
        dice = card->private_data;
        dice->card = card;
-       dice->unit = unit;
+       dice->unit = fw_unit_get(unit);
        card->private_free = dice_card_free;
 
        spin_lock_init(&dice->lock);
@@ -305,10 +314,7 @@ static void dice_remove(struct fw_unit *unit)
 {
        struct snd_dice *dice = dev_get_drvdata(&unit->device);
 
-       snd_card_disconnect(dice->card);
-
-       snd_dice_stream_destroy_duplex(dice);
-
+       /* No need to wait for releasing card object in this context. */
        snd_card_free_when_closed(dice->card);
 }
 
index 3e2ed8e82cbc49b4699c82ad30776ef8a4300253..2682e7e3e5c98511e8bd26fddf452427f1e83a6a 100644 (file)
@@ -173,11 +173,23 @@ end:
        return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, at return from .remove(), this module still keeps
+ * references to the unit.
+ */
 static void
 efw_card_free(struct snd_card *card)
 {
        struct snd_efw *efw = card->private_data;
 
+       snd_efw_stream_destroy_duplex(efw);
+       snd_efw_transaction_remove_instance(efw);
+       fw_unit_put(efw->unit);
+
+       kfree(efw->resp_buf);
+
        if (efw->card_index >= 0) {
                mutex_lock(&devices_mutex);
                clear_bit(efw->card_index, devices_used);
@@ -185,7 +197,6 @@ efw_card_free(struct snd_card *card)
        }
 
        mutex_destroy(&efw->mutex);
-       kfree(efw->resp_buf);
 }
 
 static int
@@ -218,7 +229,7 @@ efw_probe(struct fw_unit *unit,
        card->private_free = efw_card_free;
 
        efw->card = card;
-       efw->unit = unit;
+       efw->unit = fw_unit_get(unit);
        mutex_init(&efw->mutex);
        spin_lock_init(&efw->lock);
        init_waitqueue_head(&efw->hwdep_wait);
@@ -289,10 +300,7 @@ static void efw_remove(struct fw_unit *unit)
 {
        struct snd_efw *efw = dev_get_drvdata(&unit->device);
 
-       snd_efw_stream_destroy_duplex(efw);
-       snd_efw_transaction_remove_instance(efw);
-
-       snd_card_disconnect(efw->card);
+       /* No need to wait for releasing card object in this context. */
        snd_card_free_when_closed(efw->card);
 }
 
index 4f440e16366780f097d8c03daeb45e01fb5e7eb0..c55db1bddc80a0ceab4997279643f73840943cef 100644 (file)
@@ -100,17 +100,22 @@ end:
        return err;
 }
 
+/*
+ * This function should be called before starting the stream or after stopping
+ * the stream.
+ */
 static void
 destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
 {
-       stop_stream(efw, stream);
-
-       amdtp_stream_destroy(stream);
+       struct cmp_connection *conn;
 
        if (stream == &efw->tx_stream)
-               cmp_connection_destroy(&efw->out_conn);
+               conn = &efw->out_conn;
        else
-               cmp_connection_destroy(&efw->in_conn);
+               conn = &efw->in_conn;
+
+       amdtp_stream_destroy(stream);
+       cmp_connection_destroy(&efw->out_conn);
 }
 
 static int
@@ -319,12 +324,8 @@ void snd_efw_stream_update_duplex(struct snd_efw *efw)
 
 void snd_efw_stream_destroy_duplex(struct snd_efw *efw)
 {
-       mutex_lock(&efw->mutex);
-
        destroy_stream(efw, &efw->rx_stream);
        destroy_stream(efw, &efw->tx_stream);
-
-       mutex_unlock(&efw->mutex);
 }
 
 void snd_efw_stream_lock_changed(struct snd_efw *efw)
index bda845afb470703ff0c05bd69f1caac5bfeabbd6..29ccb3637164f82846022c37b2818e1a6ad4989e 100644 (file)
@@ -337,6 +337,10 @@ void snd_oxfw_stream_stop_simplex(struct snd_oxfw *oxfw,
        stop_stream(oxfw, stream);
 }
 
+/*
+ * This function should be called before starting the stream or after stopping
+ * the stream.
+ */
 void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
                                     struct amdtp_stream *stream)
 {
@@ -347,8 +351,6 @@ void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
        else
                conn = &oxfw->in_conn;
 
-       stop_stream(oxfw, stream);
-
        amdtp_stream_destroy(stream);
        cmp_connection_destroy(conn);
 }
index 60e5cad0531aeb181d4cc6558f44b4cd6896f070..8c6ce019f437c310043a3686b634def68c92e424 100644 (file)
@@ -104,11 +104,23 @@ end:
        return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, at return from .remove(), this module still keeps
+ * references to the unit.
+ */
 static void oxfw_card_free(struct snd_card *card)
 {
        struct snd_oxfw *oxfw = card->private_data;
        unsigned int i;
 
+       snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
+       if (oxfw->has_output)
+               snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+
+       fw_unit_put(oxfw->unit);
+
        for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
                kfree(oxfw->tx_stream_formats[i]);
                kfree(oxfw->rx_stream_formats[i]);
@@ -136,7 +148,7 @@ static int oxfw_probe(struct fw_unit *unit,
        oxfw = card->private_data;
        oxfw->card = card;
        mutex_init(&oxfw->mutex);
-       oxfw->unit = unit;
+       oxfw->unit = fw_unit_get(unit);
        oxfw->device_info = (const struct device_info *)id->driver_data;
        spin_lock_init(&oxfw->lock);
        init_waitqueue_head(&oxfw->hwdep_wait);
@@ -212,12 +224,7 @@ static void oxfw_remove(struct fw_unit *unit)
 {
        struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
 
-       snd_card_disconnect(oxfw->card);
-
-       snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
-       if (oxfw->has_output)
-               snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
-
+       /* No need to wait for releasing card object in this context. */
        snd_card_free_when_closed(oxfw->card);
 }
 
index dfcb5e929f9fc9643701eb33f04aa8efd7f101d3..a2ce773bdc624172b399afbb9207ed970b4c139f 100644 (file)
@@ -961,7 +961,6 @@ static int azx_alloc_cmd_io(struct azx *chip)
                dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
        return err;
 }
-EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
 
 static void azx_init_cmd_io(struct azx *chip)
 {
@@ -1026,7 +1025,6 @@ static void azx_init_cmd_io(struct azx *chip)
        azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
        spin_unlock_irq(&chip->reg_lock);
 }
-EXPORT_SYMBOL_GPL(azx_init_cmd_io);
 
 static void azx_free_cmd_io(struct azx *chip)
 {
@@ -1036,7 +1034,6 @@ static void azx_free_cmd_io(struct azx *chip)
        azx_writeb(chip, CORBCTL, 0);
        spin_unlock_irq(&chip->reg_lock);
 }
-EXPORT_SYMBOL_GPL(azx_free_cmd_io);
 
 static unsigned int azx_command_addr(u32 cmd)
 {
@@ -1316,7 +1313,6 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
        else
                return azx_corb_send_cmd(bus, val);
 }
-EXPORT_SYMBOL_GPL(azx_send_cmd);
 
 /* get a response */
 static unsigned int azx_get_response(struct hda_bus *bus,
@@ -1330,7 +1326,6 @@ static unsigned int azx_get_response(struct hda_bus *bus,
        else
                return azx_rirb_get_response(bus, addr);
 }
-EXPORT_SYMBOL_GPL(azx_get_response);
 
 #ifdef CONFIG_SND_HDA_DSP_LOADER
 /*
index 36d2f20db7a4201b999155e759e3e22b6fe3f753..4ca3d5d02436daf0ec7ec7274c675c4beae74423 100644 (file)
@@ -1966,7 +1966,7 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Panther Point */
        { PCI_DEVICE(0x8086, 0x1e20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Lynx Point */
        { PCI_DEVICE(0x8086, 0x8c20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
index 227990bc02e38cb459921ea5f08d7945eb540b51..375e94f4cf5265ba19378f96998658a5e4fed91c 100644 (file)
@@ -329,8 +329,8 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hda->regs = devm_ioremap_resource(dev, res);
-       if (IS_ERR(chip->remap_addr))
-               return PTR_ERR(chip->remap_addr);
+       if (IS_ERR(hda->regs))
+               return PTR_ERR(hda->regs);
 
        chip->remap_addr = hda->regs + HDA_BAR0;
        chip->addr = res->start + HDA_BAR0;
index ddb93083a2af1ea15ffe2374a81193481a2f0da1..b2b24a8b3dac8c49d2bb55c3142eecd967e73608 100644 (file)
@@ -4937,6 +4937,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
        /* ALC282 */
+       SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
index 6d36c5b7880504457430718a8f23f2dbc0e9a9ba..87eff3173ce924ae89596068b5bf8dcd38e7482e 100644 (file)
@@ -79,6 +79,7 @@ enum {
        STAC_ALIENWARE_M17X,
        STAC_92HD89XX_HP_FRONT_JACK,
        STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
+       STAC_92HD73XX_ASUS_MOBO,
        STAC_92HD73XX_MODELS
 };
 
@@ -1911,7 +1912,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
        [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
-       }
+       },
+       [STAC_92HD73XX_ASUS_MOBO] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       /* enable 5.1 and SPDIF out */
+                       { 0x0c, 0x01014411 },
+                       { 0x0d, 0x01014410 },
+                       { 0x0e, 0x01014412 },
+                       { 0x22, 0x014b1180 },
+                       { }
+               }
+       },
 };
 
 static const struct hda_model_fixup stac92hd73xx_models[] = {
@@ -1923,6 +1935,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
        { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
        { .id = STAC_DELL_EQ, .name = "dell-eq" },
        { .id = STAC_ALIENWARE_M17X, .name = "alienware" },
+       { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
        {}
 };
 
@@ -1975,6 +1988,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
                                "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
                                "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
+                     STAC_92HD73XX_ASUS_MOBO),
        {} /* terminator */
 };
 
index 2c363fdca9fd00e0b82b99d029a4bd53a9728467..ca67f896d11757bc05be43804d367175b282dad0 100644 (file)
@@ -6082,6 +6082,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
                snd_pcm_hw_constraint_minmax(runtime,
                                             SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                             64, 8192);
+               snd_pcm_hw_constraint_minmax(runtime,
+                                            SNDRV_PCM_HW_PARAM_PERIODS,
+                                            2, 2);
                break;
        }
 
@@ -6156,6 +6159,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
                snd_pcm_hw_constraint_minmax(runtime,
                                             SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                             64, 8192);
+               snd_pcm_hw_constraint_minmax(runtime,
+                                            SNDRV_PCM_HW_PARAM_PERIODS,
+                                            2, 2);
                break;
        }
 
index d6fa9d5514e1923997066ec1e14c7dafac281387..7e21e8f85885e436cb896a806dde544d3cd8ec4d 100644 (file)
@@ -91,7 +91,8 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = {
                                  SNDRV_PCM_INFO_INTERLEAVED |
                                  SNDRV_PCM_INFO_PAUSE |
                                  SNDRV_PCM_INFO_RESUME |
-                                 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+                                 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+                                 SNDRV_PCM_INFO_DRAIN_TRIGGER,
        .formats                = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
                                  SNDRV_PCM_FMTBIT_S32_LE,
        .period_bytes_min       = PAGE_SIZE,
index 4864392bfcba7c0189a37b04f495ec20c2022f9d..c9917ca5de1a50285482da998fb03b305f81a5e0 100644 (file)
@@ -151,7 +151,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
                        hw.info |= SNDRV_PCM_INFO_BATCH;
 
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       addr_widths = dma_caps.dstn_addr_widths;
+                       addr_widths = dma_caps.dst_addr_widths;
                else
                        addr_widths = dma_caps.src_addr_widths;
        }
index 03fed6611d9e83d489e2ab66d94320c84eed4c18..2ed260b10f6dc02cd129550ba1067c878034cb07 100644 (file)
@@ -303,6 +303,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
                return err;
        }
 
+       /* Don't check the sample rate for devices which we know don't
+        * support reading */
+       if (snd_usb_get_sample_rate_quirk(chip))
+               return 0;
+
        if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
                                   USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
                                   UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
index 99b63a7902f302f4a432425d7a6c3d23dbd571f7..81b7da8e56d39e352a5b629bbdf73aa460889b4d 100644 (file)
@@ -302,14 +302,17 @@ static void line6_data_received(struct urb *urb)
 /*
        Read data from device.
 */
-int line6_read_data(struct usb_line6 *line6, int address, void *data,
-                   size_t datalen)
+int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
+                   unsigned datalen)
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
        unsigned char len;
        unsigned count;
 
+       if (address > 0xffff || datalen > 0xff)
+               return -EINVAL;
+
        /* query the serial number: */
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -370,14 +373,17 @@ EXPORT_SYMBOL_GPL(line6_read_data);
 /*
        Write data to device.
 */
-int line6_write_data(struct usb_line6 *line6, int address, void *data,
-                    size_t datalen)
+int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
+                    unsigned datalen)
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
        unsigned char status;
        int count;
 
+       if (address > 0xffff || datalen > 0xffff)
+               return -EINVAL;
+
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
                              0x0022, address, data, datalen,
index 5d20294d64f43be80334bb0a902deb7ff7e63da0..7da643e79e3b50426c3cbaf4d46184d7283f3848 100644 (file)
@@ -147,8 +147,8 @@ struct usb_line6 {
 
 extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1,
                                      int code2, int size);
-extern int line6_read_data(struct usb_line6 *line6, int address, void *data,
-                          size_t datalen);
+extern int line6_read_data(struct usb_line6 *line6, unsigned address,
+                          void *data, unsigned datalen);
 extern int line6_read_serial_number(struct usb_line6 *line6,
                                    u32 *serial_number);
 extern int line6_send_raw_message_async(struct usb_line6 *line6,
@@ -161,8 +161,8 @@ extern void line6_start_timer(struct timer_list *timer, unsigned long msecs,
                              void (*function)(unsigned long),
                              unsigned long data);
 extern int line6_version_request_async(struct usb_line6 *line6);
-extern int line6_write_data(struct usb_line6 *line6, int address, void *data,
-                           size_t datalen);
+extern int line6_write_data(struct usb_line6 *line6, unsigned address,
+                           void *data, unsigned datalen);
 
 int line6_probe(struct usb_interface *interface,
                const struct usb_device_id *id,
index a7398412310bd53e00b84c2aa6c567ac451a4e7a..753a47de8459b7a0b505e72d2f660793d9ede885 100644 (file)
@@ -1111,6 +1111,11 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        }
 }
 
+bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+{
+       /* MS Lifecam HD-5000 doesn't support reading the sample rate. */
+       return chip->usb_id == USB_ID(0x045E, 0x076D);
+}
 
 /* Marantz/Denon USB DACs need a vendor cmd to switch
  * between PCM and native DSD mode
@@ -1122,6 +1127,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
        int err;
 
        switch (subs->stream->chip->usb_id) {
+       case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
 
@@ -1201,6 +1207,7 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
 
                switch (le16_to_cpu(dev->descriptor.idProduct)) {
+               case 0x1003: /* Denon DA300-USB */
                case 0x3005: /* Marantz HD-DAC1 */
                case 0x3006: /* Marantz SA-14S1 */
                        mdelay(20);
@@ -1262,6 +1269,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 
        /* Denon/Marantz devices with USB DAC functionality */
        switch (chip->usb_id) {
+       case USB_ID(0x154e, 0x1003): /* Denon DA300-USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
                if (fp->altsetting == 2)
index 1b862386577d036d262a2d760f4f3551337ac8be..2cd71ed1201f93ea8e6b54e1d487a84751e09885 100644 (file)
@@ -21,6 +21,8 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
                              struct audioformat *fmt);
 
+bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip);
+
 int snd_usb_is_big_endian_format(struct snd_usb_audio *chip,
                                 struct audioformat *fp);
 
index 97bca4871ea34d8b1f4adc3422907b3cecb5c45c..a107b5e4da134121248445db0b48da733821cf34 100644 (file)
@@ -1,7 +1,13 @@
 # This creates the demonstration utility "lguest" which runs a Linux guest.
-CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
+CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE -Iinclude
 
 all: lguest
 
+include/linux/virtio_types.h: ../../include/uapi/linux/virtio_types.h
+       mkdir -p include/linux 2>&1 || true
+       ln -sf ../../../../include/uapi/linux/virtio_types.h $@
+
+lguest: include/linux/virtio_types.h
+
 clean:
        rm -f lguest
index 32cf2ce15d69bcfca9c24da9ad318fc1a2e84eb2..e44052483ed933bbbf8f09cd91a84d1ef1790835 100644 (file)
@@ -41,6 +41,8 @@
 #include <signal.h>
 #include <pwd.h>
 #include <grp.h>
+#include <sys/user.h>
+#include <linux/pci_regs.h>
 
 #ifndef VIRTIO_F_ANY_LAYOUT
 #define VIRTIO_F_ANY_LAYOUT            27
@@ -61,12 +63,19 @@ typedef uint16_t u16;
 typedef uint8_t u8;
 /*:*/
 
-#include <linux/virtio_config.h>
-#include <linux/virtio_net.h>
-#include <linux/virtio_blk.h>
-#include <linux/virtio_console.h>
-#include <linux/virtio_rng.h>
+#define VIRTIO_CONFIG_NO_LEGACY
+#define VIRTIO_PCI_NO_LEGACY
+#define VIRTIO_BLK_NO_LEGACY
+#define VIRTIO_NET_NO_LEGACY
+
+/* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */
+#include "../../include/uapi/linux/virtio_config.h"
+#include "../../include/uapi/linux/virtio_net.h"
+#include "../../include/uapi/linux/virtio_blk.h"
+#include "../../include/uapi/linux/virtio_console.h"
+#include "../../include/uapi/linux/virtio_rng.h"
 #include <linux/virtio_ring.h>
+#include "../../include/uapi/linux/virtio_pci.h"
 #include <asm/bootparam.h>
 #include "../../include/linux/lguest_launcher.h"
 
@@ -91,13 +100,16 @@ static bool verbose;
 /* The pointer to the start of guest memory. */
 static void *guest_base;
 /* The maximum guest physical address allowed, and maximum possible. */
-static unsigned long guest_limit, guest_max;
+static unsigned long guest_limit, guest_max, guest_mmio;
 /* The /dev/lguest file descriptor. */
 static int lguest_fd;
 
 /* a per-cpu variable indicating whose vcpu is currently running */
 static unsigned int __thread cpu_id;
 
+/* 5 bit device number in the PCI_CONFIG_ADDR => 32 only */
+#define MAX_PCI_DEVICES 32
+
 /* This is our list of devices. */
 struct device_list {
        /* Counter to assign interrupt numbers. */
@@ -106,30 +118,50 @@ struct device_list {
        /* Counter to print out convenient device numbers. */
        unsigned int device_num;
 
-       /* The descriptor page for the devices. */
-       u8 *descpage;
-
-       /* A single linked list of devices. */
-       struct device *dev;
-       /* And a pointer to the last device for easy append. */
-       struct device *lastdev;
+       /* PCI devices. */
+       struct device *pci[MAX_PCI_DEVICES];
 };
 
 /* The list of Guest devices, based on command line arguments. */
 static struct device_list devices;
 
-/* The device structure describes a single device. */
-struct device {
-       /* The linked-list pointer. */
-       struct device *next;
+struct virtio_pci_cfg_cap {
+       struct virtio_pci_cap cap;
+       u32 pci_cfg_data; /* Data for BAR access. */
+};
 
-       /* The device's descriptor, as mapped into the Guest. */
-       struct lguest_device_desc *desc;
+struct virtio_pci_mmio {
+       struct virtio_pci_common_cfg cfg;
+       u16 notify;
+       u8 isr;
+       u8 padding;
+       /* Device-specific configuration follows this. */
+};
 
-       /* We can't trust desc values once Guest has booted: we use these. */
-       unsigned int feature_len;
-       unsigned int num_vq;
+/* This is the layout (little-endian) of the PCI config space. */
+struct pci_config {
+       u16 vendor_id, device_id;
+       u16 command, status;
+       u8 revid, prog_if, subclass, class;
+       u8 cacheline_size, lat_timer, header_type, bist;
+       u32 bar[6];
+       u32 cardbus_cis_ptr;
+       u16 subsystem_vendor_id, subsystem_device_id;
+       u32 expansion_rom_addr;
+       u8 capabilities, reserved1[3];
+       u32 reserved2;
+       u8 irq_line, irq_pin, min_grant, max_latency;
+
+       /* Now, this is the linked capability list. */
+       struct virtio_pci_cap common;
+       struct virtio_pci_notify_cap notify;
+       struct virtio_pci_cap isr;
+       struct virtio_pci_cap device;
+       struct virtio_pci_cfg_cap cfg_access;
+};
 
+/* The device structure describes a single device. */
+struct device {
        /* The name of this device, for --verbose. */
        const char *name;
 
@@ -139,6 +171,25 @@ struct device {
        /* Is it operational */
        bool running;
 
+       /* Has it written FEATURES_OK but not re-checked it? */
+       bool wrote_features_ok;
+
+       /* PCI configuration */
+       union {
+               struct pci_config config;
+               u32 config_words[sizeof(struct pci_config) / sizeof(u32)];
+       };
+
+       /* Features we offer, and those accepted. */
+       u64 features, features_accepted;
+
+       /* Device-specific config hangs off the end of this. */
+       struct virtio_pci_mmio *mmio;
+
+       /* PCI MMIO resources (all in BAR0) */
+       size_t mmio_size;
+       u32 mmio_addr;
+
        /* Device-specific data. */
        void *priv;
 };
@@ -150,12 +201,15 @@ struct virtqueue {
        /* Which device owns me. */
        struct device *dev;
 
-       /* The configuration for this queue. */
-       struct lguest_vqconfig config;
+       /* Name for printing errors. */
+       const char *name;
 
        /* The actual ring of buffers. */
        struct vring vring;
 
+       /* The information about this virtqueue (we only use queue_size on) */
+       struct virtio_pci_common_cfg pci_config;
+
        /* Last available index we saw. */
        u16 last_avail_idx;
 
@@ -199,6 +253,16 @@ static struct termios orig_term;
 #define le32_to_cpu(v32) (v32)
 #define le64_to_cpu(v64) (v64)
 
+/*
+ * A real device would ignore weird/non-compliant driver behaviour.  We
+ * stop and flag it, to help debugging Linux problems.
+ */
+#define bad_driver(d, fmt, ...) \
+       errx(1, "%s: bad driver: " fmt, (d)->name, ## __VA_ARGS__)
+#define bad_driver_vq(vq, fmt, ...)                           \
+       errx(1, "%s vq %s: bad driver: " fmt, (vq)->dev->name, \
+            vq->name, ## __VA_ARGS__)
+
 /* Is this iovec empty? */
 static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
 {
@@ -211,7 +275,8 @@ static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
 }
 
 /* Take len bytes from the front of this iovec. */
-static void iov_consume(struct iovec iov[], unsigned num_iov,
+static void iov_consume(struct device *d,
+                       struct iovec iov[], unsigned num_iov,
                        void *dest, unsigned len)
 {
        unsigned int i;
@@ -229,14 +294,7 @@ static void iov_consume(struct iovec iov[], unsigned num_iov,
                len -= used;
        }
        if (len != 0)
-               errx(1, "iovec too short!");
-}
-
-/* The device virtqueue descriptors are followed by feature bitmasks. */
-static u8 *get_feature_bits(struct device *dev)
-{
-       return (u8 *)(dev->desc + 1)
-               + dev->num_vq * sizeof(struct lguest_vqconfig);
+               bad_driver(d, "iovec too short!");
 }
 
 /*L:100
@@ -309,14 +367,20 @@ static void *map_zeroed_pages(unsigned int num)
        return addr + getpagesize();
 }
 
-/* Get some more pages for a device. */
-static void *get_pages(unsigned int num)
+/* Get some bytes which won't be mapped into the guest. */
+static unsigned long get_mmio_region(size_t size)
 {
-       void *addr = from_guest_phys(guest_limit);
+       unsigned long addr = guest_mmio;
+       size_t i;
+
+       if (!size)
+               return addr;
+
+       /* Size has to be a power of 2 (and multiple of 16) */
+       for (i = 1; i < size; i <<= 1);
+
+       guest_mmio += i;
 
-       guest_limit += num * getpagesize();
-       if (guest_limit > guest_max)
-               errx(1, "Not enough memory for devices");
        return addr;
 }
 
@@ -547,9 +611,11 @@ static void tell_kernel(unsigned long start)
 {
        unsigned long args[] = { LHREQ_INITIALIZE,
                                 (unsigned long)guest_base,
-                                guest_limit / getpagesize(), start };
-       verbose("Guest: %p - %p (%#lx)\n",
-               guest_base, guest_base + guest_limit, guest_limit);
+                                guest_limit / getpagesize(), start,
+                                (guest_mmio+getpagesize()-1) / getpagesize() };
+       verbose("Guest: %p - %p (%#lx, MMIO %#lx)\n",
+               guest_base, guest_base + guest_limit,
+               guest_limit, guest_mmio);
        lguest_fd = open_or_die("/dev/lguest", O_RDWR);
        if (write(lguest_fd, args, sizeof(args)) < 0)
                err(1, "Writing to /dev/lguest");
@@ -564,7 +630,8 @@ static void tell_kernel(unsigned long start)
  * we have a convenient routine which checks it and exits with an error message
  * if something funny is going on:
  */
-static void *_check_pointer(unsigned long addr, unsigned int size,
+static void *_check_pointer(struct device *d,
+                           unsigned long addr, unsigned int size,
                            unsigned int line)
 {
        /*
@@ -572,7 +639,8 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
         * or addr + size wraps around.
         */
        if ((addr + size) > guest_limit || (addr + size) < addr)
-               errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
+               bad_driver(d, "%s:%i: Invalid address %#lx",
+                          __FILE__, line, addr);
        /*
         * We return a pointer for the caller's convenience, now we know it's
         * safe to use.
@@ -580,14 +648,14 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
        return from_guest_phys(addr);
 }
 /* A macro which transparently hands the line number to the real function. */
-#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)
+#define check_pointer(d,addr,size) _check_pointer(d, addr, size, __LINE__)
 
 /*
  * Each buffer in the virtqueues is actually a chain of descriptors.  This
  * function returns the next descriptor in the chain, or vq->vring.num if we're
  * at the end.
  */
-static unsigned next_desc(struct vring_desc *desc,
+static unsigned next_desc(struct device *d, struct vring_desc *desc,
                          unsigned int i, unsigned int max)
 {
        unsigned int next;
@@ -602,7 +670,7 @@ static unsigned next_desc(struct vring_desc *desc,
        wmb();
 
        if (next >= max)
-               errx(1, "Desc next is %u", next);
+               bad_driver(d, "Desc next is %u", next);
 
        return next;
 }
@@ -613,21 +681,48 @@ static unsigned next_desc(struct vring_desc *desc,
  */
 static void trigger_irq(struct virtqueue *vq)
 {
-       unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };
+       unsigned long buf[] = { LHREQ_IRQ, vq->dev->config.irq_line };
 
        /* Don't inform them if nothing used. */
        if (!vq->pending_used)
                return;
        vq->pending_used = 0;
 
-       /* If they don't want an interrupt, don't send one... */
+       /*
+        * 2.4.7.1:
+        *
+        *  If the VIRTIO_F_EVENT_IDX feature bit is not negotiated:
+        *    The driver MUST set flags to 0 or 1. 
+        */
+       if (vq->vring.avail->flags > 1)
+               bad_driver_vq(vq, "avail->flags = %u\n", vq->vring.avail->flags);
+
+       /*
+        * 2.4.7.2:
+        *
+        *  If the VIRTIO_F_EVENT_IDX feature bit is not negotiated:
+        *
+        *     - The device MUST ignore the used_event value.
+        *     - After the device writes a descriptor index into the used ring:
+        *         - If flags is 1, the device SHOULD NOT send an interrupt.
+        *         - If flags is 0, the device MUST send an interrupt.
+        */
        if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
                return;
        }
 
+       /*
+        * 4.1.4.5.1:
+        *
+        *  If MSI-X capability is disabled, the device MUST set the Queue
+        *  Interrupt bit in ISR status before sending a virtqueue notification
+        *  to the driver.
+        */
+       vq->dev->mmio->isr = 0x1;
+
        /* Send the Guest an interrupt tell them we used something up. */
        if (write(lguest_fd, buf, sizeof(buf)) != 0)
-               err(1, "Triggering irq %i", vq->config.irq);
+               err(1, "Triggering irq %i", vq->dev->config.irq_line);
 }
 
 /*
@@ -646,6 +741,14 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
        struct vring_desc *desc;
        u16 last_avail = lg_last_avail(vq);
 
+       /*
+        * 2.4.7.1:
+        *
+        *   The driver MUST handle spurious interrupts from the device.
+        *
+        * That's why this is a while loop.
+        */
+
        /* There's nothing available? */
        while (last_avail == vq->vring.avail->idx) {
                u64 event;
@@ -679,8 +782,8 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 
        /* Check it isn't doing very strange things with descriptor numbers. */
        if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
-               errx(1, "Guest moved used index from %u to %u",
-                    last_avail, vq->vring.avail->idx);
+               bad_driver_vq(vq, "Guest moved used index from %u to %u",
+                             last_avail, vq->vring.avail->idx);
 
        /* 
         * Make sure we read the descriptor number *after* we read the ring
@@ -697,7 +800,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 
        /* If their number is silly, that's a fatal mistake. */
        if (head >= vq->vring.num)
-               errx(1, "Guest says index %u is available", head);
+               bad_driver_vq(vq, "Guest says index %u is available", head);
 
        /* When we start there are none of either input nor output. */
        *out_num = *in_num = 0;
@@ -712,24 +815,73 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
         * that: no rmb() required.
         */
 
-       /*
-        * If this is an indirect entry, then this buffer contains a descriptor
-        * table which we handle as if it's any normal descriptor chain.
-        */
-       if (desc[i].flags & VRING_DESC_F_INDIRECT) {
-               if (desc[i].len % sizeof(struct vring_desc))
-                       errx(1, "Invalid size for indirect buffer table");
+       do {
+               /*
+                * If this is an indirect entry, then this buffer contains a
+                * descriptor table which we handle as if it's any normal
+                * descriptor chain.
+                */
+               if (desc[i].flags & VRING_DESC_F_INDIRECT) {
+                       /* 2.4.5.3.1:
+                        *
+                        *  The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT
+                        *  flag unless the VIRTIO_F_INDIRECT_DESC feature was
+                        *  negotiated.
+                        */
+                       if (!(vq->dev->features_accepted &
+                             (1<<VIRTIO_RING_F_INDIRECT_DESC)))
+                               bad_driver_vq(vq, "vq indirect not negotiated");
 
-               max = desc[i].len / sizeof(struct vring_desc);
-               desc = check_pointer(desc[i].addr, desc[i].len);
-               i = 0;
-       }
+                       /*
+                        * 2.4.5.3.1:
+                        *
+                        *   The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT
+                        *   flag within an indirect descriptor (ie. only one
+                        *   table per descriptor).
+                        */
+                       if (desc != vq->vring.desc)
+                               bad_driver_vq(vq, "Indirect within indirect");
+
+                       /*
+                        * Proposed update VIRTIO-134 spells this out:
+                        *
+                        *   A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT
+                        *   and VIRTQ_DESC_F_NEXT in flags.
+                        */
+                       if (desc[i].flags & VRING_DESC_F_NEXT)
+                               bad_driver_vq(vq, "indirect and next together");
+
+                       if (desc[i].len % sizeof(struct vring_desc))
+                               bad_driver_vq(vq,
+                                             "Invalid size for indirect table");
+                       /*
+                        * 2.4.5.3.2:
+                        *
+                        *  The device MUST ignore the write-only flag
+                        *  (flags&VIRTQ_DESC_F_WRITE) in the descriptor that
+                        *  refers to an indirect table.
+                        *
+                        * We ignore it here: :)
+                        */
+
+                       max = desc[i].len / sizeof(struct vring_desc);
+                       desc = check_pointer(vq->dev, desc[i].addr, desc[i].len);
+                       i = 0;
+
+                       /* 2.4.5.3.1:
+                        *
+                        *  A driver MUST NOT create a descriptor chain longer
+                        *  than the Queue Size of the device.
+                        */
+                       if (max > vq->pci_config.queue_size)
+                               bad_driver_vq(vq,
+                                             "indirect has too many entries");
+               }
 
-       do {
                /* Grab the first descriptor, and check it's OK. */
                iov[*out_num + *in_num].iov_len = desc[i].len;
                iov[*out_num + *in_num].iov_base
-                       = check_pointer(desc[i].addr, desc[i].len);
+                       = check_pointer(vq->dev, desc[i].addr, desc[i].len);
                /* If this is an input descriptor, increment that count. */
                if (desc[i].flags & VRING_DESC_F_WRITE)
                        (*in_num)++;
@@ -739,14 +891,15 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
                         * to come before any input descriptors.
                         */
                        if (*in_num)
-                               errx(1, "Descriptor has out after in");
+                               bad_driver_vq(vq,
+                                             "Descriptor has out after in");
                        (*out_num)++;
                }
 
                /* If we've got too many, that implies a descriptor loop. */
                if (*out_num + *in_num > max)
-                       errx(1, "Looped descriptor");
-       } while ((i = next_desc(desc, i, max)) != max);
+                       bad_driver_vq(vq, "Looped descriptor");
+       } while ((i = next_desc(vq->dev, desc, i, max)) != max);
 
        return head;
 }
@@ -803,7 +956,7 @@ static void console_input(struct virtqueue *vq)
        /* Make sure there's a descriptor available. */
        head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
        if (out_num)
-               errx(1, "Output buffers in console in queue?");
+               bad_driver_vq(vq, "Output buffers in console in queue?");
 
        /* Read into it.  This is where we usually wait. */
        len = readv(STDIN_FILENO, iov, in_num);
@@ -856,7 +1009,7 @@ static void console_output(struct virtqueue *vq)
        /* We usually wait in here, for the Guest to give us something. */
        head = wait_for_vq_desc(vq, iov, &out, &in);
        if (in)
-               errx(1, "Input buffers in console output queue?");
+               bad_driver_vq(vq, "Input buffers in console output queue?");
 
        /* writev can return a partial write, so we loop here. */
        while (!iov_empty(iov, out)) {
@@ -865,7 +1018,7 @@ static void console_output(struct virtqueue *vq)
                        warn("Write to stdout gave %i (%d)", len, errno);
                        break;
                }
-               iov_consume(iov, out, NULL, len);
+               iov_consume(vq->dev, iov, out, NULL, len);
        }
 
        /*
@@ -894,7 +1047,7 @@ static void net_output(struct virtqueue *vq)
        /* We usually wait in here for the Guest to give us a packet. */
        head = wait_for_vq_desc(vq, iov, &out, &in);
        if (in)
-               errx(1, "Input buffers in net output queue?");
+               bad_driver_vq(vq, "Input buffers in net output queue?");
        /*
         * Send the whole thing through to /dev/net/tun.  It expects the exact
         * same format: what a coincidence!
@@ -942,7 +1095,7 @@ static void net_input(struct virtqueue *vq)
         */
        head = wait_for_vq_desc(vq, iov, &out, &in);
        if (out)
-               errx(1, "Output buffers in net input queue?");
+               bad_driver_vq(vq, "Output buffers in net input queue?");
 
        /*
         * If it looks like we'll block reading from the tun device, send them
@@ -986,6 +1139,12 @@ static void kill_launcher(int signal)
        kill(0, SIGTERM);
 }
 
+static void reset_vq_pci_config(struct virtqueue *vq)
+{
+       vq->pci_config.queue_size = VIRTQUEUE_NUM;
+       vq->pci_config.queue_enable = 0;
+}
+
 static void reset_device(struct device *dev)
 {
        struct virtqueue *vq;
@@ -993,53 +1152,705 @@ static void reset_device(struct device *dev)
        verbose("Resetting device %s\n", dev->name);
 
        /* Clear any features they've acked. */
-       memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len);
+       dev->features_accepted = 0;
 
        /* We're going to be explicitly killing threads, so ignore them. */
        signal(SIGCHLD, SIG_IGN);
 
-       /* Zero out the virtqueues, get rid of their threads */
+       /*
+        * 4.1.4.3.1:
+        *
+        *   The device MUST present a 0 in queue_enable on reset. 
+        *
+        * This means we set it here, and reset the saved ones in every vq.
+        */
+       dev->mmio->cfg.queue_enable = 0;
+
+       /* Get rid of the virtqueue threads */
        for (vq = dev->vq; vq; vq = vq->next) {
+               vq->last_avail_idx = 0;
+               reset_vq_pci_config(vq);
                if (vq->thread != (pid_t)-1) {
                        kill(vq->thread, SIGTERM);
                        waitpid(vq->thread, NULL, 0);
                        vq->thread = (pid_t)-1;
                }
-               memset(vq->vring.desc, 0,
-                      vring_size(vq->config.num, LGUEST_VRING_ALIGN));
-               lg_last_avail(vq) = 0;
        }
        dev->running = false;
+       dev->wrote_features_ok = false;
 
        /* Now we care if threads die. */
        signal(SIGCHLD, (void *)kill_launcher);
 }
 
+static void cleanup_devices(void)
+{
+       unsigned int i;
+
+       for (i = 1; i < MAX_PCI_DEVICES; i++) {
+               struct device *d = devices.pci[i];
+               if (!d)
+                       continue;
+               reset_device(d);
+       }
+
+       /* If we saved off the original terminal settings, restore them now. */
+       if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
+               tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
+}
+
+/*L:217
+ * We do PCI.  This is mainly done to let us test the kernel virtio PCI
+ * code.
+ */
+
+/* Linux expects a PCI host bridge: ours is a dummy, and first on the bus. */
+static struct device pci_host_bridge;
+
+static void init_pci_host_bridge(void)
+{
+       pci_host_bridge.name = "PCI Host Bridge";
+       pci_host_bridge.config.class = 0x06; /* bridge */
+       pci_host_bridge.config.subclass = 0; /* host bridge */
+       devices.pci[0] = &pci_host_bridge;
+}
+
+/* The IO ports used to read the PCI config space. */
+#define PCI_CONFIG_ADDR 0xCF8
+#define PCI_CONFIG_DATA 0xCFC
+
+/*
+ * Not really portable, but does help readability: this is what the Guest
+ * writes to the PCI_CONFIG_ADDR IO port.
+ */
+union pci_config_addr {
+       struct {
+               unsigned mbz: 2;
+               unsigned offset: 6;
+               unsigned funcnum: 3;
+               unsigned devnum: 5;
+               unsigned busnum: 8;
+               unsigned reserved: 7;
+               unsigned enabled : 1;
+       } bits;
+       u32 val;
+};
+
+/*
+ * We cache what they wrote to the address port, so we know what they're
+ * talking about when they access the data port.
+ */
+static union pci_config_addr pci_config_addr;
+
+static struct device *find_pci_device(unsigned int index)
+{
+       return devices.pci[index];
+}
+
+/* PCI can do 1, 2 and 4 byte reads; we handle that here. */
+static void ioread(u16 off, u32 v, u32 mask, u32 *val)
+{
+       assert(off < 4);
+       assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
+       *val = (v >> (off * 8)) & mask;
+}
+
+/* PCI can do 1, 2 and 4 byte writes; we handle that here. */
+static void iowrite(u16 off, u32 v, u32 mask, u32 *dst)
+{
+       assert(off < 4);
+       assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
+       *dst &= ~(mask << (off * 8));
+       *dst |= (v & mask) << (off * 8);
+}
+
+/*
+ * Where PCI_CONFIG_DATA accesses depend on the previous write to
+ * PCI_CONFIG_ADDR.
+ */
+static struct device *dev_and_reg(u32 *reg)
+{
+       if (!pci_config_addr.bits.enabled)
+               return NULL;
+
+       if (pci_config_addr.bits.funcnum != 0)
+               return NULL;
+
+       if (pci_config_addr.bits.busnum != 0)
+               return NULL;
+
+       if (pci_config_addr.bits.offset * 4 >= sizeof(struct pci_config))
+               return NULL;
+
+       *reg = pci_config_addr.bits.offset;
+       return find_pci_device(pci_config_addr.bits.devnum);
+}
+
+/*
+ * We can get invalid combinations of values while they're writing, so we
+ * only fault if they try to write with some invalid bar/offset/length.
+ */
+static bool valid_bar_access(struct device *d,
+                            struct virtio_pci_cfg_cap *cfg_access)
+{
+       /* We only have 1 bar (BAR0) */
+       if (cfg_access->cap.bar != 0)
+               return false;
+
+       /* Check it's within BAR0. */
+       if (cfg_access->cap.offset >= d->mmio_size
+           || cfg_access->cap.offset + cfg_access->cap.length > d->mmio_size)
+               return false;
+
+       /* Check length is 1, 2 or 4. */
+       if (cfg_access->cap.length != 1
+           && cfg_access->cap.length != 2
+           && cfg_access->cap.length != 4)
+               return false;
+
+       /*
+        * 4.1.4.7.2:
+        *
+        *  The driver MUST NOT write a cap.offset which is not a multiple of
+        *  cap.length (ie. all accesses MUST be aligned).
+        */
+       if (cfg_access->cap.offset % cfg_access->cap.length != 0)
+               return false;
+
+       /* Return pointer into word in BAR0. */
+       return true;
+}
+
+/* Is this accessing the PCI config address port? */
+static bool is_pci_addr_port(u16 port)
+{
+       return port >= PCI_CONFIG_ADDR && port < PCI_CONFIG_ADDR + 4;
+}
+
+static bool pci_addr_iowrite(u16 port, u32 mask, u32 val)
+{
+       iowrite(port - PCI_CONFIG_ADDR, val, mask,
+               &pci_config_addr.val);
+       verbose("PCI%s: %#x/%x: bus %u dev %u func %u reg %u\n",
+               pci_config_addr.bits.enabled ? "" : " DISABLED",
+               val, mask,
+               pci_config_addr.bits.busnum,
+               pci_config_addr.bits.devnum,
+               pci_config_addr.bits.funcnum,
+               pci_config_addr.bits.offset);
+       return true;
+}
+
+static void pci_addr_ioread(u16 port, u32 mask, u32 *val)
+{
+       ioread(port - PCI_CONFIG_ADDR, pci_config_addr.val, mask, val);
+}
+
+/* Is this accessing the PCI config data port? */
+static bool is_pci_data_port(u16 port)
+{
+       return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4;
+}
+
+static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask);
+
+static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
+{
+       u32 reg, portoff;
+       struct device *d = dev_and_reg(&reg);
+
+       /* Complain if they don't belong to a device. */
+       if (!d)
+               return false;
+
+       /* They can do 1 byte writes, etc. */
+       portoff = port - PCI_CONFIG_DATA;
+
+       /*
+        * PCI uses a weird way to determine the BAR size: the OS
+        * writes all 1's, and sees which ones stick.
+        */
+       if (&d->config_words[reg] == &d->config.bar[0]) {
+               int i;
+
+               iowrite(portoff, val, mask, &d->config.bar[0]);
+               for (i = 0; (1 << i) < d->mmio_size; i++)
+                       d->config.bar[0] &= ~(1 << i);
+               return true;
+       } else if ((&d->config_words[reg] > &d->config.bar[0]
+                   && &d->config_words[reg] <= &d->config.bar[6])
+                  || &d->config_words[reg] == &d->config.expansion_rom_addr) {
+               /* Allow writing to any other BAR, or expansion ROM */
+               iowrite(portoff, val, mask, &d->config_words[reg]);
+               return true;
+               /* We let them override latency timer and cacheline size */
+       } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
+               /* Only let them change the first two fields. */
+               if (mask == 0xFFFFFFFF)
+                       mask = 0xFFFF;
+               iowrite(portoff, val, mask, &d->config_words[reg]);
+               return true;
+       } else if (&d->config_words[reg] == (void *)&d->config.command
+                  && mask == 0xFFFF) {
+               /* Ignore command writes. */
+               return true;
+       } else if (&d->config_words[reg]
+                  == (void *)&d->config.cfg_access.cap.bar
+                  || &d->config_words[reg]
+                  == &d->config.cfg_access.cap.length
+                  || &d->config_words[reg]
+                  == &d->config.cfg_access.cap.offset) {
+
+               /*
+                * The VIRTIO_PCI_CAP_PCI_CFG capability
+                * provides a backdoor to access the MMIO
+                * regions without mapping them.  Weird, but
+                * useful.
+                */
+               iowrite(portoff, val, mask, &d->config_words[reg]);
+               return true;
+       } else if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
+               u32 write_mask;
+
+               /*
+                * 4.1.4.7.1:
+                *
+                *  Upon detecting driver write access to pci_cfg_data, the
+                *  device MUST execute a write access at offset cap.offset at
+                *  BAR selected by cap.bar using the first cap.length bytes
+                *  from pci_cfg_data.
+                */
+
+               /* Must be bar 0 */
+               if (!valid_bar_access(d, &d->config.cfg_access))
+                       return false;
+
+               iowrite(portoff, val, mask, &d->config.cfg_access.pci_cfg_data);
+
+               /*
+                * Now emulate a write.  The mask we use is set by
+                * len, *not* this write!
+                */
+               write_mask = (1ULL<<(8*d->config.cfg_access.cap.length)) - 1;
+               verbose("Window writing %#x/%#x to bar %u, offset %u len %u\n",
+                       d->config.cfg_access.pci_cfg_data, write_mask,
+                       d->config.cfg_access.cap.bar,
+                       d->config.cfg_access.cap.offset,
+                       d->config.cfg_access.cap.length);
+
+               emulate_mmio_write(d, d->config.cfg_access.cap.offset,
+                                  d->config.cfg_access.pci_cfg_data,
+                                  write_mask);
+               return true;
+       }
+
+       /*
+        * 4.1.4.1:
+        *
+        *  The driver MUST NOT write into any field of the capability
+        *  structure, with the exception of those with cap_type
+        *  VIRTIO_PCI_CAP_PCI_CFG...
+        */
+       return false;
+}
+
+static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask);
+
+static void pci_data_ioread(u16 port, u32 mask, u32 *val)
+{
+       u32 reg;
+       struct device *d = dev_and_reg(&reg);
+
+       if (!d)
+               return;
+
+       /* Read through the PCI MMIO access window is special */
+       if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
+               u32 read_mask;
+
+               /*
+                * 4.1.4.7.1:
+                *
+                *  Upon detecting driver read access to pci_cfg_data, the
+                *  device MUST execute a read access of length cap.length at
+                *  offset cap.offset at BAR selected by cap.bar and store the
+                *  first cap.length bytes in pci_cfg_data.
+                */
+               /* Must be bar 0 */
+               if (!valid_bar_access(d, &d->config.cfg_access))
+                       bad_driver(d,
+                            "Invalid cfg_access to bar%u, offset %u len %u",
+                            d->config.cfg_access.cap.bar,
+                            d->config.cfg_access.cap.offset,
+                            d->config.cfg_access.cap.length);
+
+               /*
+                * Read into the window.  The mask we use is set by
+                * len, *not* this read!
+                */
+               read_mask = (1ULL<<(8*d->config.cfg_access.cap.length))-1;
+               d->config.cfg_access.pci_cfg_data
+                       = emulate_mmio_read(d,
+                                           d->config.cfg_access.cap.offset,
+                                           read_mask);
+               verbose("Window read %#x/%#x from bar %u, offset %u len %u\n",
+                       d->config.cfg_access.pci_cfg_data, read_mask,
+                       d->config.cfg_access.cap.bar,
+                       d->config.cfg_access.cap.offset,
+                       d->config.cfg_access.cap.length);
+       }
+       ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val);
+}
+
 /*L:216
- * This actually creates the thread which services the virtqueue for a device.
+ * This is where we emulate a handful of Guest instructions.  It's ugly
+ * and we used to do it in the kernel but it grew over time.
+ */
+
+/*
+ * We use the ptrace syscall's pt_regs struct to talk about registers
+ * to lguest: these macros convert the names to the offsets.
+ */
+#define getreg(name) getreg_off(offsetof(struct user_regs_struct, name))
+#define setreg(name, val) \
+       setreg_off(offsetof(struct user_regs_struct, name), (val))
+
+static u32 getreg_off(size_t offset)
+{
+       u32 r;
+       unsigned long args[] = { LHREQ_GETREG, offset };
+
+       if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
+               err(1, "Getting register %u", offset);
+       if (pread(lguest_fd, &r, sizeof(r), cpu_id) != sizeof(r))
+               err(1, "Reading register %u", offset);
+
+       return r;
+}
+
+static void setreg_off(size_t offset, u32 val)
+{
+       unsigned long args[] = { LHREQ_SETREG, offset, val };
+
+       if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
+               err(1, "Setting register %u", offset);
+}
+
+/* Get register by instruction encoding */
+static u32 getreg_num(unsigned regnum, u32 mask)
+{
+       /* 8 bit ops use regnums 4-7 for high parts of word */
+       if (mask == 0xFF && (regnum & 0x4))
+               return getreg_num(regnum & 0x3, 0xFFFF) >> 8;
+
+       switch (regnum) {
+       case 0: return getreg(eax) & mask;
+       case 1: return getreg(ecx) & mask;
+       case 2: return getreg(edx) & mask;
+       case 3: return getreg(ebx) & mask;
+       case 4: return getreg(esp) & mask;
+       case 5: return getreg(ebp) & mask;
+       case 6: return getreg(esi) & mask;
+       case 7: return getreg(edi) & mask;
+       }
+       abort();
+}
+
+/* Set register by instruction encoding */
+static void setreg_num(unsigned regnum, u32 val, u32 mask)
+{
+       /* Don't try to set bits out of range */
+       assert(~(val & ~mask));
+
+       /* 8 bit ops use regnums 4-7 for high parts of word */
+       if (mask == 0xFF && (regnum & 0x4)) {
+               /* Construct the 16 bits we want. */
+               val = (val << 8) | getreg_num(regnum & 0x3, 0xFF);
+               setreg_num(regnum & 0x3, val, 0xFFFF);
+               return;
+       }
+
+       switch (regnum) {
+       case 0: setreg(eax, val | (getreg(eax) & ~mask)); return;
+       case 1: setreg(ecx, val | (getreg(ecx) & ~mask)); return;
+       case 2: setreg(edx, val | (getreg(edx) & ~mask)); return;
+       case 3: setreg(ebx, val | (getreg(ebx) & ~mask)); return;
+       case 4: setreg(esp, val | (getreg(esp) & ~mask)); return;
+       case 5: setreg(ebp, val | (getreg(ebp) & ~mask)); return;
+       case 6: setreg(esi, val | (getreg(esi) & ~mask)); return;
+       case 7: setreg(edi, val | (getreg(edi) & ~mask)); return;
+       }
+       abort();
+}
+
+/* Get bytes of displacement appended to instruction, from r/m encoding */
+static u32 insn_displacement_len(u8 mod_reg_rm)
+{
+       /* Switch on the mod bits */
+       switch (mod_reg_rm >> 6) {
+       case 0:
+               /* If mod == 0, and r/m == 101, 16-bit displacement follows */
+               if ((mod_reg_rm & 0x7) == 0x5)
+                       return 2;
+               /* Normally, mod == 0 means no literal displacement */
+               return 0;
+       case 1:
+               /* One byte displacement */
+               return 1;
+       case 2:
+               /* Four byte displacement */
+               return 4;
+       case 3:
+               /* Register mode */
+               return 0;
+       }
+       abort();
+}
+
+static void emulate_insn(const u8 insn[])
+{
+       unsigned long args[] = { LHREQ_TRAP, 13 };
+       unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access;
+       unsigned int eax, port, mask;
+       /*
+        * Default is to return all-ones on IO port reads, which traditionally
+        * means "there's nothing there".
+        */
+       u32 val = 0xFFFFFFFF;
+
+       /*
+        * This must be the Guest kernel trying to do something, not userspace!
+        * The bottom two bits of the CS segment register are the privilege
+        * level.
+        */
+       if ((getreg(xcs) & 3) != 0x1)
+               goto no_emulate;
+
+       /* Decoding x86 instructions is icky. */
+
+       /*
+        * Around 2.6.33, the kernel started using an emulation for the
+        * cmpxchg8b instruction in early boot on many configurations.  This
+        * code isn't paravirtualized, and it tries to disable interrupts.
+        * Ignore it, which will Mostly Work.
+        */
+       if (insn[insnlen] == 0xfa) {
+               /* "cli", or Clear Interrupt Enable instruction.  Skip it. */
+               insnlen = 1;
+               goto skip_insn;
+       }
+
+       /*
+        * 0x66 is an "operand prefix".  It means a 16, not 32 bit in/out.
+        */
+       if (insn[insnlen] == 0x66) {
+               small_operand = 1;
+               /* The instruction is 1 byte so far, read the next byte. */
+               insnlen = 1;
+       }
+
+       /* If the lower bit isn't set, it's a single byte access */
+       byte_access = !(insn[insnlen] & 1);
+
+       /*
+        * Now we can ignore the lower bit and decode the 4 opcodes
+        * we need to emulate.
+        */
+       switch (insn[insnlen] & 0xFE) {
+       case 0xE4: /* in     <next byte>,%al */
+               port = insn[insnlen+1];
+               insnlen += 2;
+               in = 1;
+               break;
+       case 0xEC: /* in     (%dx),%al */
+               port = getreg(edx) & 0xFFFF;
+               insnlen += 1;
+               in = 1;
+               break;
+       case 0xE6: /* out    %al,<next byte> */
+               port = insn[insnlen+1];
+               insnlen += 2;
+               break;
+       case 0xEE: /* out    %al,(%dx) */
+               port = getreg(edx) & 0xFFFF;
+               insnlen += 1;
+               break;
+       default:
+               /* OK, we don't know what this is, can't emulate. */
+               goto no_emulate;
+       }
+
+       /* Set a mask of the 1, 2 or 4 bytes, depending on size of IO */
+       if (byte_access)
+               mask = 0xFF;
+       else if (small_operand)
+               mask = 0xFFFF;
+       else
+               mask = 0xFFFFFFFF;
+
+       /*
+        * If it was an "IN" instruction, they expect the result to be read
+        * into %eax, so we change %eax.
+        */
+       eax = getreg(eax);
+
+       if (in) {
+               /* This is the PS/2 keyboard status; 1 means ready for output */
+               if (port == 0x64)
+                       val = 1;
+               else if (is_pci_addr_port(port))
+                       pci_addr_ioread(port, mask, &val);
+               else if (is_pci_data_port(port))
+                       pci_data_ioread(port, mask, &val);
+
+               /* Clear the bits we're about to read */
+               eax &= ~mask;
+               /* Copy bits in from val. */
+               eax |= val & mask;
+               /* Now update the register. */
+               setreg(eax, eax);
+       } else {
+               if (is_pci_addr_port(port)) {
+                       if (!pci_addr_iowrite(port, mask, eax))
+                               goto bad_io;
+               } else if (is_pci_data_port(port)) {
+                       if (!pci_data_iowrite(port, mask, eax))
+                               goto bad_io;
+               }
+               /* There are many other ports, eg. CMOS clock, serial
+                * and parallel ports, so we ignore them all. */
+       }
+
+       verbose("IO %s of %x to %u: %#08x\n",
+               in ? "IN" : "OUT", mask, port, eax);
+skip_insn:
+       /* Finally, we've "done" the instruction, so move past it. */
+       setreg(eip, getreg(eip) + insnlen);
+       return;
+
+bad_io:
+       warnx("Attempt to %s port %u (%#x mask)",
+             in ? "read from" : "write to", port, mask);
+
+no_emulate:
+       /* Inject trap into Guest. */
+       if (write(lguest_fd, args, sizeof(args)) < 0)
+               err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip));
+}
+
+static struct device *find_mmio_region(unsigned long paddr, u32 *off)
+{
+       unsigned int i;
+
+       for (i = 1; i < MAX_PCI_DEVICES; i++) {
+               struct device *d = devices.pci[i];
+
+               if (!d)
+                       continue;
+               if (paddr < d->mmio_addr)
+                       continue;
+               if (paddr >= d->mmio_addr + d->mmio_size)
+                       continue;
+               *off = paddr - d->mmio_addr;
+               return d;
+       }
+       return NULL;
+}
+
+/* FIXME: Use vq array. */
+static struct virtqueue *vq_by_num(struct device *d, u32 num)
+{
+       struct virtqueue *vq = d->vq;
+
+       while (num-- && vq)
+               vq = vq->next;
+
+       return vq;
+}
+
+static void save_vq_config(const struct virtio_pci_common_cfg *cfg,
+                          struct virtqueue *vq)
+{
+       vq->pci_config = *cfg;
+}
+
+static void restore_vq_config(struct virtio_pci_common_cfg *cfg,
+                             struct virtqueue *vq)
+{
+       /* Only restore the per-vq part */
+       size_t off = offsetof(struct virtio_pci_common_cfg, queue_size);
+
+       memcpy((void *)cfg + off, (void *)&vq->pci_config + off,
+              sizeof(*cfg) - off);
+}
+
+/*
+ * 4.1.4.3.2:
+ *
+ *  The driver MUST configure the other virtqueue fields before
+ *  enabling the virtqueue with queue_enable.
+ *
+ * When they enable the virtqueue, we check that their setup is valid.
  */
-static void create_thread(struct virtqueue *vq)
+static void check_virtqueue(struct device *d, struct virtqueue *vq)
+{
+       /* Because lguest is 32 bit, all the descriptor high bits must be 0 */
+       if (vq->pci_config.queue_desc_hi
+           || vq->pci_config.queue_avail_hi
+           || vq->pci_config.queue_used_hi)
+               bad_driver_vq(vq, "invalid 64-bit queue address");
+
+       /*
+        * 2.4.1:
+        *
+        *  The driver MUST ensure that the physical address of the first byte
+        *  of each virtqueue part is a multiple of the specified alignment
+        *  value in the above table.
+        */
+       if (vq->pci_config.queue_desc_lo % 16
+           || vq->pci_config.queue_avail_lo % 2
+           || vq->pci_config.queue_used_lo % 4)
+               bad_driver_vq(vq, "invalid alignment in queue addresses");
+
+       /* Initialize the virtqueue and check they're all in range. */
+       vq->vring.num = vq->pci_config.queue_size;
+       vq->vring.desc = check_pointer(vq->dev,
+                                      vq->pci_config.queue_desc_lo,
+                                      sizeof(*vq->vring.desc) * vq->vring.num);
+       vq->vring.avail = check_pointer(vq->dev,
+                                       vq->pci_config.queue_avail_lo,
+                                       sizeof(*vq->vring.avail)
+                                       + (sizeof(vq->vring.avail->ring[0])
+                                          * vq->vring.num));
+       vq->vring.used = check_pointer(vq->dev,
+                                      vq->pci_config.queue_used_lo,
+                                      sizeof(*vq->vring.used)
+                                      + (sizeof(vq->vring.used->ring[0])
+                                         * vq->vring.num));
+
+       /*
+        * 2.4.9.1:
+        *
+        *   The driver MUST initialize flags in the used ring to 0
+        *   when allocating the used ring.
+        */
+       if (vq->vring.used->flags != 0)
+               bad_driver_vq(vq, "invalid initial used.flags %#x",
+                             vq->vring.used->flags);
+}
+
+static void start_virtqueue(struct virtqueue *vq)
 {
        /*
         * Create stack for thread.  Since the stack grows upwards, we point
         * the stack pointer to the end of this region.
         */
        char *stack = malloc(32768);
-       unsigned long args[] = { LHREQ_EVENTFD,
-                                vq->config.pfn*getpagesize(), 0 };
 
        /* Create a zero-initialized eventfd. */
        vq->eventfd = eventfd(0, 0);
        if (vq->eventfd < 0)
                err(1, "Creating eventfd");
-       args[2] = vq->eventfd;
-
-       /*
-        * Attach an eventfd to this virtqueue: it will go off when the Guest
-        * does an LHCALL_NOTIFY for this vq.
-        */
-       if (write(lguest_fd, &args, sizeof(args)) != 0)
-               err(1, "Attaching eventfd");
 
        /*
         * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
@@ -1048,167 +1859,531 @@ static void create_thread(struct virtqueue *vq)
        vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
        if (vq->thread == (pid_t)-1)
                err(1, "Creating clone");
-
-       /* We close our local copy now the child has it. */
-       close(vq->eventfd);
 }
 
-static void start_device(struct device *dev)
+static void start_virtqueues(struct device *d)
 {
-       unsigned int i;
        struct virtqueue *vq;
 
-       verbose("Device %s OK: offered", dev->name);
-       for (i = 0; i < dev->feature_len; i++)
-               verbose(" %02x", get_feature_bits(dev)[i]);
-       verbose(", accepted");
-       for (i = 0; i < dev->feature_len; i++)
-               verbose(" %02x", get_feature_bits(dev)
-                       [dev->feature_len+i]);
-
-       for (vq = dev->vq; vq; vq = vq->next) {
-               if (vq->service)
-                       create_thread(vq);
+       for (vq = d->vq; vq; vq = vq->next) {
+               if (vq->pci_config.queue_enable)
+                       start_virtqueue(vq);
        }
-       dev->running = true;
 }
 
-static void cleanup_devices(void)
+static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
 {
-       struct device *dev;
+       struct virtqueue *vq;
 
-       for (dev = devices.dev; dev; dev = dev->next)
-               reset_device(dev);
+       switch (off) {
+       case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
+               /*
+                * 4.1.4.3.1:
+                *
+                * The device MUST present the feature bits it is offering in
+                * device_feature, starting at bit device_feature_select ∗ 32
+                * for any device_feature_select written by the driver
+                */
+               if (val == 0)
+                       d->mmio->cfg.device_feature = d->features;
+               else if (val == 1)
+                       d->mmio->cfg.device_feature = (d->features >> 32);
+               else
+                       d->mmio->cfg.device_feature = 0;
+               goto feature_write_through32;
+       case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
+               if (val > 1)
+                       bad_driver(d, "Unexpected driver select %u", val);
+               goto feature_write_through32;
+       case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
+               if (d->mmio->cfg.guest_feature_select == 0) {
+                       d->features_accepted &= ~((u64)0xFFFFFFFF);
+                       d->features_accepted |= val;
+               } else {
+                       assert(d->mmio->cfg.guest_feature_select == 1);
+                       d->features_accepted &= 0xFFFFFFFF;
+                       d->features_accepted |= ((u64)val) << 32;
+               }
+               /*
+                * 2.2.1:
+                *
+                *   The driver MUST NOT accept a feature which the device did
+                *   not offer
+                */
+               if (d->features_accepted & ~d->features)
+                       bad_driver(d, "over-accepted features %#llx of %#llx",
+                                  d->features_accepted, d->features);
+               goto feature_write_through32;
+       case offsetof(struct virtio_pci_mmio, cfg.device_status): {
+               u8 prev;
+
+               verbose("%s: device status -> %#x\n", d->name, val);
+               /*
+                * 4.1.4.3.1:
+                * 
+                *  The device MUST reset when 0 is written to device_status,
+                *  and present a 0 in device_status once that is done.
+                */
+               if (val == 0) {
+                       reset_device(d);
+                       goto write_through8;
+               }
 
-       /* If we saved off the original terminal settings, restore them now. */
-       if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
-               tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
-}
+               /* 2.1.1: The driver MUST NOT clear a device status bit. */
+               if (d->mmio->cfg.device_status & ~val)
+                       bad_driver(d, "unset of device status bit %#x -> %#x",
+                                  d->mmio->cfg.device_status, val);
 
-/* When the Guest tells us they updated the status field, we handle it. */
-static void update_device_status(struct device *dev)
-{
-       /* A zero status is a reset, otherwise it's a set of flags. */
-       if (dev->desc->status == 0)
-               reset_device(dev);
-       else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) {
-               warnx("Device %s configuration FAILED", dev->name);
-               if (dev->running)
-                       reset_device(dev);
-       } else {
-               if (dev->running)
-                       err(1, "Device %s features finalized twice", dev->name);
-               start_device(dev);
+               /*
+                * 2.1.2:
+                *
+                *  The device MUST NOT consume buffers or notify the driver
+                *  before DRIVER_OK.
+                */
+               if (val & VIRTIO_CONFIG_S_DRIVER_OK
+                   && !(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK))
+                       start_virtqueues(d);
+
+               /*
+                * 3.1.1:
+                *
+                *   The driver MUST follow this sequence to initialize a device:
+                *   - Reset the device.
+                *   - Set the ACKNOWLEDGE status bit: the guest OS has
+                 *     notice the device.
+                *   - Set the DRIVER status bit: the guest OS knows how
+                 *     to drive the device.
+                *   - Read device feature bits, and write the subset
+                *     of feature bits understood by the OS and driver
+                *     to the device. During this step the driver MAY
+                *     read (but MUST NOT write) the device-specific
+                *     configuration fields to check that it can
+                *     support the device before accepting it.
+                *   - Set the FEATURES_OK status bit.  The driver
+                *     MUST not accept new feature bits after this
+                *     step.
+                *   - Re-read device status to ensure the FEATURES_OK
+                *     bit is still set: otherwise, the device does
+                *     not support our subset of features and the
+                *     device is unusable.
+                *   - Perform device-specific setup, including
+                *     discovery of virtqueues for the device,
+                *     optional per-bus setup, reading and possibly
+                *     writing the device’s virtio configuration
+                *     space, and population of virtqueues.
+                *   - Set the DRIVER_OK status bit. At this point the
+                 *     device is “live”.
+                */
+               prev = 0;
+               switch (val & ~d->mmio->cfg.device_status) {
+               case VIRTIO_CONFIG_S_DRIVER_OK:
+                       prev |= VIRTIO_CONFIG_S_FEATURES_OK; /* fall thru */
+               case VIRTIO_CONFIG_S_FEATURES_OK:
+                       prev |= VIRTIO_CONFIG_S_DRIVER; /* fall thru */
+               case VIRTIO_CONFIG_S_DRIVER:
+                       prev |= VIRTIO_CONFIG_S_ACKNOWLEDGE; /* fall thru */
+               case VIRTIO_CONFIG_S_ACKNOWLEDGE:
+                       break;
+               default:
+                       bad_driver(d, "unknown device status bit %#x -> %#x",
+                                  d->mmio->cfg.device_status, val);
+               }
+               if (d->mmio->cfg.device_status != prev)
+                       bad_driver(d, "unexpected status transition %#x -> %#x",
+                                  d->mmio->cfg.device_status, val);
+
+               /* If they just wrote FEATURES_OK, we make sure they read */
+               switch (val & ~d->mmio->cfg.device_status) {
+               case VIRTIO_CONFIG_S_FEATURES_OK:
+                       d->wrote_features_ok = true;
+                       break;
+               case VIRTIO_CONFIG_S_DRIVER_OK:
+                       if (d->wrote_features_ok)
+                               bad_driver(d, "did not re-read FEATURES_OK");
+                       break;
+               }
+               goto write_through8;
        }
-}
+       case offsetof(struct virtio_pci_mmio, cfg.queue_select):
+               vq = vq_by_num(d, val);
+               /*
+                * 4.1.4.3.1:
+                *
+                *  The device MUST present a 0 in queue_size if the virtqueue
+                *  corresponding to the current queue_select is unavailable.
+                */
+               if (!vq) {
+                       d->mmio->cfg.queue_size = 0;
+                       goto write_through16;
+               }
+               /* Save registers for old vq, if it was a valid vq */
+               if (d->mmio->cfg.queue_size)
+                       save_vq_config(&d->mmio->cfg,
+                                      vq_by_num(d, d->mmio->cfg.queue_select));
+               /* Restore the registers for the queue they asked for */
+               restore_vq_config(&d->mmio->cfg, vq);
+               goto write_through16;
+       case offsetof(struct virtio_pci_mmio, cfg.queue_size):
+               /*
+                * 4.1.4.3.2:
+                *
+                *  The driver MUST NOT write a value which is not a power of 2
+                *  to queue_size.
+                */
+               if (val & (val-1))
+                       bad_driver(d, "invalid queue size %u", val);
+               if (d->mmio->cfg.queue_enable)
+                       bad_driver(d, "changing queue size on live device");
+               goto write_through16;
+       case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector):
+               bad_driver(d, "attempt to set MSIX vector to %u", val);
+       case offsetof(struct virtio_pci_mmio, cfg.queue_enable): {
+               struct virtqueue *vq = vq_by_num(d, d->mmio->cfg.queue_select);
 
-/*L:215
- * This is the generic routine we call when the Guest uses LHCALL_NOTIFY.  In
- * particular, it's used to notify us of device status changes during boot.
- */
-static void handle_output(unsigned long addr)
-{
-       struct device *i;
+               /*
+                * 4.1.4.3.2:
+                *
+                *  The driver MUST NOT write a 0 to queue_enable.
+                */
+               if (val != 1)
+                       bad_driver(d, "setting queue_enable to %u", val);
 
-       /* Check each device. */
-       for (i = devices.dev; i; i = i->next) {
-               struct virtqueue *vq;
+               /*
+                * 3.1.1:
+                *
+                *  7. Perform device-specific setup, including discovery of
+                *     virtqueues for the device, optional per-bus setup,
+                *     reading and possibly writing the device’s virtio
+                *     configuration space, and population of virtqueues.
+                *  8. Set the DRIVER_OK status bit.
+                *
+                * All our devices require all virtqueues to be enabled, so
+                * they should have done that before setting DRIVER_OK.
+                */
+               if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)
+                       bad_driver(d, "enabling vq after DRIVER_OK");
 
+               d->mmio->cfg.queue_enable = val;
+               save_vq_config(&d->mmio->cfg, vq);
+               check_virtqueue(d, vq);
+               goto write_through16;
+       }
+       case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off):
+               bad_driver(d, "attempt to write to queue_notify_off");
+       case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo):
+       case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi):
+       case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo):
+       case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi):
+       case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo):
+       case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi):
                /*
-                * Notifications to device descriptors mean they updated the
-                * device status.
+                * 4.1.4.3.2:
+                *
+                *  The driver MUST configure the other virtqueue fields before
+                *  enabling the virtqueue with queue_enable.
                 */
-               if (from_guest_phys(addr) == i->desc) {
-                       update_device_status(i);
-                       return;
-               }
+               if (d->mmio->cfg.queue_enable)
+                       bad_driver(d, "changing queue on live device");
+
+               /*
+                * 3.1.1:
+                *
+                *  The driver MUST follow this sequence to initialize a device:
+                *...
+                *  5. Set the FEATURES_OK status bit. The driver MUST not
+                *  accept new feature bits after this step.
+                */
+               if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK))
+                       bad_driver(d, "setting up vq before FEATURES_OK");
 
-               /* Devices should not be used before features are finalized. */
-               for (vq = i->vq; vq; vq = vq->next) {
-                       if (addr != vq->config.pfn*getpagesize())
-                               continue;
-                       errx(1, "Notification on %s before setup!", i->name);
+               /*
+                *  6. Re-read device status to ensure the FEATURES_OK bit is
+                *     still set...
+                */
+               if (d->wrote_features_ok)
+                       bad_driver(d, "didn't re-read FEATURES_OK before setup");
+
+               goto write_through32;
+       case offsetof(struct virtio_pci_mmio, notify):
+               vq = vq_by_num(d, val);
+               if (!vq)
+                       bad_driver(d, "Invalid vq notification on %u", val);
+               /* Notify the process handling this vq by adding 1 to eventfd */
+               write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8);
+               goto write_through16;
+       case offsetof(struct virtio_pci_mmio, isr):
+               bad_driver(d, "Unexpected write to isr");
+       /* Weird corner case: write to emerg_wr of console */
+       case sizeof(struct virtio_pci_mmio)
+               + offsetof(struct virtio_console_config, emerg_wr):
+               if (strcmp(d->name, "console") == 0) {
+                       char c = val;
+                       write(STDOUT_FILENO, &c, 1);
+                       goto write_through32;
                }
+               /* Fall through... */
+       default:
+               /*
+                * 4.1.4.3.2:
+                *
+                *   The driver MUST NOT write to device_feature, num_queues,
+                *   config_generation or queue_notify_off.
+                */
+               bad_driver(d, "Unexpected write to offset %u", off);
        }
 
+feature_write_through32:
        /*
-        * Early console write is done using notify on a nul-terminated string
-        * in Guest memory.  It's also great for hacking debugging messages
-        * into a Guest.
+        * 3.1.1:
+        *
+        *   The driver MUST follow this sequence to initialize a device:
+        *...
+        *   - Set the DRIVER status bit: the guest OS knows how
+        *     to drive the device.
+        *   - Read device feature bits, and write the subset
+        *     of feature bits understood by the OS and driver
+        *     to the device.
+        *...
+        *   - Set the FEATURES_OK status bit. The driver MUST not
+        *     accept new feature bits after this step.
         */
-       if (addr >= guest_limit)
-               errx(1, "Bad NOTIFY %#lx", addr);
+       if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
+               bad_driver(d, "feature write before VIRTIO_CONFIG_S_DRIVER");
+       if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK)
+               bad_driver(d, "feature write after VIRTIO_CONFIG_S_FEATURES_OK");
 
-       write(STDOUT_FILENO, from_guest_phys(addr),
-             strnlen(from_guest_phys(addr), guest_limit - addr));
+       /*
+        * 4.1.3.1:
+        *
+        *  The driver MUST access each field using the “natural” access
+        *  method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for
+        *  16-bit fields and 8-bit accesses for 8-bit fields.
+        */
+write_through32:
+       if (mask != 0xFFFFFFFF) {
+               bad_driver(d, "non-32-bit write to offset %u (%#x)",
+                          off, getreg(eip));
+               return;
+       }
+       memcpy((char *)d->mmio + off, &val, 4);
+       return;
+
+write_through16:
+       if (mask != 0xFFFF)
+               bad_driver(d, "non-16-bit write to offset %u (%#x)",
+                          off, getreg(eip));
+       memcpy((char *)d->mmio + off, &val, 2);
+       return;
+
+write_through8:
+       if (mask != 0xFF)
+               bad_driver(d, "non-8-bit write to offset %u (%#x)",
+                          off, getreg(eip));
+       memcpy((char *)d->mmio + off, &val, 1);
+       return;
 }
 
-/*L:190
- * Device Setup
- *
- * All devices need a descriptor so the Guest knows it exists, and a "struct
- * device" so the Launcher can keep track of it.  We have common helper
- * routines to allocate and manage them.
- */
-
-/*
- * The layout of the device page is a "struct lguest_device_desc" followed by a
- * number of virtqueue descriptors, then two sets of feature bits, then an
- * array of configuration bytes.  This routine returns the configuration
- * pointer.
- */
-static u8 *device_config(const struct device *dev)
+static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
 {
-       return (void *)(dev->desc + 1)
-               + dev->num_vq * sizeof(struct lguest_vqconfig)
-               + dev->feature_len * 2;
+       u8 isr;
+       u32 val = 0;
+
+       switch (off) {
+       case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
+       case offsetof(struct virtio_pci_mmio, cfg.device_feature):
+       case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
+       case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
+               /*
+                * 3.1.1:
+                *
+                *   The driver MUST follow this sequence to initialize a device:
+                *...
+                *   - Set the DRIVER status bit: the guest OS knows how
+                *     to drive the device.
+                *   - Read device feature bits, and write the subset
+                *     of feature bits understood by the OS and driver
+                *     to the device.
+                */
+               if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
+                       bad_driver(d,
+                                  "feature read before VIRTIO_CONFIG_S_DRIVER");
+               goto read_through32;
+       case offsetof(struct virtio_pci_mmio, cfg.msix_config):
+               bad_driver(d, "read of msix_config");
+       case offsetof(struct virtio_pci_mmio, cfg.num_queues):
+               goto read_through16;
+       case offsetof(struct virtio_pci_mmio, cfg.device_status):
+               /* As they did read, any write of FEATURES_OK is now fine. */
+               d->wrote_features_ok = false;
+               goto read_through8;
+       case offsetof(struct virtio_pci_mmio, cfg.config_generation):
+               /*
+                * 4.1.4.3.1:
+                *
+                *  The device MUST present a changed config_generation after
+                *  the driver has read a device-specific configuration value
+                *  which has changed since any part of the device-specific
+                *  configuration was last read.
+                *
+                * This is simple: none of our devices change config, so this
+                * is always 0.
+                */
+               goto read_through8;
+       case offsetof(struct virtio_pci_mmio, notify):
+               /*
+                * 3.1.1:
+                *
+                *   The driver MUST NOT notify the device before setting
+                *   DRIVER_OK.
+                */
+               if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK))
+                       bad_driver(d, "notify before VIRTIO_CONFIG_S_DRIVER_OK");
+               goto read_through16;
+       case offsetof(struct virtio_pci_mmio, isr):
+               if (mask != 0xFF)
+                       bad_driver(d, "non-8-bit read from offset %u (%#x)",
+                                  off, getreg(eip));
+               isr = d->mmio->isr;
+               /*
+                * 4.1.4.5.1:
+                *
+                *  The device MUST reset ISR status to 0 on driver read.
+                */
+               d->mmio->isr = 0;
+               return isr;
+       case offsetof(struct virtio_pci_mmio, padding):
+               bad_driver(d, "read from padding (%#x)", getreg(eip));
+       default:
+               /* Read from device config space, beware unaligned overflow */
+               if (off > d->mmio_size - 4)
+                       bad_driver(d, "read past end (%#x)", getreg(eip));
+
+               /*
+                * 3.1.1:
+                *  The driver MUST follow this sequence to initialize a device:
+                *...
+                *  3. Set the DRIVER status bit: the guest OS knows how to
+                *  drive the device.
+                *  4. Read device feature bits, and write the subset of
+                *  feature bits understood by the OS and driver to the
+                *  device. During this step the driver MAY read (but MUST NOT
+                *  write) the device-specific configuration fields to check
+                *  that it can support the device before accepting it.
+                */
+               if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
+                       bad_driver(d,
+                                  "config read before VIRTIO_CONFIG_S_DRIVER");
+
+               if (mask == 0xFFFFFFFF)
+                       goto read_through32;
+               else if (mask == 0xFFFF)
+                       goto read_through16;
+               else
+                       goto read_through8;
+       }
+
+       /*
+        * 4.1.3.1:
+        *
+        *  The driver MUST access each field using the “natural” access
+        *  method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for
+        *  16-bit fields and 8-bit accesses for 8-bit fields.
+        */
+read_through32:
+       if (mask != 0xFFFFFFFF)
+               bad_driver(d, "non-32-bit read to offset %u (%#x)",
+                          off, getreg(eip));
+       memcpy(&val, (char *)d->mmio + off, 4);
+       return val;
+
+read_through16:
+       if (mask != 0xFFFF)
+               bad_driver(d, "non-16-bit read to offset %u (%#x)",
+                          off, getreg(eip));
+       memcpy(&val, (char *)d->mmio + off, 2);
+       return val;
+
+read_through8:
+       if (mask != 0xFF)
+               bad_driver(d, "non-8-bit read to offset %u (%#x)",
+                          off, getreg(eip));
+       memcpy(&val, (char *)d->mmio + off, 1);
+       return val;
 }
 
-/*
- * This routine allocates a new "struct lguest_device_desc" from descriptor
- * table page just above the Guest's normal memory.  It returns a pointer to
- * that descriptor.
- */
-static struct lguest_device_desc *new_dev_desc(u16 type)
+static void emulate_mmio(unsigned long paddr, const u8 *insn)
 {
-       struct lguest_device_desc d = { .type = type };
-       void *p;
+       u32 val, off, mask = 0xFFFFFFFF, insnlen = 0;
+       struct device *d = find_mmio_region(paddr, &off);
+       unsigned long args[] = { LHREQ_TRAP, 14 };
 
-       /* Figure out where the next device config is, based on the last one. */
-       if (devices.lastdev)
-               p = device_config(devices.lastdev)
-                       + devices.lastdev->desc->config_len;
-       else
-               p = devices.descpage;
+       if (!d) {
+               warnx("MMIO touching %#08lx (not a device)", paddr);
+               goto reinject;
+       }
+
+       /* Prefix makes it a 16 bit op */
+       if (insn[0] == 0x66) {
+               mask = 0xFFFF;
+               insnlen++;
+       }
 
-       /* We only have one page for all the descriptors. */
-       if (p + sizeof(d) > (void *)devices.descpage + getpagesize())
-               errx(1, "Too many devices");
+       /* iowrite */
+       if (insn[insnlen] == 0x89) {
+               /* Next byte is r/m byte: bits 3-5 are register. */
+               val = getreg_num((insn[insnlen+1] >> 3) & 0x7, mask);
+               emulate_mmio_write(d, off, val, mask);
+               insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
+       } else if (insn[insnlen] == 0x8b) { /* ioread */
+               /* Next byte is r/m byte: bits 3-5 are register. */
+               val = emulate_mmio_read(d, off, mask);
+               setreg_num((insn[insnlen+1] >> 3) & 0x7, val, mask);
+               insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
+       } else if (insn[0] == 0x88) { /* 8-bit iowrite */
+               mask = 0xff;
+               /* Next byte is r/m byte: bits 3-5 are register. */
+               val = getreg_num((insn[1] >> 3) & 0x7, mask);
+               emulate_mmio_write(d, off, val, mask);
+               insnlen = 2 + insn_displacement_len(insn[1]);
+       } else if (insn[0] == 0x8a) { /* 8-bit ioread */
+               mask = 0xff;
+               val = emulate_mmio_read(d, off, mask);
+               setreg_num((insn[1] >> 3) & 0x7, val, mask);
+               insnlen = 2 + insn_displacement_len(insn[1]);
+       } else {
+               warnx("Unknown MMIO instruction touching %#08lx:"
+                    " %02x %02x %02x %02x at %u",
+                    paddr, insn[0], insn[1], insn[2], insn[3], getreg(eip));
+       reinject:
+               /* Inject trap into Guest. */
+               if (write(lguest_fd, args, sizeof(args)) < 0)
+                       err(1, "Reinjecting trap 14 for fault at %#x",
+                           getreg(eip));
+               return;
+       }
 
-       /* p might not be aligned, so we memcpy in. */
-       return memcpy(p, &d, sizeof(d));
+       /* Finally, we've "done" the instruction, so move past it. */
+       setreg(eip, getreg(eip) + insnlen);
 }
 
-/*
- * Each device descriptor is followed by the description of its virtqueues.  We
- * specify how many descriptors the virtqueue is to have.
+/*L:190
+ * Device Setup
+ *
+ * All devices need a descriptor so the Guest knows it exists, and a "struct
+ * device" so the Launcher can keep track of it.  We have common helper
+ * routines to allocate and manage them.
  */
-static void add_virtqueue(struct device *dev, unsigned int num_descs,
-                         void (*service)(struct virtqueue *))
+static void add_pci_virtqueue(struct device *dev,
+                             void (*service)(struct virtqueue *),
+                             const char *name)
 {
-       unsigned int pages;
        struct virtqueue **i, *vq = malloc(sizeof(*vq));
-       void *p;
-
-       /* First we need some memory for this virtqueue. */
-       pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
-               / getpagesize();
-       p = get_pages(pages);
 
        /* Initialize the virtqueue */
        vq->next = NULL;
        vq->last_avail_idx = 0;
        vq->dev = dev;
+       vq->name = name;
 
        /*
         * This is the routine the service thread will run, and its Process ID
@@ -1218,25 +2393,11 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
        vq->thread = (pid_t)-1;
 
        /* Initialize the configuration. */
-       vq->config.num = num_descs;
-       vq->config.irq = devices.next_irq++;
-       vq->config.pfn = to_guest_phys(p) / getpagesize();
-
-       /* Initialize the vring. */
-       vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);
-
-       /*
-        * Append virtqueue to this device's descriptor.  We use
-        * device_config() to get the end of the device's current virtqueues;
-        * we check that we haven't added any config or feature information
-        * yet, otherwise we'd be overwriting them.
-        */
-       assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
-       memcpy(device_config(dev), &vq->config, sizeof(vq->config));
-       dev->num_vq++;
-       dev->desc->num_vq++;
+       reset_vq_pci_config(vq);
+       vq->pci_config.queue_notify_off = 0;
 
-       verbose("Virtqueue page %#lx\n", to_guest_phys(p));
+       /* Add one to the number of queues */
+       vq->dev->mmio->cfg.num_queues++;
 
        /*
         * Add to tail of list, so dev->vq is first vq, dev->vq->next is
@@ -1246,73 +2407,239 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
        *i = vq;
 }
 
-/*
- * The first half of the feature bitmask is for us to advertise features.  The
- * second half is for the Guest to accept features.
- */
-static void add_feature(struct device *dev, unsigned bit)
+/* The Guest accesses the feature bits via the PCI common config MMIO region */
+static void add_pci_feature(struct device *dev, unsigned bit)
 {
-       u8 *features = get_feature_bits(dev);
+       dev->features |= (1ULL << bit);
+}
 
-       /* We can't extend the feature bits once we've added config bytes */
-       if (dev->desc->feature_len <= bit / CHAR_BIT) {
-               assert(dev->desc->config_len == 0);
-               dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1;
-       }
+/* For devices with no config. */
+static void no_device_config(struct device *dev)
+{
+       dev->mmio_addr = get_mmio_region(dev->mmio_size);
 
-       features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
+       dev->config.bar[0] = dev->mmio_addr;
+       /* Bottom 4 bits must be zero */
+       assert(~(dev->config.bar[0] & 0xF));
+}
+
+/* This puts the device config into BAR0 */
+static void set_device_config(struct device *dev, const void *conf, size_t len)
+{
+       /* Set up BAR 0 */
+       dev->mmio_size += len;
+       dev->mmio = realloc(dev->mmio, dev->mmio_size);
+       memcpy(dev->mmio + 1, conf, len);
+
+       /*
+        * 4.1.4.6:
+        *
+        *  The device MUST present at least one VIRTIO_PCI_CAP_DEVICE_CFG
+        *  capability for any device type which has a device-specific
+        *  configuration.
+        */
+       /* Hook up device cfg */
+       dev->config.cfg_access.cap.cap_next
+               = offsetof(struct pci_config, device);
+
+       /*
+        * 4.1.4.6.1:
+        *
+        *  The offset for the device-specific configuration MUST be 4-byte
+        *  aligned.
+        */
+       assert(dev->config.cfg_access.cap.cap_next % 4 == 0);
+
+       /* Fix up device cfg field length. */
+       dev->config.device.length = len;
+
+       /* The rest is the same as the no-config case */
+       no_device_config(dev);
+}
+
+static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type,
+                    size_t bar_offset, size_t bar_bytes, u8 next)
+{
+       cap->cap_vndr = PCI_CAP_ID_VNDR;
+       cap->cap_next = next;
+       cap->cap_len = caplen;
+       cap->cfg_type = type;
+       cap->bar = 0;
+       memset(cap->padding, 0, sizeof(cap->padding));
+       cap->offset = bar_offset;
+       cap->length = bar_bytes;
 }
 
 /*
- * This routine sets the configuration fields for an existing device's
- * descriptor.  It only works for the last device, but that's OK because that's
- * how we use it.
+ * This sets up the pci_config structure, as defined in the virtio 1.0
+ * standard (and PCI standard).
  */
-static void set_config(struct device *dev, unsigned len, const void *conf)
+static void init_pci_config(struct pci_config *pci, u16 type,
+                           u8 class, u8 subclass)
 {
-       /* Check we haven't overflowed our single page. */
-       if (device_config(dev) + len > devices.descpage + getpagesize())
-               errx(1, "Too many devices");
+       size_t bar_offset, bar_len;
+
+       /*
+        * 4.1.4.4.1:
+        *
+        *  The device MUST either present notify_off_multiplier as an even
+        *  power of 2, or present notify_off_multiplier as 0.
+        *
+        * 2.1.2:
+        *
+        *   The device MUST initialize device status to 0 upon reset.
+        */
+       memset(pci, 0, sizeof(*pci));
+
+       /* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */
+       pci->vendor_id = 0x1AF4;
+       /* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */
+       pci->device_id = 0x1040 + type;
+
+       /*
+        * PCI has specific codes for different types of devices.
+        * Linux doesn't care, but it's a good clue for people looking
+        * at the device.
+        */
+       pci->class = class;
+       pci->subclass = subclass;
+
+       /*
+        * 4.1.2.1:
+        *
+        *  Non-transitional devices SHOULD have a PCI Revision ID of 1 or
+        *  higher
+        */
+       pci->revid = 1;
+
+       /*
+        * 4.1.2.1:
+        *
+        *  Non-transitional devices SHOULD have a PCI Subsystem Device ID of
+        *  0x40 or higher.
+        */
+       pci->subsystem_device_id = 0x40;
+
+       /* We use our dummy interrupt controller, and irq_line is the irq */
+       pci->irq_line = devices.next_irq++;
+       pci->irq_pin = 0;
+
+       /* Support for extended capabilities. */
+       pci->status = (1 << 4);
+
+       /* Link them in. */
+       /*
+        * 4.1.4.3.1:
+        *
+        *  The device MUST present at least one common configuration
+        *  capability.
+        */
+       pci->capabilities = offsetof(struct pci_config, common);
+
+       /* 4.1.4.3.1 ... offset MUST be 4-byte aligned. */
+       assert(pci->capabilities % 4 == 0);
+
+       bar_offset = offsetof(struct virtio_pci_mmio, cfg);
+       bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg);
+       init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG,
+                bar_offset, bar_len,
+                offsetof(struct pci_config, notify));
+
+       /*
+        * 4.1.4.4.1:
+        *
+        *  The device MUST present at least one notification capability.
+        */
+       bar_offset += bar_len;
+       bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify);
+
+       /*
+        * 4.1.4.4.1:
+        *
+        *  The cap.offset MUST be 2-byte aligned.
+        */
+       assert(pci->common.cap_next % 2 == 0);
+
+       /* FIXME: Use a non-zero notify_off, for per-queue notification? */
+       /*
+        * 4.1.4.4.1:
+        *
+        *  The value cap.length presented by the device MUST be at least 2 and
+        *  MUST be large enough to support queue notification offsets for all
+        *  supported queues in all possible configurations.
+        */
+       assert(bar_len >= 2);
+
+       init_cap(&pci->notify.cap, sizeof(pci->notify),
+                VIRTIO_PCI_CAP_NOTIFY_CFG,
+                bar_offset, bar_len,
+                offsetof(struct pci_config, isr));
+
+       bar_offset += bar_len;
+       bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr);
+       /*
+        * 4.1.4.5.1:
+        *
+        *  The device MUST present at least one VIRTIO_PCI_CAP_ISR_CFG
+        *  capability.
+        */
+       init_cap(&pci->isr, sizeof(pci->isr),
+                VIRTIO_PCI_CAP_ISR_CFG,
+                bar_offset, bar_len,
+                offsetof(struct pci_config, cfg_access));
+
+       /*
+        * 4.1.4.7.1:
+        *
+        * The device MUST present at least one VIRTIO_PCI_CAP_PCI_CFG
+        * capability.
+        */
+       /* This doesn't have any presence in the BAR */
+       init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access),
+                VIRTIO_PCI_CAP_PCI_CFG,
+                0, 0, 0);
 
-       /* Copy in the config information, and store the length. */
-       memcpy(device_config(dev), conf, len);
-       dev->desc->config_len = len;
+       bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding);
+       assert(bar_offset == sizeof(struct virtio_pci_mmio));
 
-       /* Size must fit in config_len field (8 bits)! */
-       assert(dev->desc->config_len == len);
+       /*
+        * This gets sewn in and length set in set_device_config().
+        * Some devices don't have a device configuration interface, so
+        * we never expose this if we don't call set_device_config().
+        */
+       init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG,
+                bar_offset, 0, 0);
 }
 
 /*
- * This routine does all the creation and setup of a new device, including
- * calling new_dev_desc() to allocate the descriptor and device memory.  We
- * don't actually start the service threads until later.
+ * This routine does all the creation and setup of a new device, but we don't
+ * actually place the MMIO region until we know the size (if any) of the
+ * device-specific config.  And we don't actually start the service threads
+ * until later.
  *
  * See what I mean about userspace being boring?
  */
-static struct device *new_device(const char *name, u16 type)
+static struct device *new_pci_device(const char *name, u16 type,
+                                    u8 class, u8 subclass)
 {
        struct device *dev = malloc(sizeof(*dev));
 
        /* Now we populate the fields one at a time. */
-       dev->desc = new_dev_desc(type);
        dev->name = name;
        dev->vq = NULL;
-       dev->feature_len = 0;
-       dev->num_vq = 0;
        dev->running = false;
-       dev->next = NULL;
+       dev->wrote_features_ok = false;
+       dev->mmio_size = sizeof(struct virtio_pci_mmio);
+       dev->mmio = calloc(1, dev->mmio_size);
+       dev->features = (u64)1 << VIRTIO_F_VERSION_1;
+       dev->features_accepted = 0;
 
-       /*
-        * Append to device list.  Prepending to a single-linked list is
-        * easier, but the user expects the devices to be arranged on the bus
-        * in command-line order.  The first network device on the command line
-        * is eth0, the first block device /dev/vda, etc.
-        */
-       if (devices.lastdev)
-               devices.lastdev->next = dev;
-       else
-               devices.dev = dev;
-       devices.lastdev = dev;
+       if (devices.device_num + 1 >= MAX_PCI_DEVICES)
+               errx(1, "Can only handle 31 PCI devices");
+
+       init_pci_config(&dev->config, type, class, subclass);
+       assert(!devices.pci[devices.device_num+1]);
+       devices.pci[++devices.device_num] = dev;
 
        return dev;
 }
@@ -1324,6 +2651,7 @@ static struct device *new_device(const char *name, u16 type)
 static void setup_console(void)
 {
        struct device *dev;
+       struct virtio_console_config conf;
 
        /* If we can save the initial standard input settings... */
        if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
@@ -1336,7 +2664,7 @@ static void setup_console(void)
                tcsetattr(STDIN_FILENO, TCSANOW, &term);
        }
 
-       dev = new_device("console", VIRTIO_ID_CONSOLE);
+       dev = new_pci_device("console", VIRTIO_ID_CONSOLE, 0x07, 0x00);
 
        /* We store the console state in dev->priv, and initialize it. */
        dev->priv = malloc(sizeof(struct console_abort));
@@ -1348,10 +2676,14 @@ static void setup_console(void)
         * stdin.  When they put something in the output queue, we write it to
         * stdout.
         */
-       add_virtqueue(dev, VIRTQUEUE_NUM, console_input);
-       add_virtqueue(dev, VIRTQUEUE_NUM, console_output);
+       add_pci_virtqueue(dev, console_input, "input");
+       add_pci_virtqueue(dev, console_output, "output");
+
+       /* We need a configuration area for the emerg_wr early writes. */
+       add_pci_feature(dev, VIRTIO_CONSOLE_F_EMERG_WRITE);
+       set_device_config(dev, &conf, sizeof(conf));
 
-       verbose("device %u: console\n", ++devices.device_num);
+       verbose("device %u: console\n", devices.device_num);
 }
 /*:*/
 
@@ -1449,6 +2781,7 @@ static void configure_device(int fd, const char *tapif, u32 ipaddr)
 static int get_tun_device(char tapif[IFNAMSIZ])
 {
        struct ifreq ifr;
+       int vnet_hdr_sz;
        int netfd;
 
        /* Start with this zeroed.  Messy but sure. */
@@ -1476,6 +2809,18 @@ static int get_tun_device(char tapif[IFNAMSIZ])
         */
        ioctl(netfd, TUNSETNOCSUM, 1);
 
+       /*
+        * In virtio before 1.0 (aka legacy virtio), we added a 16-bit
+        * field at the end of the network header iff
+        * VIRTIO_NET_F_MRG_RXBUF was negotiated.  For virtio 1.0,
+        * that became the norm, but we need to tell the tun device
+        * about our expanded header (which is called
+        * virtio_net_hdr_mrg_rxbuf in the legacy system).
+        */
+       vnet_hdr_sz = sizeof(struct virtio_net_hdr_v1);
+       if (ioctl(netfd, TUNSETVNETHDRSZ, &vnet_hdr_sz) != 0)
+               err(1, "Setting tun header size to %u", vnet_hdr_sz);
+
        memcpy(tapif, ifr.ifr_name, IFNAMSIZ);
        return netfd;
 }
@@ -1499,12 +2844,12 @@ static void setup_tun_net(char *arg)
        net_info->tunfd = get_tun_device(tapif);
 
        /* First we create a new network device. */
-       dev = new_device("net", VIRTIO_ID_NET);
+       dev = new_pci_device("net", VIRTIO_ID_NET, 0x02, 0x00);
        dev->priv = net_info;
 
        /* Network devices need a recv and a send queue, just like console. */
-       add_virtqueue(dev, VIRTQUEUE_NUM, net_input);
-       add_virtqueue(dev, VIRTQUEUE_NUM, net_output);
+       add_pci_virtqueue(dev, net_input, "rx");
+       add_pci_virtqueue(dev, net_output, "tx");
 
        /*
         * We need a socket to perform the magic network ioctls to bring up the
@@ -1524,7 +2869,7 @@ static void setup_tun_net(char *arg)
        p = strchr(arg, ':');
        if (p) {
                str2mac(p+1, conf.mac);
-               add_feature(dev, VIRTIO_NET_F_MAC);
+               add_pci_feature(dev, VIRTIO_NET_F_MAC);
                *p = '\0';
        }
 
@@ -1538,25 +2883,21 @@ static void setup_tun_net(char *arg)
        configure_device(ipfd, tapif, ip);
 
        /* Expect Guest to handle everything except UFO */
-       add_feature(dev, VIRTIO_NET_F_CSUM);
-       add_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
-       add_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
-       add_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
-       add_feature(dev, VIRTIO_NET_F_GUEST_ECN);
-       add_feature(dev, VIRTIO_NET_F_HOST_TSO4);
-       add_feature(dev, VIRTIO_NET_F_HOST_TSO6);
-       add_feature(dev, VIRTIO_NET_F_HOST_ECN);
+       add_pci_feature(dev, VIRTIO_NET_F_CSUM);
+       add_pci_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
+       add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
+       add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
+       add_pci_feature(dev, VIRTIO_NET_F_GUEST_ECN);
+       add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO4);
+       add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO6);
+       add_pci_feature(dev, VIRTIO_NET_F_HOST_ECN);
        /* We handle indirect ring entries */
-       add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
-       /* We're compliant with the damn spec. */
-       add_feature(dev, VIRTIO_F_ANY_LAYOUT);
-       set_config(dev, sizeof(conf), &conf);
+       add_pci_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
+       set_device_config(dev, &conf, sizeof(conf));
 
        /* We don't need the socket any more; setup is done. */
        close(ipfd);
 
-       devices.device_num++;
-
        if (bridging)
                verbose("device %u: tun %s attached to bridge: %s\n",
                        devices.device_num, tapif, arg);
@@ -1607,7 +2948,7 @@ static void blk_request(struct virtqueue *vq)
        head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
 
        /* Copy the output header from the front of the iov (adjusts iov) */
-       iov_consume(iov, out_num, &out, sizeof(out));
+       iov_consume(vq->dev, iov, out_num, &out, sizeof(out));
 
        /* Find and trim end of iov input array, for our status byte. */
        in = NULL;
@@ -1619,7 +2960,7 @@ static void blk_request(struct virtqueue *vq)
                }
        }
        if (!in)
-               errx(1, "Bad virtblk cmd with no room for status");
+               bad_driver_vq(vq, "Bad virtblk cmd with no room for status");
 
        /*
         * For historical reasons, block operations are expressed in 512 byte
@@ -1627,15 +2968,7 @@ static void blk_request(struct virtqueue *vq)
         */
        off = out.sector * 512;
 
-       /*
-        * In general the virtio block driver is allowed to try SCSI commands.
-        * It'd be nice if we supported eject, for example, but we don't.
-        */
-       if (out.type & VIRTIO_BLK_T_SCSI_CMD) {
-               fprintf(stderr, "Scsi commands unsupported\n");
-               *in = VIRTIO_BLK_S_UNSUPP;
-               wlen = sizeof(*in);
-       } else if (out.type & VIRTIO_BLK_T_OUT) {
+       if (out.type & VIRTIO_BLK_T_OUT) {
                /*
                 * Write
                 *
@@ -1657,7 +2990,7 @@ static void blk_request(struct virtqueue *vq)
                        /* Trim it back to the correct length */
                        ftruncate64(vblk->fd, vblk->len);
                        /* Die, bad Guest, die. */
-                       errx(1, "Write past end %llu+%u", off, ret);
+                       bad_driver_vq(vq, "Write past end %llu+%u", off, ret);
                }
 
                wlen = sizeof(*in);
@@ -1699,11 +3032,11 @@ static void setup_block_file(const char *filename)
        struct vblk_info *vblk;
        struct virtio_blk_config conf;
 
-       /* Creat the device. */
-       dev = new_device("block", VIRTIO_ID_BLOCK);
+       /* Create the device. */
+       dev = new_pci_device("block", VIRTIO_ID_BLOCK, 0x01, 0x80);
 
        /* The device has one virtqueue, where the Guest places requests. */
-       add_virtqueue(dev, VIRTQUEUE_NUM, blk_request);
+       add_pci_virtqueue(dev, blk_request, "request");
 
        /* Allocate the room for our own bookkeeping */
        vblk = dev->priv = malloc(sizeof(*vblk));
@@ -1712,9 +3045,6 @@ static void setup_block_file(const char *filename)
        vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE);
        vblk->len = lseek64(vblk->fd, 0, SEEK_END);
 
-       /* We support FLUSH. */
-       add_feature(dev, VIRTIO_BLK_F_FLUSH);
-
        /* Tell Guest how many sectors this device has. */
        conf.capacity = cpu_to_le64(vblk->len / 512);
 
@@ -1722,20 +3052,19 @@ static void setup_block_file(const char *filename)
         * Tell Guest not to put in too many descriptors at once: two are used
         * for the in and out elements.
         */
-       add_feature(dev, VIRTIO_BLK_F_SEG_MAX);
+       add_pci_feature(dev, VIRTIO_BLK_F_SEG_MAX);
        conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2);
 
-       /* Don't try to put whole struct: we have 8 bit limit. */
-       set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf);
+       set_device_config(dev, &conf, sizeof(struct virtio_blk_config));
 
        verbose("device %u: virtblock %llu sectors\n",
-               ++devices.device_num, le64_to_cpu(conf.capacity));
+               devices.device_num, le64_to_cpu(conf.capacity));
 }
 
 /*L:211
- * Our random number generator device reads from /dev/random into the Guest's
+ * Our random number generator device reads from /dev/urandom into the Guest's
  * input buffers.  The usual case is that the Guest doesn't want random numbers
- * and so has no buffers although /dev/random is still readable, whereas
+ * and so has no buffers although /dev/urandom is still readable, whereas
  * console is the reverse.
  *
  * The same logic applies, however.
@@ -1754,7 +3083,7 @@ static void rng_input(struct virtqueue *vq)
        /* First we need a buffer from the Guests's virtqueue. */
        head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
        if (out_num)
-               errx(1, "Output buffers in rng?");
+               bad_driver_vq(vq, "Output buffers in rng?");
 
        /*
         * Just like the console write, we loop to cover the whole iovec.
@@ -1763,8 +3092,8 @@ static void rng_input(struct virtqueue *vq)
        while (!iov_empty(iov, in_num)) {
                len = readv(rng_info->rfd, iov, in_num);
                if (len <= 0)
-                       err(1, "Read from /dev/random gave %i", len);
-               iov_consume(iov, in_num, NULL, len);
+                       err(1, "Read from /dev/urandom gave %i", len);
+               iov_consume(vq->dev, iov, in_num, NULL, len);
                totlen += len;
        }
 
@@ -1780,17 +3109,20 @@ static void setup_rng(void)
        struct device *dev;
        struct rng_info *rng_info = malloc(sizeof(*rng_info));
 
-       /* Our device's privat info simply contains the /dev/random fd. */
-       rng_info->rfd = open_or_die("/dev/random", O_RDONLY);
+       /* Our device's private info simply contains the /dev/urandom fd. */
+       rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY);
 
        /* Create the new device. */
-       dev = new_device("rng", VIRTIO_ID_RNG);
+       dev = new_pci_device("rng", VIRTIO_ID_RNG, 0xff, 0);
        dev->priv = rng_info;
 
        /* The device has one virtqueue, where the Guest places inbufs. */
-       add_virtqueue(dev, VIRTQUEUE_NUM, rng_input);
+       add_pci_virtqueue(dev, rng_input, "input");
 
-       verbose("device %u: rng\n", devices.device_num++);
+       /* We don't have any configuration space */
+       no_device_config(dev);
+
+       verbose("device %u: rng\n", devices.device_num);
 }
 /* That's the end of device setup. */
 
@@ -1820,17 +3152,23 @@ static void __attribute__((noreturn)) restart_guest(void)
 static void __attribute__((noreturn)) run_guest(void)
 {
        for (;;) {
-               unsigned long notify_addr;
+               struct lguest_pending notify;
                int readval;
 
                /* We read from the /dev/lguest device to run the Guest. */
-               readval = pread(lguest_fd, &notify_addr,
-                               sizeof(notify_addr), cpu_id);
-
-               /* One unsigned long means the Guest did HCALL_NOTIFY */
-               if (readval == sizeof(notify_addr)) {
-                       verbose("Notify on address %#lx\n", notify_addr);
-                       handle_output(notify_addr);
+               readval = pread(lguest_fd, &notify, sizeof(notify), cpu_id);
+               if (readval == sizeof(notify)) {
+                       if (notify.trap == 13) {
+                               verbose("Emulating instruction at %#x\n",
+                                       getreg(eip));
+                               emulate_insn(notify.insn);
+                       } else if (notify.trap == 14) {
+                               verbose("Emulating MMIO at %#x\n",
+                                       getreg(eip));
+                               emulate_mmio(notify.addr, notify.insn);
+                       } else
+                               errx(1, "Unknown trap %i addr %#08x\n",
+                                    notify.trap, notify.addr);
                /* ENOENT means the Guest died.  Reading tells us why. */
                } else if (errno == ENOENT) {
                        char reason[1024] = { 0 };
@@ -1893,11 +3231,9 @@ int main(int argc, char *argv[])
        main_args = argv;
 
        /*
-        * First we initialize the device list.  We keep a pointer to the last
-        * device, and the next interrupt number to use for devices (1:
-        * remember that 0 is used by the timer).
+        * First we initialize the device list.  We remember next interrupt
+        * number to use for devices (1: remember that 0 is used by the timer).
         */
-       devices.lastdev = NULL;
        devices.next_irq = 1;
 
        /* We're CPU 0.  In fact, that's the only CPU possible right now. */
@@ -1921,12 +3257,14 @@ int main(int argc, char *argv[])
                        guest_base = map_zeroed_pages(mem / getpagesize()
                                                      + DEVICE_PAGES);
                        guest_limit = mem;
-                       guest_max = mem + DEVICE_PAGES*getpagesize();
-                       devices.descpage = get_pages(1);
+                       guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize();
                        break;
                }
        }
 
+       /* We always have a console device, and it's always device 1. */
+       setup_console();
+
        /* The options are fairly straight-forward */
        while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
                switch (c) {
@@ -1967,8 +3305,8 @@ int main(int argc, char *argv[])
 
        verbose("Guest base is at %p\n", guest_base);
 
-       /* We always have a console device */
-       setup_console();
+       /* Initialize the (fake) PCI host bridge device. */
+       init_pci_host_bridge();
 
        /* Now we load the kernel */
        start = load_kernel(open_or_die(argv[optind+1], O_RDONLY));
index 6c14afe8c1b18ea1ab4a50aa4b6dc46925d304a1..db1d3a29d97fec67c47a4dd84eca2f7779ec1865 100644 (file)
@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
        memcpy_t fn = r->fn.memcpy;
        int i;
 
-       memcpy_alloc_mem(&src, &dst, len);
+       memcpy_alloc_mem(&dst, &src, len);
 
        if (prefault)
                fn(dst, src, len);
@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
        void *src = NULL, *dst = NULL;
        int i;
 
-       memcpy_alloc_mem(&src, &dst, len);
+       memcpy_alloc_mem(&dst, &src, len);
 
        if (prefault)
                fn(dst, src, len);
index ff95a68741d1ccdb54e54d929f2292e88963a5d1..ac8721ffa6c8c681ccdb563607d5b80917062438 100644 (file)
@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64)
   endif
 endif
 
+ifeq ($(RAW_ARCH),sparc64)
+  ARCH ?= sparc
+endif
+
 ARCH ?= $(RAW_ARCH)
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
index 42ac05aaf8ac1a8bf004c1e664aa10a1853111bc..b32ff3372514da7da33da49cfc2d52755f1db935 100644 (file)
@@ -49,7 +49,7 @@ test-hello.bin:
        $(BUILD)
 
 test-pthread-attr-setaffinity-np.bin:
-       $(BUILD) -Werror -lpthread
+       $(BUILD) -D_GNU_SOURCE -Werror -lpthread
 
 test-stackprotector-all.bin:
        $(BUILD) -Werror -fstack-protector-all
index 0a0d3ecb4e8af81b77222c29f92efcbf4485313b..2b81b72eca23726a03b263fb21efa649c13c9495 100644 (file)
@@ -5,10 +5,11 @@ int main(void)
 {
        int ret = 0;
        pthread_attr_t thread_attr;
+       cpu_set_t cs;
 
        pthread_attr_init(&thread_attr);
        /* don't care abt exact args, just the API itself in libpthread */
-       ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+       ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
 
        return ret;
 }
index 47b78b3f03257385023629696dda95e57d2b5ab7..6da965bdbc2caf8762b5018c6837422a00aac456 100644 (file)
@@ -25,6 +25,10 @@ static int perf_flag_probe(void)
        if (cpu < 0)
                cpu = 0;
 
+       /*
+        * Using -1 for the pid is a workaround to avoid gratuitous jump label
+        * changes.
+        */
        while (1) {
                /* check cloexec flag */
                fd = sys_perf_event_open(&attr, pid, cpu, -1,
@@ -47,16 +51,24 @@ static int perf_flag_probe(void)
                  err, strerror_r(err, sbuf, sizeof(sbuf)));
 
        /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
-       fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+       while (1) {
+               fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+               if (fd < 0 && pid == -1 && errno == EACCES) {
+                       pid = 0;
+                       continue;
+               }
+               break;
+       }
        err = errno;
 
+       if (fd >= 0)
+               close(fd);
+
        if (WARN_ONCE(fd < 0 && err != EBUSY,
                      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
                      err, strerror_r(err, sbuf, sizeof(sbuf))))
                return -1;
 
-       close(fd);
-
        return 0;
 }
 
index c94a9e03ecf15744800d4a6bc68cca28ca70259e..e99a67632831a8e6548fae8a40b654f01b009d1e 100644 (file)
@@ -28,7 +28,7 @@ struct perf_mmap {
        int              mask;
        int              refcnt;
        unsigned int     prev;
-       char             event_copy[PERF_SAMPLE_MAX_SIZE];
+       char             event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
 };
 
 struct perf_evlist {
index b24f9d8727a894ccae13353abad0b951ec1b0916..33b7a2aef71322ab88b93e675c070bfc4a7c7da7 100644 (file)
 #include <symbol/kallsyms.h>
 #include "debug.h"
 
+#ifndef EM_AARCH64
+#define EM_AARCH64     183  /* ARM 64 bit */
+#endif
+
+
 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
 extern char *cplus_demangle(const char *, int);
 
diff --git a/tools/thermal/tmon/.gitignore b/tools/thermal/tmon/.gitignore
new file mode 100644 (file)
index 0000000..06e96be
--- /dev/null
@@ -0,0 +1 @@
+/tmon
index e775adcbd29fdd3c0fccb66a09efc86e1fd95216..0788621c8d760f01f714d377a18c6a390a990fde 100644 (file)
@@ -2,8 +2,8 @@ VERSION = 1.0
 
 BINDIR=usr/bin
 WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
-CFLAGS= -O1 ${WARNFLAGS} -fstack-protector
-CC=gcc
+CFLAGS+= -O1 ${WARNFLAGS} -fstack-protector
+CC=$(CROSS_COMPILE)gcc
 
 CFLAGS+=-D VERSION=\"$(VERSION)\"
 LDFLAGS+=
@@ -16,12 +16,21 @@ INSTALL_CONFIGFILE=install -m 644 -p
 CONFIG_FILE=
 CONFIG_PATH=
 
+# Static builds might require -ltinfo, for instance
+ifneq ($(findstring -static, $(LDFLAGS)),)
+STATIC := --static
+endif
+
+TMON_LIBS=-lm -lpthread
+TMON_LIBS += $(shell pkg-config --libs $(STATIC) panelw ncursesw 2> /dev/null || \
+                    pkg-config --libs $(STATIC) panel ncurses 2> /dev/null || \
+                    echo -lpanel -lncurses)
 
 OBJS = tmon.o tui.o sysfs.o pid.o
 OBJS +=
 
 tmon: $(OBJS) Makefile tmon.h
-       $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS)  -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread
+       $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS)  -o $(TARGET) $(TMON_LIBS)
 
 valgrind: tmon
         sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET)  1> /dev/null
index 0be727cb9892ccc50e412a8a8cdb28f3f2476c68..02d5179803aae1cdfa43ecf23e25bc8e1c9b9e43 100644 (file)
@@ -55,6 +55,8 @@ The \fB-l --log\fP option write data to /var/tmp/tmon.log
 .PP
 The \fB-t --time-interval\fP option sets the polling interval in seconds
 .PP
+The \fB-T --target-temp\fP option sets the initial target temperature
+.PP
 The \fB-v --version\fP option shows the version of \fBtmon \fP
 .PP
 The \fB-z --zone\fP option sets the target therma zone instance to be controlled
index 09b7c3218334ba29dad1192d5dbd5a963dd51553..9aa19652e8e859a34a5db1edbae0c02170499967 100644 (file)
@@ -64,6 +64,7 @@ void usage()
        printf("  -h, --help            show this help message\n");
        printf("  -l, --log             log data to /var/tmp/tmon.log\n");
        printf("  -t, --time-interval   sampling time interval, > 1 sec.\n");
+       printf("  -T, --target-temp     initial target temperature\n");
        printf("  -v, --version         show version\n");
        printf("  -z, --zone            target thermal zone id\n");
 
@@ -219,6 +220,7 @@ static struct option opts[] = {
        { "control", 1, NULL, 'c' },
        { "daemon", 0, NULL, 'd' },
        { "time-interval", 1, NULL, 't' },
+       { "target-temp", 1, NULL, 'T' },
        { "log", 0, NULL, 'l' },
        { "help", 0, NULL, 'h' },
        { "version", 0, NULL, 'v' },
@@ -231,7 +233,7 @@ int main(int argc, char **argv)
 {
        int err = 0;
        int id2 = 0, c;
-       double yk = 0.0; /* controller output */
+       double yk = 0.0, temp; /* controller output */
        int target_tz_index;
 
        if (geteuid() != 0) {
@@ -239,7 +241,7 @@ int main(int argc, char **argv)
                exit(EXIT_FAILURE);
        }
 
-       while ((c = getopt_long(argc, argv, "c:dlht:vgz:", opts, &id2)) != -1) {
+       while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) {
                switch (c) {
                case 'c':
                        no_control = 0;
@@ -254,6 +256,14 @@ int main(int argc, char **argv)
                        if (ticktime < 1)
                                ticktime = 1;
                        break;
+               case 'T':
+                       temp = strtod(optarg, NULL);
+                       if (temp < 0) {
+                               fprintf(stderr, "error: temperature must be positive\n");
+                               return 1;
+                       }
+                       target_temp_user = temp;
+                       break;
                case 'l':
                        printf("Logging data to /var/tmp/tmon.log\n");
                        logging = 1;
index 89f8ef0e15c810936737a51f49f1a87f1f2c5648..b5d1c6b22dd3c8781c7cadd590f2121dfb8328cc 100644 (file)
 
 #include "tmon.h"
 
+#define min(x, y) ({                           \
+       typeof(x) _min1 = (x);                  \
+       typeof(y) _min2 = (y);                  \
+       (void) (&_min1 == &_min2);              \
+       _min1 < _min2 ? _min1 : _min2; })
+
+#define max(x, y) ({                           \
+       typeof(x) _max1 = (x);                  \
+       typeof(y) _max2 = (y);                  \
+       (void) (&_max1 == &_max2);              \
+       _max1 > _max2 ? _max1 : _max2; })
+
 static PANEL *data_panel;
 static PANEL *dialogue_panel;
 static PANEL *top;
@@ -98,6 +110,18 @@ void write_status_bar(int x, char *line)
        wrefresh(status_bar_window);
 }
 
+/* wrap at 5 */
+#define DIAG_DEV_ROWS  5
+/*
+ * list cooling devices + "set temp" entry; wraps after 5 rows, if they fit
+ */
+static int diag_dev_rows(void)
+{
+       int entries = ptdata.nr_cooling_dev + 1;
+       int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2);
+       return min(rows, entries);
+}
+
 void setup_windows(void)
 {
        int y_begin = 1;
@@ -122,7 +146,7 @@ void setup_windows(void)
         * dialogue window is a pop-up, when needed it lays on top of cdev win
         */
 
-       dialogue_window = subwin(stdscr, ptdata.nr_cooling_dev+5, maxx-50,
+       dialogue_window = subwin(stdscr, diag_dev_rows() + 5, maxx-50,
                                DIAG_Y, DIAG_X);
 
        thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor *
@@ -258,21 +282,26 @@ void show_cooling_device(void)
 }
 
 const char DIAG_TITLE[] = "[ TUNABLES ]";
-#define DIAG_DEV_ROWS  5
 void show_dialogue(void)
 {
        int j, x = 0, y = 0;
+       int rows, cols;
        WINDOW *w = dialogue_window;
 
        if (tui_disabled || !w)
                return;
 
+       getmaxyx(w, rows, cols);
+
+       /* Silence compiler 'unused' warnings */
+       (void)cols;
+
        werase(w);
        box(w, 0, 0);
        mvwprintw(w, 0, maxx/4, DIAG_TITLE);
        /* list all the available tunables */
        for (j = 0; j <= ptdata.nr_cooling_dev; j++) {
-               y = j % DIAG_DEV_ROWS;
+               y = j % diag_dev_rows();
                if (y == 0 && j != 0)
                        x += 20;
                if (j == ptdata.nr_cooling_dev)
@@ -283,12 +312,10 @@ void show_dialogue(void)
                                ptdata.cdi[j].type, ptdata.cdi[j].instance);
        }
        wattron(w, A_BOLD);
-       mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?");
+       mvwprintw(w, diag_dev_rows()+1, 1, "Enter Choice [A-Z]?");
        wattroff(w, A_BOLD);
-       /* y size of dialogue win is nr cdev + 5, so print legend
-        * at the bottom line
-        */
-       mvwprintw(w, ptdata.nr_cooling_dev+3, 1,
+       /* print legend at the bottom line */
+       mvwprintw(w, rows - 2, 1,
                "Legend: A=Active, P=Passive, C=Critical");
 
        wrefresh(dialogue_window);
@@ -437,7 +464,7 @@ static void handle_input_choice(int ch)
                        snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ",
                                ptdata.cdi[cdev_id].type,
                                ptdata.cdi[cdev_id].instance);
-               write_dialogue_win(buf, DIAG_DEV_ROWS+2, 2);
+               write_dialogue_win(buf, diag_dev_rows() + 2, 2);
                handle_input_val(cdev_id);
        } else {
                snprintf(buf, sizeof(buf), "Invalid selection %d", ch);
This page took 3.005111 seconds and 5 git commands to generate.