Merge tag 'v3.18-rockchip-clk2' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Mike Turquette <mturquette@linaro.org>
Wed, 1 Oct 2014 18:19:10 +0000 (11:19 -0700)
committer Mike Turquette <mturquette@linaro.org>
Wed, 1 Oct 2014 18:19:10 +0000 (11:19 -0700)
Allow parent rate changes for i2s on rk3288, and add restart handlers
for rockchip as well as s3c24xx.

540 files changed:
Documentation/SubmittingPatches
Documentation/devicetree/bindings/clock/exynos3250-clock.txt
Documentation/devicetree/bindings/clock/gpio-gate-clock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/pxa-clock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/dma/rcar-audmapp.txt
Documentation/devicetree/bindings/input/atmel,maxtouch.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/regulator/tps65090.txt
Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
Documentation/devicetree/bindings/usb/mxs-phy.txt
Documentation/devicetree/bindings/video/analog-tv-connector.txt
Documentation/filesystems/nfs/nfs-rdma.txt
Documentation/filesystems/seq_file.txt
Documentation/gpio/consumer.txt
Documentation/i2c/dev-interface
Documentation/kernel-parameters.txt
Documentation/misc-devices/lis3lv02d
Documentation/power/regulator/consumer.txt
Documentation/power/regulator/design.txt
Documentation/power/regulator/machine.txt
Documentation/power/regulator/overview.txt
Documentation/power/regulator/regulator.txt
MAINTAINERS
Makefile
arch/arc/mm/cache_arc700.c
arch/arm/Kconfig
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/at91rm9200.dtsi
arch/arm/boot/dts/at91sam9g20.dtsi
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/exynos3250.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3xxx-clocks.dtsi
arch/arm/boot/dts/pxa27x.dtsi
arch/arm/boot/dts/ste-snowball.dts
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/common/edma.c
arch/arm/include/asm/xen/page-coherent.h
arch/arm/include/asm/xen/page.h
arch/arm/kvm/handle_exit.c
arch/arm/kvm/init.S
arch/arm/mach-at91/board-dt-rm9200.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-pxa/Makefile
arch/arm/mach-pxa/include/mach/pxa2xx-regs.h
arch/arm/mach-pxa/pxa27x.c
arch/arm/xen/Makefile
arch/arm/xen/enlighten.c
arch/arm/xen/mm32.c [new file with mode: 0644]
arch/arm/xen/p2m.c
arch/arm64/crypto/sha2-ce-glue.c
arch/arm64/include/asm/hw_breakpoint.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/ptrace.h
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/head.S
arch/arm64/kernel/irq.c
arch/arm64/kernel/perf_regs.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/sys_compat.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp-init.S
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/microblaze/Kconfig
arch/microblaze/include/asm/entry.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/include/asm/unistd.h
arch/parisc/Kconfig
arch/parisc/hpux/sys_hpux.c
arch/parisc/include/asm/seccomp.h [new file with mode: 0644]
arch/parisc/include/asm/thread_info.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscall_table.S
arch/powerpc/configs/cell_defconfig
arch/powerpc/configs/celleb_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/configs/g5_defconfig
arch/powerpc/configs/maple_defconfig
arch/powerpc/configs/pasemi_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc64e_defconfig
arch/powerpc/configs/ps3_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/configs/pseries_le_defconfig
arch/powerpc/include/asm/ptrace.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/perf/callchain.c
arch/powerpc/platforms/powernv/opal-hmi.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/s390/include/asm/ipl.h
arch/s390/include/asm/pgtable.h
arch/s390/kernel/ipl.c
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kvm/kvm-s390.c
arch/s390/mm/pgtable.c
arch/sh/mm/gup.c
arch/x86/Kconfig
arch/x86/include/asm/bitops.h
arch/x86/include/asm/pgtable_64.h
arch/x86/xen/mmu.c
block/blk-merge.c
block/blk-mq.c
block/blk-sysfs.c
block/genhd.c
block/partition-generic.c
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/verify_pefile.c
drivers/acpi/acpi_cmos_rtc.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpica/nsprepkg.c
drivers/acpi/battery.c
drivers/acpi/ec.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/ata/ahci.c
drivers/ata/ahci_tegra.c
drivers/ata/ahci_xgene.c
drivers/ata/ata_piix.c
drivers/ata/pata_jmicron.c
drivers/base/regmap/internal.h
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap.c
drivers/bcma/host_pci.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/bus/arm-ccn.c
drivers/clk/Makefile
drivers/clk/clk-gate.c
drivers/clk/clk-gpio-gate.c [new file with mode: 0644]
drivers/clk/clk.c
drivers/clk/hisilicon/clk-hix5hd2.c
drivers/clk/mvebu/armada-370.c
drivers/clk/mvebu/armada-375.c
drivers/clk/mvebu/common.c
drivers/clk/mvebu/common.h
drivers/clk/pxa/Makefile [new file with mode: 0644]
drivers/clk/pxa/clk-pxa.c [new file with mode: 0644]
drivers/clk/pxa/clk-pxa.h [new file with mode: 0644]
drivers/clk/pxa/clk-pxa27x.c [new file with mode: 0644]
drivers/clk/qcom/clk-pll.c
drivers/clk/qcom/clk-pll.h
drivers/clk/qcom/clk-rcg.c
drivers/clk/qcom/clk-rcg.h
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/common.c
drivers/clk/qcom/common.h
drivers/clk/qcom/gcc-ipq806x.c
drivers/clk/qcom/mmcc-msm8960.c
drivers/clk/samsung/clk-exynos3250.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos5260.c
drivers/clk/sunxi/Makefile
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-factors.h
drivers/clk/sunxi/clk-mod0.c [new file with mode: 0644]
drivers/clk/sunxi/clk-sun8i-mbus.c [new file with mode: 0644]
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/tegra/clk.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/clk.c
drivers/clk/ti/clockdomain.c
drivers/clk/ti/divider.c
drivers/cpufreq/cpufreq_opp.c
drivers/cpufreq/intel_pstate.c
drivers/dma/dma-jz4740.c
drivers/gpio/gpio-bt8xx.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/nouveau/core/core/parent.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/hwmon/ds1621.c
drivers/i2c/busses/i2c-at91.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-rk3x.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/qp.c
drivers/input/input-mt.c
drivers/input/keyboard/cap1106.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/elantech.h
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/mouse/synaptics_usb.c
drivers/input/mouse/trackpoint.c
drivers/input/serio/i8042-sparcio.h
drivers/input/serio/serport.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/wm9712.c
drivers/input/touchscreen/wm9713.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/iommu.c
drivers/irqchip/exynos-combiner.c
drivers/irqchip/irq-crossbar.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/leds/led-class.c
drivers/leds/led-core.c
drivers/md/dm-cache-target.c
drivers/misc/lattice-ecp3-config.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/aeroflex/greth.h
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/apm/xgene/Kconfig
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/calxeda/Kconfig
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/stmicro/stmmac/chain_mode.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
drivers/net/fddi/skfp/h/skfbi.h
drivers/net/phy/phy.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ath9k/spectral.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/xen-netback/interface.c
drivers/ntb/ntb_transport.c
drivers/parisc/dino.c
drivers/pci/host/Kconfig
drivers/phy/Kconfig
drivers/phy/phy-exynos5-usbdrd.c
drivers/phy/phy-twl4030-usb.c
drivers/pinctrl/pinctrl-baytrail.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/toshiba_acpi.c
drivers/powercap/intel_rapl.c
drivers/s390/block/dasd_devmap.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_sys.c
drivers/ssb/b43_pci_bridge.c
drivers/staging/android/sync.c
drivers/staging/imx-drm/imx-ldb.c
drivers/staging/imx-drm/ipuv3-plane.c
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/xilinx_uartps.c
drivers/usb/chipidea/ci_hdrc_msm.c
drivers/usb/core/hub.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/u_fs.h
drivers/usb/gadget/udc/fusb300_udc.h
drivers/usb/gadget/udc/net2280.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/pipe.c
drivers/usb/renesas_usbhs/pipe.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/sierra.c
drivers/usb/serial/zte_ev.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/unusual_devs.h
drivers/uwb/lc-dev.c
drivers/video/fbdev/amba-clcd.c
drivers/xen/balloon.c
drivers/xen/gntalloc.c
drivers/xen/manage.c
fs/aio.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/cifs/Kconfig
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smb2file.c
fs/cifs/smb2inode.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/dcache.c
fs/eventpoll.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/f2fs/Kconfig
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/gc.h
fs/f2fs/hash.c
fs/f2fs/inline.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/lockd/svc.c
fs/namei.c
fs/namespace.c
fs/nfs/client.c
fs/nfs/filelayout/filelayout.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/notify/fdinfo.c
fs/pnode.c
fs/sync.c
fs/udf/ialloc.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/super.c
fs/udf/udfdecl.h
fs/ufs/inode.c
fs/ufs/namei.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_file.c
include/acpi/acpi_bus.h
include/dt-bindings/clock/exynos3250.h
include/dt-bindings/clock/exynos4.h
include/dt-bindings/clock/hix5hd2-clock.h
include/dt-bindings/clock/pxa-clock.h [new file with mode: 0644]
include/dt-bindings/clock/tegra124-car.h
include/linux/clk-private.h
include/linux/clk-provider.h
include/linux/clk.h
include/linux/clk/ti.h
include/linux/dcache.h
include/linux/f2fs_fs.h
include/linux/gpio/consumer.h
include/linux/hash.h
include/linux/jiffies.h
include/linux/leds.h
include/linux/mlx4/device.h
include/linux/mtd/nand.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/pm_domain.h
include/linux/regulator/driver.h
include/linux/regulator/machine.h
include/linux/tick.h
include/net/bluetooth/hci_core.h
include/net/netns/ieee802154_6lowpan.h
include/net/regulatory.h
include/net/sctp/sctp.h
include/net/sock.h
include/net/wimax.h
include/sound/soc.h
include/trace/events/irq.h
include/uapi/linux/Kbuild
include/uapi/linux/input.h
include/xen/interface/features.h
kernel/cgroup.c
kernel/compat.c
kernel/futex.c
kernel/irq/chip.c
kernel/kcmp.c
kernel/power/power.h
kernel/power/suspend.c
kernel/power/suspend_test.c
kernel/printk/printk.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/time/alarmtimer.c
kernel/time/tick-sched.c
kernel/time/time.c
kernel/time/timekeeping.c
lib/Kconfig
lib/assoc_array.c
lib/hweight.c
lib/string.c
mm/memblock.c
mm/memcontrol.c
mm/mmap.c
mm/nobootmem.c
mm/percpu-vm.c
mm/percpu.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/ceph/auth_x.c
net/ceph/mon_client.c
net/core/datagram.c
net/core/dev.c
net/core/gen_estimator.c
net/core/gen_stats.c
net/core/skbuff.c
net/core/sock.c
net/ieee802154/6lowpan_rtnl.c
net/ieee802154/reassembly.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv6/addrconf.c
net/ipv6/anycast.c
net/ipv6/mcast.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/l2tp/l2tp_ppp.c
net/mac80211/chan.c
net/mac80211/debugfs_sta.c
net/mac80211/iface.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/sta_info.c
net/mac802154/wpan.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/xt_cgroup.c
net/openvswitch/datapath.c
net/rfkill/rfkill-gpio.c
net/sctp/socket.c
net/socket.c
scripts/checkpatch.pl
security/keys/key.c
sound/firewire/amdtp.c
sound/firewire/amdtp.h
sound/firewire/dice.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/cs4265.c
sound/soc/codecs/da732x.h
sound/soc/codecs/rt5640.c
sound/soc/codecs/rt5677.c
sound/soc/generic/simple-card.c
sound/soc/omap/omap-twl4030.c
sound/soc/sh/rcar/gen.c
sound/soc/soc-core.c
sound/soc/tegra/tegra_asoc_utils.h
tools/usb/usbip/libsrc/usbip_common.h

index 0a523c9a5ff4ec2fee6eefd5e5acef87944460a2..482c74947de02939eebfca089a344373707bf69d 100644 (file)
@@ -794,6 +794,7 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
   <http://www.kroah.com/log/linux/maintainer-03.html>
   <http://www.kroah.com/log/linux/maintainer-04.html>
   <http://www.kroah.com/log/linux/maintainer-05.html>
+  <http://www.kroah.com/log/linux/maintainer-06.html>
 
 NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
   <https://lkml.org/lkml/2005/7/11/336>
index aadc9c59e2d17ea31e2ccdc82a3c688de4106a66..f57d9dd9ea8530ae9c8d02723b1bc15d38002497 100644 (file)
@@ -7,6 +7,8 @@ Required Properties:
 
 - compatible: should be one of the following.
   - "samsung,exynos3250-cmu" - controller compatible with Exynos3250 SoC.
+  - "samsung,exynos3250-cmu-dmc" - controller compatible with
+    Exynos3250 SoC for Dynamic Memory Controller domain.
 
 - reg: physical base address of the controller and length of memory mapped
   region.
@@ -20,7 +22,7 @@ All available clocks are defined as preprocessor macros in
 dt-bindings/clock/exynos3250.h header and can be used in device
 tree sources.
 
-Example 1: An example of a clock controller node is listed below.
+Example 1: Examples of clock controller nodes are listed below.
 
        cmu: clock-controller@10030000 {
                compatible = "samsung,exynos3250-cmu";
@@ -28,6 +30,12 @@ Example 1: An example of a clock controller node is listed below.
                #clock-cells = <1>;
        };
 
+       cmu_dmc: clock-controller@105C0000 {
+               compatible = "samsung,exynos3250-cmu-dmc";
+               reg = <0x105C0000 0x2000>;
+               #clock-cells = <1>;
+       };
+
 Example 2: UART controller node that consumes the clock generated by the clock
           controller. Refer to the standard clock bindings for information
           about 'clocks' and 'clock-names' property.
diff --git a/Documentation/devicetree/bindings/clock/gpio-gate-clock.txt b/Documentation/devicetree/bindings/clock/gpio-gate-clock.txt
new file mode 100644 (file)
index 0000000..d3379ff
--- /dev/null
@@ -0,0 +1,21 @@
+Binding for simple gpio gated clock.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "gpio-gate-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- enable-gpios : GPIO reference for enabling and disabling the clock.
+
+Optional properties:
+- clocks: Maximum of one parent clock is supported.
+
+Example:
+       clock {
+               compatible = "gpio-gate-clock";
+               clocks = <&parentclk>;
+               #clock-cells = <0>;
+               enable-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/pxa-clock.txt b/Documentation/devicetree/bindings/clock/pxa-clock.txt
new file mode 100644 (file)
index 0000000..4b4a902
--- /dev/null
@@ -0,0 +1,16 @@
+* Clock bindings for Marvell PXA chips
+
+Required properties:
+- compatible: Should be "marvell,pxa-clocks"
+- #clock-cells: Should be <1>
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell (see include/.../pxa-clock.h).
+
+Examples:
+
+pxa2xx_clks: pxa2xx_clks@41300004 {
+        compatible = "marvell,pxa-clocks";
+        #clock-cells = <1>;
+        status = "okay";
+};
index d3a5c3c6d6774602667a69e9e93e7511039ef3f4..ed116df9c3e769a2ceef9cb4d1dd1eaf44473f38 100644 (file)
@@ -46,7 +46,11 @@ Required properties:
        "allwinner,sun6i-a31-apb2-div-clk" - for the APB2 gates on A31
        "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
        "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
+       "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
+       "allwinner,sun4i-a10-mmc-output-clk" - for the MMC output clock on A10
+       "allwinner,sun4i-a10-mmc-sample-clk" - for the MMC sample clock on A10
        "allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
+       "allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23
        "allwinner,sun7i-a20-out-clk" - for the external output clocks
        "allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
        "allwinner,sun4i-a10-usb-clk" - for usb gates + resets on A10 / A20
index 9f1d750d76de9ff0d9858366e037a9635f353087..61bca509d7b99f734d1c7f054587aaa29dff8261 100644 (file)
@@ -16,9 +16,9 @@ Example:
 * DMA client
 
 Required properties:
-- dmas:                a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs,
-               where SRS/DRS values are fixed handles, specified in the SoC
-               manual as the value that would be written into the PDMACHCR.
+- dmas:                a list of <[DMA multiplexer phandle] [SRS << 8 | DRS]> pairs.
+               where SRS/DRS are specified in the SoC manual.
+               It will be written into PDMACHCR as high 16-bit parts.
 - dma-names:   a list of DMA channel names, one per "dmas" entry
 
 Example:
index baef432e83690af95ed6e3b49c1636c90f8ba135..0ac23f2ed1040c2c9f820677fdf3f5092a7c1a6d 100644 (file)
@@ -15,6 +15,17 @@ Optional properties for main touchpad device:
     keycode generated by each GPIO. Linux keycodes are defined in
     <dt-bindings/input/input.h>.
 
+- linux,gpio-keymap: When enabled, the SPT_GPIOPWN_T19 object sends messages
+    on GPIO bit changes. An array of up to 8 entries can be provided
+    indicating the Linux keycode mapped to each bit of the status byte,
+    starting at the LSB. Linux keycodes are defined in
+    <dt-bindings/input/input.h>.
+
+    Note: the numbering of the GPIOs and the bit they start at varies between
+    maXTouch devices. You must either refer to the documentation, or
+    experiment to determine which bit corresponds to which input. Use
+    KEY_RESERVED for unused padding values.
+
 Example:
 
        touch@4b {
index 9b03c57563a49d2145fb47c2a5086ff71506fe3f..e45ac3f926b164636b73e4280cf40ddfde314203 100644 (file)
@@ -39,6 +39,10 @@ Optional properties:
   further clocks may be specified in derived bindings.
 - clock-names: One name for each entry in the clocks property, the
   first one should be "stmmaceth".
+- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
+  available this clock is used for programming the Timestamp Addend Register.
+  If not passed then the system clock will be used and this is fine on some
+  platforms.
 
 Examples:
 
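   [Editorial note, not part of the patch: a minimal C sketch of how a driver
   might consume the optional "clk_ptp_ref" clock this binding describes,
   falling back to the system clock rate when the DT omits it. Function and
   parameter names here are illustrative assumptions, not stmmac code.]

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Return the rate to program into the Timestamp Addend logic. */
	static unsigned long example_get_ptp_rate(struct device *dev,
						  unsigned long sysclk_rate)
	{
		struct clk *ptp_ref = devm_clk_get(dev, "clk_ptp_ref");

		/* Binding allows the clock to be absent: use the system clock. */
		if (IS_ERR(ptp_ref))
			return sysclk_rate;

		if (clk_prepare_enable(ptp_ref))
			return sysclk_rate;

		return clk_get_rate(ptp_ref);
	}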
index 340980239ea9f7e1fb35f59da3ebcc0bf5b96ef8..ca69f5e3040cfa48299682dd6371f99c90b49ffa 100644 (file)
@@ -45,8 +45,8 @@ Example:
                infet5-supply = <&some_reg>;
                infet6-supply = <&some_reg>;
                infet7-supply = <&some_reg>;
-               vsys_l1-supply = <&some_reg>;
-               vsys_l2-supply = <&some_reg>;
+               vsys-l1-supply = <&some_reg>;
+               vsys-l2-supply = <&some_reg>;
 
                regulators {
                        dcdc1 {
index 46f344965313f1081dbd7894e738795a8b935815..4eb7997674a09006dfa0a991b634082ddc64974d 100644 (file)
@@ -1,7 +1,7 @@
 ADI AXI-SPDIF controller
 
 Required properties:
- - compatible : Must be "adi,axi-spdif-1.00.a"
+ - compatible : Must be "adi,axi-spdif-tx-1.00.a"
  - reg : Must contain SPDIF core's registers location and length
  - clocks : Pairs of phandle and specifier referencing the controller's clocks.
    The controller expects two clocks, the clock used for the AXI interface and
index cef181a9d8bd6506699cd98889fd36f8f6bbc306..96681c93b86df38788327df42c543738293265ea 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
        * "fsl,imx23-usbphy" for imx23 and imx28
        * "fsl,imx6q-usbphy" for imx6dq and imx6dl
        * "fsl,imx6sl-usbphy" for imx6sl
+       * "fsl,imx6sx-usbphy" for imx6sx
   "fsl,imx23-usbphy" is still a fallback for other strings
 - reg: Should contain registers location and length
 - interrupts: Should contain phy interrupt
index 0218fcdc12994ea7f80ee1ce6246fde733a4b1a3..0c0970c210ab7be0c2a52ba4a977a0de8ddd0d5a 100644 (file)
@@ -2,7 +2,7 @@ Analog TV Connector
 ===================
 
 Required properties:
-- compatible: "composite-connector" or "svideo-connector"
+- compatible: "composite-video-connector" or "svideo-connector"
 
 Optional properties:
 - label: a symbolic name for the connector
@@ -14,7 +14,7 @@ Example
 -------
 
 tv: connector {
-       compatible = "composite-connector";
+       compatible = "composite-video-connector";
        label = "tv";
 
        port {
index e386f7e4bcee1e4c8d5b0f08872c877097e6f9f9..724043858b0834f874aff57df4155f1da3dffad8 100644 (file)
@@ -138,9 +138,9 @@ Installation
   - Build, install, reboot
 
     The NFS/RDMA code will be enabled automatically if NFS and RDMA
-    are turned on. The NFS/RDMA client and server are configured via the hidden
-    SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The
-    value of SUNRPC_XPRT_RDMA will be:
+    are turned on. The NFS/RDMA client and server are configured via the
+    SUNRPC_XPRT_RDMA_CLIENT and SUNRPC_XPRT_RDMA_SERVER config options that both
+    depend on SUNRPC and INFINIBAND. The default value of both options will be:
 
      - N if either SUNRPC or INFINIBAND are N, in this case the NFS/RDMA client
        and server will not be built
@@ -235,8 +235,9 @@ NFS/RDMA Setup
 
   - Start the NFS server
 
-    If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
-    kernel config), load the RDMA transport module:
+    If the NFS/RDMA server was built as a module
+    (CONFIG_SUNRPC_XPRT_RDMA_SERVER=m in kernel config), load the RDMA
+    transport module:
 
     $ modprobe svcrdma
 
@@ -255,8 +256,9 @@ NFS/RDMA Setup
 
   - On the client system
 
-    If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
-    kernel config), load the RDMA client module:
+    If the NFS/RDMA client was built as a module
+    (CONFIG_SUNRPC_XPRT_RDMA_CLIENT=m in kernel config), load the RDMA client
+    module:
 
     $ modprobe xprtrdma.ko
 
index 1fe0ccb1af553b8307999bb0a7cb997326cc44ad..8ea3e90ace07a945ef6fd512396f18ef259754a2 100644 (file)
@@ -235,6 +235,39 @@ be used for more than one file, you can store an arbitrary pointer in the
 private field of the seq_file structure; that value can then be retrieved
 by the iterator functions.
 
+There is also a wrapper function to seq_open() called seq_open_private(). It
+kmallocs a zero filled block of memory and stores a pointer to it in the
+private field of the seq_file structure, returning 0 on success. The
+block size is specified in a third parameter to the function, e.g.:
+
+       static int ct_open(struct inode *inode, struct file *file)
+       {
+               return seq_open_private(file, &ct_seq_ops,
+                                       sizeof(struct mystruct));
+       }
+
+There is also a variant function, __seq_open_private(), which is functionally
+identical except that, if successful, it returns the pointer to the allocated
+memory block, allowing further initialisation e.g.:
+
+       static int ct_open(struct inode *inode, struct file *file)
+       {
+               struct mystruct *p =
+                       __seq_open_private(file, &ct_seq_ops, sizeof(*p));
+
+               if (!p)
+                       return -ENOMEM;
+
+               p->foo = bar; /* initialize my stuff */
+                       ...
+               p->baz = true;
+
+               return 0;
+       }
+
+A corresponding close function, seq_release_private() is available which
+frees the memory allocated in the corresponding open.
+
 The other operations of interest - read(), llseek(), and release() - are
 all implemented by the seq_file code itself. So a virtual file's
 file_operations structure will look like:
index 76546324e968cf70394144beb86817c16586ceb3..6ce544191ca6b6d648cc2d66ed3df8fb7191d871 100644 (file)
@@ -53,7 +53,20 @@ with IS_ERR() (they will never return a NULL pointer). -ENOENT will be returned
 if and only if no GPIO has been assigned to the device/function/index triplet,
 other error codes are used for cases where a GPIO has been assigned but an error
 occurred while trying to acquire it. This is useful to discriminate between mere
-errors and an absence of GPIO for optional GPIO parameters.
+errors and an absence of GPIO for optional GPIO parameters. For the common
+pattern where a GPIO is optional, the gpiod_get_optional() and
+gpiod_get_index_optional() functions can be used. These functions return NULL
+instead of -ENOENT if no GPIO has been assigned to the requested function:
+
+
+       struct gpio_desc *gpiod_get_optional(struct device *dev,
+                                            const char *con_id,
+                                            enum gpiod_flags flags)
+
+       struct gpio_desc *gpiod_get_index_optional(struct device *dev,
+                                                  const char *con_id,
+                                                  unsigned int index,
+                                                  enum gpiod_flags flags)
 
 Device-managed variants of these functions are also defined:
 
@@ -65,6 +78,15 @@ Device-managed variants of these functions are also defined:
                                               unsigned int idx,
                                               enum gpiod_flags flags)
 
+       struct gpio_desc *devm_gpiod_get_optional(struct device *dev,
+                                                 const char *con_id,
+                                                 enum gpiod_flags flags)
+
+       struct gpio_desc * devm_gpiod_get_index_optional(struct device *dev,
+                                                       const char *con_id,
+                                                       unsigned int index,
+                                                       enum gpiod_flags flags)
+
 A GPIO descriptor can be disposed of using the gpiod_put() function:
 
        void gpiod_put(struct gpio_desc *desc)
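   [Editorial note, not part of the patch: a minimal sketch of the optional-GPIO
   pattern described above, using a hypothetical "reset" line in a made-up
   probe function. Only the devm_gpiod_get_optional()/gpiod_set_value() calls
   are real API; everything else is assumed for illustration.]

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int example_probe(struct device *dev)
	{
		struct gpio_desc *reset;

		/* NULL means "no GPIO wired up", which is fine for an optional line. */
		reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(reset))
			return PTR_ERR(reset);	/* a real acquisition error */

		if (reset)
			gpiod_set_value(reset, 1);	/* assert reset only if present */

		return 0;
	}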
index 3e742ba25536123dc4108c6eb2db860584d9b442..2ac78ae1039de5c6bf44b4b32e41de7d742be1d6 100644 (file)
@@ -57,12 +57,12 @@ Well, you are all set up now. You can now use SMBus commands or plain
 I2C to communicate with your device. SMBus commands are preferred if
 the device supports them. Both are illustrated below.
 
-  __u8 register = 0x10; /* Device register to access */
+  __u8 reg = 0x10; /* Device register to access */
   __s32 res;
   char buf[10];
 
   /* Using SMBus commands */
-  res = i2c_smbus_read_word_data(file, register);
+  res = i2c_smbus_read_word_data(file, reg);
   if (res < 0) {
     /* ERROR HANDLING: i2c transaction failed */
   } else {
@@ -70,11 +70,11 @@ the device supports them. Both are illustrated below.
   }
 
   /* Using I2C Write, equivalent of 
-     i2c_smbus_write_word_data(file, register, 0x6543) */
-  buf[0] = register;
+     i2c_smbus_write_word_data(file, reg, 0x6543) */
+  buf[0] = reg;
   buf[1] = 0x43;
   buf[2] = 0x65;
-  if (write(file, buf, 3) ! =3) {
+  if (write(file, buf, 3) != 3) {
     /* ERROR HANDLING: i2c transaction failed */
   }
 
index 5ae8608ca9f58a6331cae454ae7a0c84ab2f6c47..0ce01fb286c4fe103b8522f78635f22890395e87 100644 (file)
@@ -605,11 +605,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        See Documentation/s390/CommonIO for details.
        clk_ignore_unused
                        [CLK]
-                       Keep all clocks already enabled by bootloader on,
-                       even if no driver has claimed them. This is useful
-                       for debug and development, but should not be
-                       needed on a platform with proper driver support.
-                       For more information, see Documentation/clk.txt.
+                       Prevents the clock framework from automatically gating
+                       clocks that have not been explicitly enabled by a Linux
+                       device driver but are enabled in hardware at reset or
+                       by the bootloader/firmware. Note that this does not
+                       force such clocks to be always-on nor does it reserve
+                       those clocks in any way. This parameter is useful for
+                       debug and development, but should not be needed on a
+                       platform with proper driver support.  For more
+                       information, see Documentation/clk.txt.
 
        clock=          [BUGS=X86-32, HW] gettimeofday clocksource override.
                        [Deprecated]
@@ -3541,6 +3545,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                        bogus residue values);
                                s = SINGLE_LUN (the device has only one
                                        Logical Unit);
+                               u = IGNORE_UAS (don't bind to the uas driver);
                                w = NO_WP_DETECT (don't test whether the
                                        medium is write-protected).
                        Example: quirks=0419:aaf5:rl,0421:0433:rc
index af815b9ba413afc6ba3e21beac2d613e771e0bdb..f89960a0ff95e6e0c5e870f67c836be6adb9e0e6 100644 (file)
@@ -59,7 +59,7 @@ acts similar to /dev/rtc and reacts on free-fall interrupts received
 from the device. It supports blocking operations, poll/select and
 fasync operation modes. You must read 1 bytes from the device.  The
 result is number of free-fall interrupts since the last successful
-read (or 255 if number of interrupts would not fit). See the hpfall.c
+read (or 255 if number of interrupts would not fit). See the freefall.c
 file for an example on using the device.
 
 
index 81c0e2b49cd86cb9096c72c56d46935677c2d990..8afb236ca7653ed694af2b35e49eadbabd496152 100644 (file)
@@ -143,8 +143,9 @@ This will cause the core to recalculate the total load on the regulator (based
 on all its consumers) and change operating mode (if necessary and permitted)
 to best match the current operating load.
 
-The load_uA value can be determined from the consumers datasheet. e.g.most
-datasheets have tables showing the max current consumed in certain situations.
+The load_uA value can be determined from the consumer's datasheet. e.g. most
+datasheets have tables showing the maximum current consumed in certain
+situations.
 
 Most consumers will use indirect operating mode control since they have no
 knowledge of the regulator or whether the regulator is shared with other
@@ -173,7 +174,7 @@ Consumers can register interest in regulator events by calling :-
 int regulator_register_notifier(struct regulator *regulator,
                              struct notifier_block *nb);
 
-Consumers can uregister interest by calling :-
+Consumers can unregister interest by calling :-
 
 int regulator_unregister_notifier(struct regulator *regulator,
                                struct notifier_block *nb);
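   [Editorial note, not part of the patch: a minimal sketch of a consumer-side
   regulator event notifier as described above. The event handling and names
   are illustrative assumptions; only the notifier and regulator calls are
   real kernel API.]

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <linux/regulator/consumer.h>

	static int example_reg_event(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		if (event & REGULATOR_EVENT_OVER_CURRENT)
			pr_warn("supply reported over-current\n");

		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_reg_event,
	};

	/* Call after obtaining the regulator with regulator_get(). */
	static int example_register(struct regulator *my_regulator)
	{
		return regulator_register_notifier(my_regulator, &example_nb);
	}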
index f9b56b72b782f0350dc6e50f12d6fad76edd87cd..fdd919b96830b882e54c31d4c7b1f5f468624a5e 100644 (file)
@@ -9,14 +9,14 @@ Safety
 
  - Errors in regulator configuration can have very serious consequences
    for the system, potentially including lasting hardware damage.
- - It is not possible to automatically determine the power confugration
+ - It is not possible to automatically determine the power configuration
    of the system - software-equivalent variants of the same chip may
-   have different power requirments, and not all components with power
+   have different power requirements, and not all components with power
    requirements are visible to software.
 
   => The API should make no changes to the hardware state unless it has
-     specific knowledge that these changes are safe to do perform on
-     this particular system.
+     specific knowledge that these changes are safe to perform on this
+     particular system.
 
 Consumer use cases
 ------------------
index ce63af0a8e35ecab32e2f326d13a9a2b33b62909..757e3b53dc11a8acbb4d048487caab90b0ea8ebd 100644 (file)
@@ -11,7 +11,7 @@ Consider the following machine :-
                +-> [Consumer B @ 3.3V]
 
 The drivers for consumers A & B must be mapped to the correct regulator in
-order to control their power supply. This mapping can be achieved in machine
+order to control their power supplies. This mapping can be achieved in machine
 initialisation code by creating a struct regulator_consumer_supply for
 each regulator.
 
@@ -39,7 +39,7 @@ to the 'Vcc' supply for Consumer A.
 
 Constraints can now be registered by defining a struct regulator_init_data
 for each regulator power domain. This structure also maps the consumers
-to their supply regulator :-
+to their supply regulators :-
 
 static struct regulator_init_data regulator1_data = {
        .constraints = {
index 8ed17587a74bdc006b2d2922b5709f0e16ec08ad..40ca2d6e2742d182933443012aad9f6157eac361 100644 (file)
@@ -36,11 +36,11 @@ Some terms used in this document:-
                    Consumers can be classified into two types:-
 
                    Static: consumer does not change its supply voltage or
-                   current limit. It only needs to enable or disable it's
+                   current limit. It only needs to enable or disable its
                    power supply. Its supply voltage is set by the hardware,
                    bootloader, firmware or kernel board initialisation code.
 
-                   Dynamic: consumer needs to change it's supply voltage or
+                   Dynamic: consumer needs to change its supply voltage or
                    current limit to meet operation demands.
 
 
@@ -156,7 +156,7 @@ relevant to non SoC devices and is split into the following four interfaces:-
       This interface is for machine specific code and allows the creation of
       voltage/current domains (with constraints) for each regulator. It can
       provide regulator constraints that will prevent device damage through
-      overvoltage or over current caused by buggy client drivers. It also
+      overvoltage or overcurrent caused by buggy client drivers. It also
       allows the creation of a regulator tree whereby some regulators are
       supplied by others (similar to a clock tree).
 
index 13902778ae44ceb31f4f0c1c5dc173f63ffd9b8a..b17e5833ce214f963ed9af87f6f7fcf0e2c5ac18 100644 (file)
@@ -13,7 +13,7 @@ Drivers can register a regulator by calling :-
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
                                         const struct regulator_config *config);
 
-This will register the regulators capabilities and operations to the regulator
+This will register the regulator's capabilities and operations to the regulator
 core.
 
 Regulators can be unregistered by calling :-
@@ -23,8 +23,8 @@ void regulator_unregister(struct regulator_dev *rdev);
 
 Regulator Events
 ================
-Regulators can send events (e.g. over temp, under voltage, etc) to consumer
-drivers by calling :-
+Regulators can send events (e.g. overtemperature, undervoltage, etc) to
+consumer drivers by calling :-
 
 int regulator_notifier_call_chain(struct regulator_dev *rdev,
                                  unsigned long event, void *data);
index cf24bb56bab954f2ca4531d9a3b58535849497fe..809ecd680d8829f6148ababe654d30a7882b11fe 100644 (file)
@@ -6424,7 +6424,8 @@ F:        Documentation/scsi/NinjaSCSI.txt
 F:     drivers/scsi/nsp32*
 
 NTB DRIVER
-M:     Jon Mason <jon.mason@intel.com>
+M:     Jon Mason <jdmason@kudzu.us>
+M:     Dave Jiang <dave.jiang@intel.com>
 S:     Supported
 W:     https://github.com/jonmason/ntb/wiki
 T:     git git://github.com/jonmason/ntb.git
@@ -7053,7 +7054,7 @@ S:        Maintained
 F:     drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
-M:     Tomasz Figa <t.figa@samsung.com>
+M:     Tomasz Figa <tomasz.figa@gmail.com>
 M:     Thomas Abraham <thomas.abraham@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -7899,7 +7900,8 @@ S:        Supported
 F:     drivers/media/i2c/s5k5baf.c
 
 SAMSUNG SOC CLOCK DRIVERS
-M:     Tomasz Figa <t.figa@samsung.com>
+M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
+M:     Tomasz Figa <tomasz.figa@gmail.com>
 S:     Supported
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:     drivers/clk/samsung/
@@ -7912,6 +7914,19 @@ S:       Supported
 L:     netdev@vger.kernel.org
 F:     drivers/net/ethernet/samsung/sxgbe/
 
+SAMSUNG USB2 PHY DRIVER
+M:     Kamil Debski <k.debski@samsung.com>
+L:     linux-kernel@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/phy/samsung-phy.txt
+F:     Documentation/phy/samsung-usb2.txt
+F:     drivers/phy/phy-exynos4210-usb2.c
+F:     drivers/phy/phy-exynos4x12-usb2.c
+F:     drivers/phy/phy-exynos5250-usb2.c
+F:     drivers/phy/phy-s5pv210-usb2.c
+F:     drivers/phy/phy-samsung-usb2.c
+F:     drivers/phy/phy-samsung-usb2.h
+
 SERIAL DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-serial@vger.kernel.org
@@ -10070,9 +10085,9 @@ F:      Documentation/x86/
 F:     arch/x86/
 
 X86 PLATFORM DRIVERS
-M:     Matthew Garrett <matthew.garrett@nebula.com>
+M:     Darren Hart <dvhart@infradead.org>
 L:     platform-driver-x86@vger.kernel.org
-T:     git git://cavan.codon.org.uk/platform-drivers-x86.git
+T:     git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S:     Maintained
 F:     drivers/platform/x86/
 
index 2893d7f0fecc6cdb5b4e45f182b20ac103a0d0c0..036b7331d7b4913c99deff17c1e89c4cc0ad4127 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index e88ddbf990e34cfa83bb6a3138124750ecc0762b..9e1142729fd14c003c4f302a8305de1d735ab7a2 100644 (file)
@@ -427,7 +427,7 @@ struct ic_inv_args {
 
 static void __ic_line_inv_vaddr_helper(void *info)
 {
-        struct ic_inv *ic_inv_args = (struct ic_inv_args *) info;
+        struct ic_inv_args *ic_inv = info;
 
         __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
 }
index 32cbbd5659023cffe04faf6c97eb7ee5be196585..5d750155686b35e9fab734886dd0d21a6b66901a 100644 (file)
@@ -621,6 +621,7 @@ config ARCH_PXA
        select ARCH_REQUIRE_GPIOLIB
        select ARM_CPU_SUSPEND if PM
        select AUTO_ZRELADDR
+       select COMMON_CLK if PXA27x
        select CLKDEV_LOOKUP
        select CLKSRC_MMIO
        select CLKSRC_OF
index 9b3d2ba82f13a1b6aaf725c0e529255310726052..8689949bdba3f7fad944beaf77ce9b5efa6dbe59 100644 (file)
 
                        usb1: usb@48390000 {
                                compatible = "synopsys,dwc3";
-                               reg = <0x48390000 0x17000>;
+                               reg = <0x48390000 0x10000>;
                                interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
                                phys = <&usb2_phy1>;
                                phy-names = "usb2-phy";
 
                        usb2: usb@483d0000 {
                                compatible = "synopsys,dwc3";
-                               reg = <0x483d0000 0x17000>;
+                               reg = <0x483d0000 0x10000>;
                                interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                                phys = <&usb2_phy2>;
                                phy-names = "usb2-phy";
index 646a6eade788f0114847bfa254f9bb7fcff272be..e7ac47fa6615e33ee52e3ef58025721f49c7eb08 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c0_pins>;
-       clock-frequency = <400000>;
+       clock-frequency = <100000>;
 
        tps65218: tps65218@24 {
                reg = <0x24>;
        ranges = <0 0 0 0x01000000>;    /* minimum GPMC partition = 16MB */
        nand@0,0 {
                reg = <0 0 4>;          /* device IO registers */
-               ti,nand-ecc-opt = "bch8";
+               ti,nand-ecc-opt = "bch16";
                ti,elm-id = <&elm>;
                nand-bus-width = <8>;
                gpmc,device-width = <1>;
                gpmc,rd-cycle-ns = <40>;
                gpmc,wr-cycle-ns = <40>;
                gpmc,wait-pin = <0>;
-               gpmc,wait-on-read;
-               gpmc,wait-on-write;
                gpmc,bus-turnaround-ns = <0>;
                gpmc,cycle2cycle-delay-ns = <0>;
                gpmc,clk-activation-ns = <0>;
index ed7dd23959155598aba377c27d727e8dc481c551..ac3e4859935f1ad31da67107eda970eec6f526ba 100644 (file)
 };
 
 &gpmc {
-       status = "okay";
+       status = "okay";        /* Disable QSPI when enabling GPMC (NAND) */
        pinctrl-names = "default";
        pinctrl-0 = <&nand_flash_x8>;
        ranges = <0 0 0x08000000 0x10000000>;   /* CS0: NAND */
        nand@0,0 {
                reg = <0 0 0>; /* CS0, offset 0 */
-               ti,nand-ecc-opt = "bch8";
+               ti,nand-ecc-opt = "bch16";
                ti,elm-id = <&elm>;
                nand-bus-width = <8>;
                gpmc,device-width = <1>;
                gpmc,access-ns = <30>; /* tCEA + 4*/
                gpmc,rd-cycle-ns = <40>;
                gpmc,wr-cycle-ns = <40>;
-               gpmc,wait-on-read = "true";
-               gpmc,wait-on-write = "true";
+               gpmc,wait-pin = <0>;
                gpmc,bus-turnaround-ns = <0>;
                gpmc,cycle2cycle-delay-ns = <0>;
                gpmc,clk-activation-ns = <0>;
 };
 
 &qspi {
-       status = "okay";
+       status = "disabled";    /* Disable GPMC (NAND) when enabling QSPI */
        pinctrl-names = "default";
        pinctrl-0 = <&qspi1_default>;
 
index 65ccf564b9a5636eabb7af6cf2697d5c28dccd74..6c97d4af61eec9e36da60ab3257c508e0ef3ba7e 100644 (file)
                                usb: usbck {
                                        compatible = "atmel,at91rm9200-clk-usb";
                                        #clock-cells = <0>;
-                                       atmel,clk-divisors = <1 2>;
+                                       atmel,clk-divisors = <1 2 0 0>;
                                        clocks = <&pllb>;
                                };
 
index 31f7652612fc8f2f1e284b74eb6f9bef7e8e7986..4e0abbd9d6553e71d6881b074196d62dea4ac04b 100644 (file)
@@ -40,6 +40,7 @@
                                };
 
                                pllb: pllbck {
+                                       compatible = "atmel,at91sam9g20-clk-pllb";
                                        atmel,clk-input-range = <2000000 32000000>;
                                        atmel,pll-clk-output-ranges = <30000000 100000000 0 0>;
                                };
index 50f8022905a1f36e415f93f8638055993d5f4b5d..e03fbf3c6889120e9fce89d0fdb39fb70fe7b222 100644 (file)
@@ -8,6 +8,7 @@
 /dts-v1/;
 
 #include "dra74x.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 
 / {
        model = "TI DRA742";
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
        };
+
+       vtt_fixed: fixedregulator-vtt {
+               compatible = "regulator-fixed";
+               regulator-name = "vtt_fixed";
+               regulator-min-microvolt = <1350000>;
+               regulator-max-microvolt = <1350000>;
+               regulator-always-on;
+               regulator-boot-on;
+               enable-active-high;
+               gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+       };
 };
 
 &dra7_pmx_core {
+       pinctrl-names = "default";
+       pinctrl-0 = <&vtt_pin>;
+
+       vtt_pin: pinmux_vtt_pin {
+               pinctrl-single,pins = <
+                       0x3b4 (PIN_OUTPUT | MUX_MODE14) /* spi1_cs1.gpio7_11 */
+               >;
+       };
+
        i2c1_pins: pinmux_i2c1_pins {
                pinctrl-single,pins = <
                        0x400 (PIN_INPUT | MUX_MODE0) /* i2c1_sda */
 
        i2c3_pins: pinmux_i2c3_pins {
                pinctrl-single,pins = <
-                       0x410 (PIN_INPUT | MUX_MODE0) /* i2c3_sda */
-                       0x414 (PIN_INPUT | MUX_MODE0) /* i2c3_scl */
+                       0x288 (PIN_INPUT | MUX_MODE9) /* gpio6_14.i2c3_sda */
+                       0x28c (PIN_INPUT | MUX_MODE9) /* gpio6_15.i2c3_scl */
                >;
        };
 
        mcspi1_pins: pinmux_mcspi1_pins {
                pinctrl-single,pins = <
-                       0x3a4 (PIN_INPUT | MUX_MODE0) /* spi2_clk */
-                       0x3a8 (PIN_INPUT | MUX_MODE0) /* spi2_d1 */
-                       0x3ac (PIN_INPUT | MUX_MODE0) /* spi2_d0 */
-                       0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */
-                       0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs1 */
-                       0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs2 */
-                       0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs3 */
+                       0x3a4 (PIN_INPUT | MUX_MODE0) /* spi1_sclk */
+                       0x3a8 (PIN_INPUT | MUX_MODE0) /* spi1_d1 */
+                       0x3ac (PIN_INPUT | MUX_MODE0) /* spi1_d0 */
+                       0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs0 */
+                       0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs2.hdmi1_hpd */
+                       0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs3.hdmi1_cec */
                >;
        };
 
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c3_pins>;
-       clock-frequency = <3400000>;
+       clock-frequency = <400000>;
 };
 
 &mcspi1 {
                        reg = <0x001c0000 0x00020000>;
                };
                partition@7 {
-                       label = "NAND.u-boot-env";
+                       label = "NAND.u-boot-env.backup1";
                        reg = <0x001e0000 0x00020000>;
                };
                partition@8 {
 &usb2_phy2 {
        phy-supply = <&ldousb_reg>;
 };
+
+&gpio7 {
+       ti,no-reset-on-init;
+       ti,no-idle-on-init;
+};
index 1d52de6370d58a65022b0bc59bf2f07516066308..72bf1b5737885cc0f8e7ae26595bc86a05d5031f 100644 (file)
                        #clock-cells = <1>;
                };
 
+               cmu_dmc: clock-controller@105C0000 {
+                       compatible = "samsung,exynos3250-cmu-dmc";
+                       reg = <0x105C0000 0x2000>;
+                       #clock-cells = <1>;
+               };
+
                rtc: rtc@10070000 {
                        compatible = "samsung,s3c6410-rtc";
                        reg = <0x10070000 0x100>;
index 1fe45d1f75ec8d52aa8dd59a1e344a931fc32576..4361777a08d81090cca597271a5f7a6d3b22ea25 100644 (file)
@@ -93,7 +93,7 @@
        };
 
        tv: connector {
-               compatible = "composite-connector";
+               compatible = "composite-video-connector";
                label = "tv";
 
                port {
index e47ff69dcf7053e7bb22d3f5d40e7cd09628687c..5c375003bad106216109b5626264300433dabd1d 100644 (file)
                ti,bit-shift = <0x1e>;
                reg = <0x0d00>;
                ti,set-bit-to-disable;
+               ti,set-rate-parent;
        };
 
        dpll4_m6_ck: dpll4_m6_ck {
index a7054694598594cb91aefb77b791c6f3f1706018..80fc5d7e9ef9685cc04fb8771f6205aaeaf5932a 100644 (file)
@@ -1,5 +1,6 @@
 /* The pxa3xx skeleton simply augments the 2xx version */
-/include/ "pxa2xx.dtsi"
+#include "pxa2xx.dtsi"
+#include "dt-bindings/clock/pxa2xx-clock.h"
 
 / {
        model = "Marvell PXA27x familiy SoC";
                        #pwm-cells = <1>;
                };
        };
+
+       clocks {
+              /*
+               * The muxing of external clocks/internal dividers for osc* clock
+               * sources has been hidden under the carpet by now.
+               */
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               pxa2xx_clks: pxa2xx_clks@41300004 {
+                       compatible = "marvell,pxa-clocks";
+                       #clock-cells = <1>;
+                       status = "okay";
+               };
+       };
+
 };
index 4a2000c620ad7a6a77f3b7aec18efc9a79e3a94e..3e97a669f15ef484107cdef76dbac8051d97202d 100644 (file)
                msp2: msp@80117000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&msp2_default_mode>;
-                       status = "okay";
                };
 
                msp3: msp@80125000 {
index 24b0ad3a7c07f53a2af4bb1e22c3ba4f20e3d03d..82094283473d92e54c67c9e3d2e866e077be6073 100644 (file)
 
                mbus_clk: clk@01c2015c {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       compatible = "allwinner,sun5i-a13-mbus-clk";
                        reg = <0x01c2015c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
                        clock-output-names = "mbus";
index bf86e65dd167bf2aad4131c1060d57698fedfc77..53e0e72a9d327947a6a9cc253db96f94cc60c9a3 100644 (file)
 
                mbus_clk: clk@01c2015c {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       compatible = "allwinner,sun5i-a13-mbus-clk";
                        reg = <0x01c2015c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
                        clock-output-names = "mbus";
index 4011628c738101b0004b6bb9da7f4c0ee3fa1e70..2edef89ef0a58c258c6ebd7908cbf1af742c60c0 100644 (file)
 
                mbus_clk: clk@01c2015c {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       compatible = "allwinner,sun5i-a13-mbus-clk";
                        reg = <0x01c2015c 0x4>;
                        clocks = <&osc24M>, <&pll6 2>, <&pll5 1>;
                        clock-output-names = "mbus";
index 88099175fc56c64be13c5a31e5cf05ed18b60e50..d86771abbf57a3760e65c4718a4fbfc6bad8359b 100644 (file)
@@ -1443,14 +1443,14 @@ void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
 EXPORT_SYMBOL(edma_assign_channel_eventq);
 
 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
-                             struct edma *edma_cc)
+                             struct edma *edma_cc, int cc_id)
 {
        int i;
        u32 value, cccfg;
        s8 (*queue_priority_map)[2];
 
        /* Decode the eDMA3 configuration from CCCFG register */
-       cccfg = edma_read(0, EDMA_CCCFG);
+       cccfg = edma_read(cc_id, EDMA_CCCFG);
 
        value = GET_NUM_REGN(cccfg);
        edma_cc->num_region = BIT(value);
@@ -1464,7 +1464,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
        value = GET_NUM_EVQUE(cccfg);
        edma_cc->num_tc = value + 1;
 
-       dev_dbg(dev, "eDMA3 HW configuration (cccfg: 0x%08x):\n", cccfg);
+       dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id,
+               cccfg);
        dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
        dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
        dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
@@ -1684,7 +1685,7 @@ static int edma_probe(struct platform_device *pdev)
                        return -ENOMEM;
 
                /* Get eDMA3 configuration from IP */
-               ret = edma_setup_from_hw(dev, info[j], edma_cc[j]);
+               ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j);
                if (ret)
                        return ret;
 
index 1109017499e52f05e92ca7056831323f4ed330e9..e8275ea88e8806d79b784deef60b57c6cf76a8ad 100644 (file)
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
-{
-       if (__generic_dma_ops(hwdev)->unmap_page)
-               __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+               struct dma_attrs *attrs);
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-               __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       if (__generic_dma_ops(hwdev)->sync_single_for_device)
-               __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
index ded062f9b358038c05fa074816706c40619e6408..135c24a5ba262b76529fa1c3dbe088d098de84a5 100644 (file)
@@ -33,7 +33,6 @@ typedef struct xpaddr {
 #define INVALID_P2M_ENTRY      (~0UL)
 
 unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
 extern struct rb_root phys_to_mach;
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
-       unsigned long pfn;
-
-       if (phys_to_mach.rb_node != NULL) {
-               pfn = __mfn_to_pfn(mfn);
-               if (pfn != INVALID_P2M_ENTRY)
-                       return pfn;
-       }
-
        return mfn;
 }
 
index 4c979d466cc1681c4b3efc70623345eee5974b78..a96a8043277c3de69cc460a5b2a3f83a36fc30ed 100644 (file)
@@ -93,6 +93,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
        else
                kvm_vcpu_block(vcpu);
 
+       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
        return 1;
 }
 
index 991415d978b6020a958ac44a886103ee83aa764c..3988e72d16ff9f20efdad72c51dab74b01f559b2 100644 (file)
@@ -99,6 +99,10 @@ __do_hyp_init:
        mrc     p15, 0, r0, c10, c2, 1
        mcr     p15, 4, r0, c10, c2, 1
 
+       @ Invalidate the stale TLBs from Bootloader
+       mcr     p15, 4, r0, c8, c7, 0   @ TLBIALLH
+       dsb     ish
+
        @ Set the HSCTLR to:
        @  - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
        @  - Endianness: Kernel config
index 3a185faee795b589725bffd222e2299eac5cffc3..f4b6e91843e44565257982eb7297101bbdbcd4bb 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/gpio.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/clk-provider.h>
 
 #include <asm/setup.h>
 #include <asm/irq.h>
@@ -35,13 +36,21 @@ static void __init at91rm9200_dt_init_irq(void)
        of_irq_init(irq_of_match);
 }
 
+static void __init at91rm9200_dt_timer_init(void)
+{
+#if defined(CONFIG_COMMON_CLK)
+       of_clk_init(NULL);
+#endif
+       at91rm9200_timer_init();
+}
+
 static const char *at91rm9200_dt_board_compat[] __initdata = {
        "atmel,at91rm9200",
        NULL
 };
 
 DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200 (Device Tree)")
-       .init_time      = at91rm9200_timer_init,
+       .init_time      = at91rm9200_dt_timer_init,
        .map_io         = at91_map_io,
        .handle_irq     = at91_aic_handle_irq,
        .init_early     = at91rm9200_dt_initialize,
index 9f42d5437fcc52e6cb3693f5dbbab18b29c6aebf..2f97228f188aa3a835146d81bb7bffc2edcb039d 100644 (file)
@@ -1207,8 +1207,7 @@ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
                }
        }
 
-       if ((p->wait_on_read || p->wait_on_write) &&
-           (p->wait_pin > gpmc_nr_waitpins)) {
+       if (p->wait_pin > gpmc_nr_waitpins) {
                pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
                return -EINVAL;
        }
@@ -1288,8 +1287,8 @@ void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
                p->wait_on_write = of_property_read_bool(np,
                                                         "gpmc,wait-on-write");
                if (!p->wait_on_read && !p->wait_on_write)
-                       pr_warn("%s: read/write wait monitoring not enabled!\n",
-                               __func__);
+                       pr_debug("%s: rd/wr wait monitoring not enabled!\n",
+                                __func__);
        }
 }
 
index 5d0667c119f6e6866e23d25d344eb47ddb34472a..a1b82a994842b51c20e4dfccdac7f752c4d740c1 100644 (file)
@@ -734,8 +734,16 @@ int __init omap_clk_init(void)
        ti_clk_init_features();
 
        ret = of_prcm_init();
-       if (!ret)
-               ret = omap_clk_soc_init();
+       if (ret)
+               return ret;
+
+       of_clk_init(NULL);
+
+       ti_dt_clk_init_retry_clks();
+
+       ti_dt_clockdomains_setup();
+
+       ret = omap_clk_soc_init();
 
        return ret;
 }
index 76ca320f007c2158cae69eb494b6215f98acbcbd..3b890807f5e6bcc77d0c72f7168c2bb2fd1c55b1 100644 (file)
@@ -525,8 +525,6 @@ int __init of_prcm_init(void)
                memmap_index++;
        }
 
-       ti_dt_clockdomains_setup();
-
        return 0;
 }
 
index 2fe1824c6dcb515fc1eafe78f659d75f1990116a..f09259a71913fec3d96e2a95888eb8064f7418b2 100644 (file)
@@ -3,16 +3,15 @@
 #
 
 # Common support (must be linked before board specific support)
-obj-y                          += clock.o devices.o generic.o irq.o \
-                                  reset.o
+obj-y                          += devices.o generic.o irq.o reset.o
 obj-$(CONFIG_PM)               += pm.o sleep.o standby.o
 
 # Generic drivers that other drivers may depend upon
 
 # SoC-specific code
-obj-$(CONFIG_PXA25x)           += mfp-pxa2xx.o clock-pxa2xx.o pxa2xx.o pxa25x.o
-obj-$(CONFIG_PXA27x)           += mfp-pxa2xx.o clock-pxa2xx.o pxa2xx.o pxa27x.o
-obj-$(CONFIG_PXA3xx)           += mfp-pxa3xx.o clock-pxa3xx.o pxa3xx.o smemc.o pxa3xx-ulpi.o
+obj-$(CONFIG_PXA25x)           += mfp-pxa2xx.o clock.o clock-pxa2xx.o pxa2xx.o pxa25x.o
+obj-$(CONFIG_PXA27x)           += mfp-pxa2xx.o pxa2xx.o pxa27x.o
+obj-$(CONFIG_PXA3xx)           += mfp-pxa3xx.o clock.o clock-pxa3xx.o pxa3xx.o smemc.o pxa3xx-ulpi.o
 obj-$(CONFIG_CPU_PXA300)       += pxa300.o
 obj-$(CONFIG_CPU_PXA320)       += pxa320.o
 obj-$(CONFIG_CPU_PXA930)       += pxa930.o
index ee6ced1cea7f1705cda89be3bff0b3d51356e4cf..f1dd62946b3699f6f25b7dd2683d98de9df141a1 100644 (file)
 #define CCCR_M_MASK    0x0060  /* Memory Frequency to Run Mode Frequency Multiplier */
 #define CCCR_L_MASK    0x001f  /* Crystal Frequency to Memory Frequency Multiplier */
 
+#define CCCR_CPDIS_BIT (31)
+#define CCCR_PPDIS_BIT (30)
+#define CCCR_LCD_26_BIT        (27)
+#define CCCR_A_BIT     (25)
+
+#define CCSR_N2_MASK   CCCR_N_MASK
+#define CCSR_M_MASK    CCCR_M_MASK
+#define CCSR_L_MASK    CCCR_L_MASK
+#define CCSR_N2_SHIFT  7
+
 #define CKEN_AC97CONF   (31)    /* AC97 Controller Configuration */
 #define CKEN_CAMERA    (24)    /* Camera Interface Clock Enable */
 #define CKEN_SSP1      (23)    /* SSP1 Unit Clock Enable */
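A short orientation sketch for the new CCSR helpers, mirroring the arithmetic of the legacy pxa27x_get_clk_frequency_khz() that is removed later in this series; the 13 MHz crystal is an assumption carried over from that code.

        /* Sketch only: decode CCSR into run and turbo clock rates. */
        unsigned long ccsr = CCSR;
        unsigned int l  = ccsr & CCSR_L_MASK;                   /* run-mode multiplier */
        unsigned int n2 = (ccsr >> CCSR_N2_SHIFT) & 0xf;        /* 2 x turbo multiplier */
        unsigned long run_hz   = l * 13000000UL;                /* 13 MHz crystal assumed */
        unsigned long turbo_hz = (run_hz * n2) / 2;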
index b040d7d1488839085e5fb5fedf5bee29b0b15803..668087f4e4923783efe82cfdab2c82953458221a 100644 (file)
@@ -37,7 +37,8 @@
 
 #include "generic.h"
 #include "devices.h"
-#include "clock.h"
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
 
 void pxa27x_clear_otgph(void)
 {
@@ -73,174 +74,6 @@ void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio)
 }
 EXPORT_SYMBOL_GPL(pxa27x_configure_ac97reset);
 
-/* Crystal clock: 13MHz */
-#define BASE_CLK       13000000
-
-/*
- * Get the clock frequency as reflected by CCSR and the turbo flag.
- * We assume these values have been applied via a fcs.
- * If info is not 0 we also display the current settings.
- */
-unsigned int pxa27x_get_clk_frequency_khz(int info)
-{
-       unsigned long ccsr, clkcfg;
-       unsigned int l, L, m, M, n2, N, S;
-               int cccr_a, t, ht, b;
-
-       ccsr = CCSR;
-       cccr_a = CCCR & (1 << 25);
-
-       /* Read clkcfg register: it has turbo, b, half-turbo (and f) */
-       asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg) );
-       t  = clkcfg & (1 << 0);
-       ht = clkcfg & (1 << 2);
-       b  = clkcfg & (1 << 3);
-
-       l  = ccsr & 0x1f;
-       n2 = (ccsr>>7) & 0xf;
-       m  = (l <= 10) ? 1 : (l <= 20) ? 2 : 4;
-
-       L  = l * BASE_CLK;
-       N  = (L * n2) / 2;
-       M  = (!cccr_a) ? (L/m) : ((b) ? L : (L/2));
-       S  = (b) ? L : (L/2);
-
-       if (info) {
-               printk( KERN_INFO "Run Mode clock: %d.%02dMHz (*%d)\n",
-                       L / 1000000, (L % 1000000) / 10000, l );
-               printk( KERN_INFO "Turbo Mode clock: %d.%02dMHz (*%d.%d, %sactive)\n",
-                       N / 1000000, (N % 1000000)/10000, n2 / 2, (n2 % 2)*5,
-                       (t) ? "" : "in" );
-               printk( KERN_INFO "Memory clock: %d.%02dMHz (/%d)\n",
-                       M / 1000000, (M % 1000000) / 10000, m );
-               printk( KERN_INFO "System bus clock: %d.%02dMHz \n",
-                       S / 1000000, (S % 1000000) / 10000 );
-       }
-
-       return (t) ? (N/1000) : (L/1000);
-}
-
-/*
- * Return the current mem clock frequency as reflected by CCCR[A], B, and L
- */
-static unsigned long clk_pxa27x_mem_getrate(struct clk *clk)
-{
-       unsigned long ccsr, clkcfg;
-       unsigned int l, L, m, M;
-               int cccr_a, b;
-
-       ccsr = CCSR;
-       cccr_a = CCCR & (1 << 25);
-
-       /* Read clkcfg register: it has turbo, b, half-turbo (and f) */
-       asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg) );
-       b = clkcfg & (1 << 3);
-
-       l = ccsr & 0x1f;
-       m = (l <= 10) ? 1 : (l <= 20) ? 2 : 4;
-
-       L = l * BASE_CLK;
-       M = (!cccr_a) ? (L/m) : ((b) ? L : (L/2));
-
-       return M;
-}
-
-static const struct clkops clk_pxa27x_mem_ops = {
-       .enable         = clk_dummy_enable,
-       .disable        = clk_dummy_disable,
-       .getrate        = clk_pxa27x_mem_getrate,
-};
-
-/*
- * Return the current LCD clock frequency in units of 10kHz as
- */
-static unsigned int pxa27x_get_lcdclk_frequency_10khz(void)
-{
-       unsigned long ccsr;
-       unsigned int l, L, k, K;
-
-       ccsr = CCSR;
-
-       l = ccsr & 0x1f;
-       k = (l <= 7) ? 1 : (l <= 16) ? 2 : 4;
-
-       L = l * BASE_CLK;
-       K = L / k;
-
-       return (K / 10000);
-}
-
-static unsigned long clk_pxa27x_lcd_getrate(struct clk *clk)
-{
-       return pxa27x_get_lcdclk_frequency_10khz() * 10000;
-}
-
-static const struct clkops clk_pxa27x_lcd_ops = {
-       .enable         = clk_pxa2xx_cken_enable,
-       .disable        = clk_pxa2xx_cken_disable,
-       .getrate        = clk_pxa27x_lcd_getrate,
-};
-
-static DEFINE_PXA2_CKEN(pxa27x_ffuart, FFUART, 14857000, 1);
-static DEFINE_PXA2_CKEN(pxa27x_btuart, BTUART, 14857000, 1);
-static DEFINE_PXA2_CKEN(pxa27x_stuart, STUART, 14857000, 1);
-static DEFINE_PXA2_CKEN(pxa27x_i2s, I2S, 14682000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_i2c, I2C, 32842000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_usb, USB, 48000000, 5);
-static DEFINE_PXA2_CKEN(pxa27x_mmc, MMC, 19500000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_ficp, FICP, 48000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_usbhost, USBHOST, 48000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_pwri2c, PWRI2C, 13000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_keypad, KEYPAD, 32768, 0);
-static DEFINE_PXA2_CKEN(pxa27x_ssp1, SSP1, 13000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_ssp2, SSP2, 13000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_ssp3, SSP3, 13000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_pwm0, PWM0, 13000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_pwm1, PWM1, 13000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_ac97, AC97, 24576000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_ac97conf, AC97CONF, 24576000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_msl, MSL, 48000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_usim, USIM, 48000000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_memstk, MEMSTK, 19500000, 0);
-static DEFINE_PXA2_CKEN(pxa27x_im, IM, 0, 0);
-static DEFINE_PXA2_CKEN(pxa27x_memc, MEMC, 0, 0);
-
-static DEFINE_CK(pxa27x_lcd, LCD, &clk_pxa27x_lcd_ops);
-static DEFINE_CK(pxa27x_camera, CAMERA, &clk_pxa27x_lcd_ops);
-static DEFINE_CLK(pxa27x_mem, &clk_pxa27x_mem_ops, 0, 0);
-
-static struct clk_lookup pxa27x_clkregs[] = {
-       INIT_CLKREG(&clk_pxa27x_lcd, "pxa2xx-fb", NULL),
-       INIT_CLKREG(&clk_pxa27x_camera, "pxa27x-camera.0", NULL),
-       INIT_CLKREG(&clk_pxa27x_ffuart, "pxa2xx-uart.0", NULL),
-       INIT_CLKREG(&clk_pxa27x_btuart, "pxa2xx-uart.1", NULL),
-       INIT_CLKREG(&clk_pxa27x_stuart, "pxa2xx-uart.2", NULL),
-       INIT_CLKREG(&clk_pxa27x_i2s, "pxa2xx-i2s", NULL),
-       INIT_CLKREG(&clk_pxa27x_i2c, "pxa2xx-i2c.0", NULL),
-       INIT_CLKREG(&clk_pxa27x_usb, "pxa27x-udc", NULL),
-       INIT_CLKREG(&clk_pxa27x_mmc, "pxa2xx-mci.0", NULL),
-       INIT_CLKREG(&clk_pxa27x_stuart, "pxa2xx-ir", "UARTCLK"),
-       INIT_CLKREG(&clk_pxa27x_ficp, "pxa2xx-ir", "FICPCLK"),
-       INIT_CLKREG(&clk_pxa27x_usbhost, "pxa27x-ohci", NULL),
-       INIT_CLKREG(&clk_pxa27x_pwri2c, "pxa2xx-i2c.1", NULL),
-       INIT_CLKREG(&clk_pxa27x_keypad, "pxa27x-keypad", NULL),
-       INIT_CLKREG(&clk_pxa27x_ssp1, "pxa27x-ssp.0", NULL),
-       INIT_CLKREG(&clk_pxa27x_ssp2, "pxa27x-ssp.1", NULL),
-       INIT_CLKREG(&clk_pxa27x_ssp3, "pxa27x-ssp.2", NULL),
-       INIT_CLKREG(&clk_pxa27x_pwm0, "pxa27x-pwm.0", NULL),
-       INIT_CLKREG(&clk_pxa27x_pwm1, "pxa27x-pwm.1", NULL),
-       INIT_CLKREG(&clk_pxa27x_ac97, NULL, "AC97CLK"),
-       INIT_CLKREG(&clk_pxa27x_ac97conf, NULL, "AC97CONFCLK"),
-       INIT_CLKREG(&clk_pxa27x_msl, NULL, "MSLCLK"),
-       INIT_CLKREG(&clk_pxa27x_usim, NULL, "USIMCLK"),
-       INIT_CLKREG(&clk_pxa27x_memstk, NULL, "MSTKCLK"),
-       INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"),
-       INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"),
-       INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL),
-       INIT_CLKREG(&clk_dummy, "pxa27x-gpio", NULL),
-       INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
-};
-
 #ifdef CONFIG_PM
 
 #define SAVE(x)                sleep_save[SLEEP_SAVE_##x] = x
@@ -460,8 +293,6 @@ static int __init pxa27x_init(void)
 
                reset_status = RCSR;
 
-               clkdev_add_table(pxa27x_clkregs, ARRAY_SIZE(pxa27x_clkregs));
-
                if ((ret = pxa_init_dma(IRQ_DMA, 32)))
                        return ret;
 
@@ -469,10 +300,17 @@ static int __init pxa27x_init(void)
 
                register_syscore_ops(&pxa_irq_syscore_ops);
                register_syscore_ops(&pxa2xx_mfp_syscore_ops);
-               register_syscore_ops(&pxa2xx_clock_syscore_ops);
 
-               pxa_register_device(&pxa27x_device_gpio, &pxa27x_gpio_info);
-               ret = platform_add_devices(devices, ARRAY_SIZE(devices));
+               if (!of_have_populated_dt()) {
+                       ret = clk_register_clkdev(NULL, NULL,
+                                                 "pxa27x-gpio");
+                       if (!ret)
+                               pxa_register_device(&pxa27x_device_gpio,
+                                                   &pxa27x_gpio_info);
+                       if (!ret)
+                               ret = platform_add_devices(devices,
+                                                          ARRAY_SIZE(devices));
+               }
        }
 
        return ret;
index 12969523414cf2d0b972bdfb8b3ff50b9c781c8f..1f85bfe6b4704d97e999b748f46a61a362f20d66 100644 (file)
@@ -1 +1 @@
-obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
index 98544c5f86e99710a7845e7a55648cdf73c14599..0e15f011f9c86895db17d57e0e1e999164d0312c 100644 (file)
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void)
        xen_domain_type = XEN_HVM_DOMAIN;
 
        xen_setup_features();
+
+       if (!xen_feature(XENFEAT_grant_map_identity)) {
+               pr_warn("Please upgrade your Xen.\n"
+                               "If your platform has any non-coherent DMA devices, they won't work properly.\n");
+       }
+
        if (xen_feature(XENFEAT_dom0))
                xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
        else
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644 (file)
index 0000000..3b99860
--- /dev/null
@@ -0,0 +1,202 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <xen/features.h>
+
+static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
+static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
+
+static int alloc_xen_mm32_scratch_page(int cpu)
+{
+       struct page *page;
+       unsigned long virt;
+       pmd_t *pmdp;
+       pte_t *ptep;
+
+       if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
+               return 0;
+
+       page = alloc_page(GFP_KERNEL);
+       if (page == NULL) {
+               pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
+               return -ENOMEM;
+       }
+
+       virt = (unsigned long)__va(page_to_phys(page));
+       pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+       ptep = pte_offset_kernel(pmdp, virt);
+
+       per_cpu(xen_mm32_scratch_virt, cpu) = virt;
+       per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
+
+       return 0;
+}
+
+static int xen_mm32_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
+{
+       int cpu = (long)hcpu;
+       switch (action) {
+       case CPU_UP_PREPARE:
+               if (alloc_xen_mm32_scratch_page(cpu))
+                       return NOTIFY_BAD;
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block xen_mm32_cpu_notifier = {
+       .notifier_call  = xen_mm32_cpu_notify,
+};
+
+static void* xen_mm32_remap_page(dma_addr_t handle)
+{
+       unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
+       pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
+
+       *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
+       local_flush_tlb_kernel_page(virt);
+
+       return (void*)virt;
+}
+
+static void xen_mm32_unmap(void *vaddr)
+{
+       put_cpu_var(xen_mm32_scratch_virt);
+}
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+       size_t size, enum dma_data_direction dir,
+       void (*op)(const void *, size_t, int))
+{
+       unsigned long pfn;
+       size_t left = size;
+
+       pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+       offset %= PAGE_SIZE;
+
+       do {
+               size_t len = left;
+               void *vaddr;
+       
+               if (!pfn_valid(pfn))
+               {
+                       /* Cannot map the page, we don't know its physical address.
+                        * Return and hope for the best */
+                       if (!xen_feature(XENFEAT_grant_map_identity))
+                               return;
+                       vaddr = xen_mm32_remap_page(handle) + offset;
+                       op(vaddr, len, dir);
+                       xen_mm32_unmap(vaddr - offset);
+               } else {
+                       struct page *page = pfn_to_page(pfn);
+
+                       if (PageHighMem(page)) {
+                               if (len + offset > PAGE_SIZE)
+                                       len = PAGE_SIZE - offset;
+
+                               if (cache_is_vipt_nonaliasing()) {
+                                       vaddr = kmap_atomic(page);
+                                       op(vaddr + offset, len, dir);
+                                       kunmap_atomic(vaddr);
+                               } else {
+                                       vaddr = kmap_high_get(page);
+                                       if (vaddr) {
+                                               op(vaddr + offset, len, dir);
+                                               kunmap_high(page);
+                                       }
+                               }
+                       } else {
+                               vaddr = page_address(page) + offset;
+                               op(vaddr, len, dir);
+                       }
+               }
+
+               offset = 0;
+               pfn++;
+               left -= len;
+       } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+       /* Cannot use __dma_page_dev_to_cpu because we don't have a
+        * struct page for handle */
+
+       if (dir != DMA_TO_DEVICE)
+               outer_inv_range(handle, handle + size);
+
+       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+
+       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+
+       if (dir == DMA_FROM_DEVICE) {
+               outer_inv_range(handle, handle + size);
+       } else {
+               outer_clean_range(handle, handle + size);
+       }
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+
+{
+       if (!__generic_dma_ops(hwdev)->unmap_page)
+               return;
+       if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+               return;
+
+       __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+               return;
+       __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+               return;
+       __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+int __init xen_mm32_init(void)
+{
+       int cpu;
+
+       if (!xen_initial_domain())
+               return 0;
+
+       register_cpu_notifier(&xen_mm32_cpu_notifier);
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if (alloc_xen_mm32_scratch_page(cpu)) {
+                       put_online_cpus();
+                       unregister_cpu_notifier(&xen_mm32_cpu_notifier);
+                       return -ENOMEM;
+               }
+       }
+       put_online_cpus();
+
+       return 0;
+}
+arch_initcall(xen_mm32_init);
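For orientation, a hedged sketch of the streaming-DMA pairing these hooks serve from the swiotlb side; 'dev', 'handle' and 'len' are assumed to come from the usual dma_map_page() path.

        /* Hand the buffer to the device, let it write, then hand it back to
         * the CPU; the Xen code performs the cache maintenance even when the
         * bus address has no local struct page (the scratch-PTE case above). */
        xen_dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
        /* ... device DMA happens here ... */
        xen_dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);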
index 97baf44278171680a0932f949c22e8e26c54926f..05485777625474e835fc41a5d57fb494ca778552 100644 (file)
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
        unsigned long pfn;
        unsigned long mfn;
        unsigned long nr_pages;
-       struct rb_node rbnode_mach;
        struct rb_node rbnode_phys;
 };
 
 static rwlock_t p2m_lock;
 struct rb_root phys_to_mach = RB_ROOT;
 EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
 
 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 {
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
                parent = *link;
                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
 
-               if (new->mfn == entry->mfn)
-                       goto err_out;
                if (new->pfn == entry->pfn)
                        goto err_out;
 
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(__pfn_to_mfn);
 
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
-       struct rb_node **link = &mach_to_phys.rb_node;
-       struct rb_node *parent = NULL;
-       struct xen_p2m_entry *entry;
-       int rc = 0;
-
-       while (*link) {
-               parent = *link;
-               entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
-               if (new->mfn == entry->mfn)
-                       goto err_out;
-               if (new->pfn == entry->pfn)
-                       goto err_out;
-
-               if (new->mfn < entry->mfn)
-                       link = &(*link)->rb_left;
-               else
-                       link = &(*link)->rb_right;
-       }
-       rb_link_node(&new->rbnode_mach, parent, link);
-       rb_insert_color(&new->rbnode_mach, &mach_to_phys);
-       goto out;
-
-err_out:
-       rc = -EINVAL;
-       pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
-                       __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
-       return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
-       struct rb_node *n = mach_to_phys.rb_node;
-       struct xen_p2m_entry *entry;
-       unsigned long irqflags;
-
-       read_lock_irqsave(&p2m_lock, irqflags);
-       while (n) {
-               entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
-               if (entry->mfn <= mfn &&
-                               entry->mfn + entry->nr_pages > mfn) {
-                       read_unlock_irqrestore(&p2m_lock, irqflags);
-                       return entry->pfn + (mfn - entry->mfn);
-               }
-               if (mfn < entry->mfn)
-                       n = n->rb_left;
-               else
-                       n = n->rb_right;
-       }
-       read_unlock_irqrestore(&p2m_lock, irqflags);
-
-       return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                            struct gnttab_map_grant_ref *kmap_ops,
                            struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
                        p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                        if (p2m_entry->pfn <= pfn &&
                                        p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
-                               rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
                                rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
                                write_unlock_irqrestore(&p2m_lock, irqflags);
                                kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
        p2m_entry->mfn = mfn;
 
        write_lock_irqsave(&p2m_lock, irqflags);
-       if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
-               (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+       if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
                write_unlock_irqrestore(&p2m_lock, irqflags);
                return false;
        }
index c294e67d3925e13401526b4b57ce9637bdb92092..ae67e88c28b99497b4ae394bc849bdb20cacdd02 100644 (file)
@@ -150,7 +150,6 @@ static void sha2_finup(struct shash_desc *desc, const u8 *data,
        kernel_neon_begin_partial(28);
        sha2_ce_transform(blocks, data, sctx->state, NULL, len);
        kernel_neon_end();
-       data += blocks * SHA256_BLOCK_SIZE;
 }
 
 static int sha224_finup(struct shash_desc *desc, const u8 *data,
index d064047612b12acb2668927c4b3cedf504d22c11..52b484b6aa1a7fec251a72b028f655523c74236a 100644 (file)
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
  */
 #define ARM_MAX_BRP            16
 #define ARM_MAX_WRP            16
-#define ARM_MAX_HBP_SLOTS      (ARM_MAX_BRP + ARM_MAX_WRP)
 
 /* Virtual debug register bases. */
 #define AARCH64_DBG_REG_BVR    0
index 3df21feeabddfca221e3a65063634ca339e4c0d9..286b1bec547ce2a060d01cf816893b2b4aef9d12 100644 (file)
@@ -139,7 +139,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
 #define KSTK_EIP(tsk)  ((unsigned long)task_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk)  ((unsigned long)task_pt_regs(tsk)->sp)
+#define KSTK_ESP(tsk)  user_stack_pointer(task_pt_regs(tsk))
 
 /*
  * Prefetching support
index 501000fadb6fde2249096d3dc6ee027cba427e14..41ed9e13795e59411b701f7590d3a5386f52ba01 100644 (file)
@@ -137,7 +137,7 @@ struct pt_regs {
        (!((regs)->pstate & PSR_F_BIT))
 
 #define user_stack_pointer(regs) \
-       (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
+       (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
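The added parentheses matter whenever the macro expands inside a larger expression, because ?: binds more loosely than arithmetic; a self-contained illustration with hypothetical stand-ins for the pt_regs fields:

#include <stdio.h>

/* OLD mimics the previous expansion, NEW the fixed one. */
#define OLD(compat, sp, csp) (!(compat)) ? (sp) : (csp)
#define NEW(compat, sp, csp) (!(compat) ? (sp) : (csp))

int main(void)
{
        unsigned long sp = 1000, csp = 2000;

        /* '-' binds tighter than '?:', so the old form applies '- 16'
         * only to the compat branch and returns 1000 here. */
        printf("old: %lu\n", OLD(0, sp, csp) - 16);     /* prints 1000 */
        printf("new: %lu\n", NEW(0, sp, csp) - 16);     /* prints 984  */
        return 0;
}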
index ad8aebb1cdef7d289d609015bc22d51498cddc08..3dca15634e69c6b1b34db6e00b3564089129326b 100644 (file)
@@ -270,6 +270,7 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
        case CPU_PM_ENTER:
                if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
                        fpsimd_save_state(&current->thread.fpsimd_state);
+               this_cpu_write(fpsimd_last_state, NULL);
                break;
        case CPU_PM_EXIT:
                if (current->mm)
index bed028364a93235006b467c20daeb8725b9a4615..873069056229a89c4808b49f73e60d196ff53634 100644 (file)
@@ -373,10 +373,6 @@ ENTRY(__boot_cpu_mode)
        .long   0
        .popsection
 
-       .align  3
-2:     .quad   .
-       .quad   PAGE_OFFSET
-
 #ifdef CONFIG_SMP
        .align  3
 1:     .quad   .
index 0f08dfd69ebc73ea99b7b7f6d68c4b2f320eb2d1..dfa6e3e74fddec289649c1baba921378f521f611 100644 (file)
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;
 
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+               affinity = cpu_online_mask;
                ret = true;
+       }
 
-       /*
-        * when using forced irq_set_affinity we must ensure that the cpu
-        * being offlined is not present in the affinity mask, it may be
-        * selected as the target CPU otherwise
-        */
-       affinity = cpu_online_mask;
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);
 
        return ret;
index 422ebd63b619253d23c7a82d5fb14b322dc47fe1..6762ad705587fa34fff0281546273a6930ddbcbf 100644 (file)
@@ -24,6 +24,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
                        return regs->compat_lr;
        }
 
+       if ((u32)idx == PERF_REG_ARM64_SP)
+               return regs->sp;
+
+       if ((u32)idx == PERF_REG_ARM64_PC)
+               return regs->pc;
+
        return regs->regs[idx];
 }
 
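The special cases are needed because arm64 pt_regs keeps sp and pc outside the 31-entry regs[] array, so indexing regs->regs[] with those register numbers reads past the GPR block; an illustrative (sketch) use of the fixed accessor:

        u64 sp = perf_reg_value(regs, PERF_REG_ARM64_SP);       /* now regs->sp */
        u64 pc = perf_reg_value(regs, PERF_REG_ARM64_PC);       /* now regs->pc */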
index 398ab05081b437438d41fabf6691aee5c76e1361..e0ef8ba4fdb904433fae578dc8d5782c1ac94cb1 100644 (file)
@@ -231,9 +231,27 @@ void exit_thread(void)
 {
 }
 
+static void tls_thread_flush(void)
+{
+       asm ("msr tpidr_el0, xzr");
+
+       if (is_compat_task()) {
+               current->thread.tp_value = 0;
+
+               /*
+                * We need to ensure ordering between the shadow state and the
+                * hardware state, so that we don't corrupt the hardware state
+                * with a stale shadow state during context switch.
+                */
+               barrier();
+               asm ("msr tpidrro_el0, xzr");
+       }
+}
+
 void flush_thread(void)
 {
        fpsimd_flush_thread();
+       tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
 }
 
index 70526cfda056e7216ef7f773b059d31b6ca4f88a..fe63ac5e9bf5582cb4e494262a9d714d4b6758a7 100644 (file)
@@ -87,7 +87,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
                        break;
                }
        }
-       for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
+
+       for (i = 0; i < ARM_MAX_WRP; ++i) {
                if (current->thread.debug.hbp_watch[i] == bp) {
                        info.si_errno = -((i << 1) + 1);
                        break;
@@ -662,8 +663,10 @@ static int compat_gpr_get(struct task_struct *target,
                        kbuf += sizeof(reg);
                } else {
                        ret = copy_to_user(ubuf, &reg, sizeof(reg));
-                       if (ret)
+                       if (ret) {
+                               ret = -EFAULT;
                                break;
+                       }
 
                        ubuf += sizeof(reg);
                }
@@ -701,8 +704,10 @@ static int compat_gpr_set(struct task_struct *target,
                        kbuf += sizeof(reg);
                } else {
                        ret = copy_from_user(&reg, ubuf, sizeof(reg));
-                       if (ret)
-                               return ret;
+                       if (ret) {
+                               ret = -EFAULT;
+                               break;
+                       }
 
                        ubuf += sizeof(reg);
                }
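The explicit -EFAULT is needed because copy_to_user() and copy_from_user() return the number of bytes left uncopied rather than an errno, so the raw value must not be propagated; a minimal sketch of the idiom the hunks adopt:

        /* Translate a partial user copy into an error code instead of
         * returning the "bytes not copied" count to the regset core. */
        unsigned long not_copied = copy_from_user(&reg, ubuf, sizeof(reg));
        if (not_copied) {
                ret = -EFAULT;
                break;
        }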
index f6f0ccf35ae67110c030d72c6c7648e04168c6e9..edb146d0185740b18de7d7cd365ae5d01ea1cbdc 100644 (file)
@@ -78,6 +78,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 static const char *cpu_name;
+static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -309,6 +310,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
                while (true)
                        cpu_relax();
        }
+
+       machine_name = of_flat_dt_get_machine_name();
 }
 
 /*
@@ -447,21 +450,10 @@ static int c_show(struct seq_file *m, void *v)
 {
        int i;
 
-       /*
-        * Dump out the common processor features in a single line. Userspace
-        * should read the hwcaps with getauxval(AT_HWCAP) rather than
-        * attempting to parse this.
-        */
-       seq_puts(m, "features\t:");
-       for (i = 0; hwcap_str[i]; i++)
-               if (elf_hwcap & (1 << i))
-                       seq_printf(m, " %s", hwcap_str[i]);
-       seq_puts(m, "\n\n");
+       seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+                  cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
 
        for_each_online_cpu(i) {
-               struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
-               u32 midr = cpuinfo->reg_midr;
-
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
@@ -470,13 +462,25 @@ static int c_show(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
                seq_printf(m, "processor\t: %d\n", i);
 #endif
-               seq_printf(m, "implementer\t: 0x%02x\n",
-                          MIDR_IMPLEMENTOR(midr));
-               seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
-               seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
-               seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
        }
 
+       /* dump out the processor features */
+       seq_puts(m, "Features\t: ");
+
+       for (i = 0; hwcap_str[i]; i++)
+               if (elf_hwcap & (1 << i))
+                       seq_printf(m, "%s ", hwcap_str[i]);
+
+       seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+       seq_printf(m, "CPU architecture: AArch64\n");
+       seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
+       seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
+       seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+
+       seq_puts(m, "\n");
+
+       seq_printf(m, "Hardware\t: %s\n", machine_name);
+
        return 0;
 }
 
index de2b0226e06df34fd7aaa4bb55e014fc33f9cddf..dc47e53e9e28c15da99e62976a9ca29f71da8bc4 100644 (file)
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
 
        case __ARM_NR_compat_set_tls:
                current->thread.tp_value = regs->regs[0];
+
+               /*
+                * Protect against register corruption from context switch.
+                * See comment in tls_thread_flush.
+                */
+               barrier();
                asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
                return 0;
 
index e28be510380ca758f8cdb76a5da57892cb02e3bf..34b8bd0711e94295b3b8d629e2e032db8d8bce33 100644 (file)
@@ -66,6 +66,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
        else
                kvm_vcpu_block(vcpu);
 
+       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
        return 1;
 }
 
index d968796f4b2d7a88dda3605f0f16b9777879052b..c3191168a994fba06b6f5ddc807dbc8eceec224b 100644 (file)
@@ -80,6 +80,10 @@ __do_hyp_init:
        msr     mair_el2, x4
        isb
 
+       /* Invalidate the stale TLBs from Bootloader */
+       tlbi    alle2
+       dsb     sy
+
        mrs     x4, sctlr_el2
        and     x4, x4, #SCTLR_EL2_EE   // preserve endianness of EL2
        ldr     x5, =SCTLR_EL2_FLAGS
index 1fcdd344c7ad5797605ddbb9ee98bcde3461c295..4ef7a54813e6b72d88a0d40109a26347b91aa7c6 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            352
+#define NR_syscalls            354
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 9cd82fbc7817f716d589368bda3c1d274687a607..b419c6b7ac3739b150000dfd13aff28d3dcc372a 100644 (file)
 #define __NR_sched_setattr     349
 #define __NR_sched_getattr     350
 #define __NR_renameat2         351
+#define __NR_getrandom         352
+#define __NR_memfd_create      353
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 501e102127899c6afaa893bd99d4d555f5bab8ec..05b46c2b08b8d8d4be26c56e8be4d245cef3dab6 100644 (file)
@@ -372,4 +372,6 @@ ENTRY(sys_call_table)
        .long sys_sched_setattr
        .long sys_sched_getattr         /* 350 */
        .long sys_renameat2
+       .long sys_getrandom
+       .long sys_memfd_create
 
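With the numbers assigned and the table entries in place, userspace can reach the new calls through the generic syscall(2) wrapper; a hedged, self-contained sketch using the raw m68k numbers from the unistd.h hunk above (a libc that already defines SYS_getrandom and SYS_memfd_create can use those names instead):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        unsigned char buf[16];
        /* 352 = getrandom, 353 = memfd_create on m68k per the hunk above. */
        long got = syscall(352, buf, sizeof(buf), 0);
        long mfd = syscall(353, "demo", 0);

        printf("getrandom returned %ld, memfd_create returned fd %ld\n", got, mfd);
        return 0;
}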
index 40e1c1dd0e24a47d38f142ce0c5543d629a0fd50..6feded3b0c4c1eb6d1414082a9f9318041129a39 100644 (file)
@@ -127,7 +127,7 @@ config SECCOMP
 
 endmenu
 
-menu "Advanced setup"
+menu "Kernel features"
 
 config ADVANCED_OPTIONS
        bool "Prompt for advanced kernel configuration options"
@@ -248,10 +248,10 @@ config MICROBLAZE_64K_PAGES
 
 endchoice
 
-endmenu
-
 source "mm/Kconfig"
 
+endmenu
+
 menu "Executable file formats"
 
 source "fs/Kconfig.binfmt"
index b4a4cb150aa998af14d3d359db7c2b73f26397db..596e485ae7074df3b8cf5abba5d390153c1166c7 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <asm/percpu.h>
 #include <asm/ptrace.h>
+#include <linux/linkage.h>
 
 /*
  * These are per-cpu variables required in entry.S, among other
index 0aa005703a0b5f348848afb5936f650d18f28322..59a89a64a8656b01436f4aaf38ec270f5d8b2a7a 100644 (file)
@@ -98,13 +98,13 @@ static inline int access_ok(int type, const void __user *addr,
 
        if ((get_fs().seg < ((unsigned long)addr)) ||
                        (get_fs().seg < ((unsigned long)addr + size - 1))) {
-               pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+               pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
                        type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
                        (u32)get_fs().seg);
                return 0;
        }
 ok:
-       pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+       pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
                        type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
                        (u32)get_fs().seg);
        return 1;
index fd56a8f66489dc68264ac8f70cc70863c6f25380..ea4b233647c1aa32d23562e7abba7dab72d404db 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         381
+#define __NR_syscalls         387
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index 6e75e203092764a0fe42d7211e547af9dd32f35b..1554a6f2a5bb4bf573d532fa35eb8be8deaa1404 100644 (file)
@@ -321,6 +321,22 @@ source "fs/Kconfig"
 
 source "arch/parisc/Kconfig.debug"
 
+config SECCOMP
+       def_bool y
+       prompt "Enable seccomp to safely compute untrusted bytecode"
+       ---help---
+         This kernel feature is useful for number crunching applications
+         that may need to compute untrusted bytecode during their
+         execution. By using pipes or other transports made available to
+         the process as file descriptors supporting the read/write
+         syscalls, it's possible to isolate those applications in
+         their own address space using seccomp. Once seccomp is
+         enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+         and the task is only allowed to execute a few safe syscalls
+         defined by each seccomp mode.
+
+         If unsure, say Y. Only embedded should say N here.
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
index d9dc6cd3b7d2bb549bd7bc24efd2339a894aad0b..e5c4da03581056efc5630c47b3dacdfdc1980c07 100644 (file)
@@ -456,7 +456,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
                }
 
                /* String could be altered by userspace after strlen_user() */
-               fsname[len] = '\0';
+               fsname[len - 1] = '\0';
 
                printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname);
                if ( !strcmp(fsname, "hfs") ) {
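The off-by-one arises because strlen_user() includes the terminating NUL in its count; assuming the buffer was sized with that count, as in this function, a short note in code form:

        /* With len = strlen_user(...), valid indices are 0..len-1, so the
         * terminator belongs at [len - 1]; the old fsname[len] = '\0' wrote
         * one byte past the end of the allocation. */
        fsname[len - 1] = '\0';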
diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h
new file mode 100644 (file)
index 0000000..015f788
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ASM_PARISC_SECCOMP_H
+#define _ASM_PARISC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+
+#endif /* _ASM_PARISC_SECCOMP_H */
index 4b9b10ce1f9d09d771beda26c7b48f5eba8ee75a..a84611835549c2f9a1b93e999f593a058fcc8add 100644 (file)
@@ -60,6 +60,7 @@ struct thread_info {
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9       /* single stepping? */
 #define TIF_BLOCKSTEP          10      /* branch stepping? */
+#define TIF_SECCOMP            11      /* secure computing */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
@@ -70,11 +71,13 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
+#define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                                  _TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP |        \
-                                _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
+                                _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SECCOMP)
 
 #ifdef CONFIG_64BIT
 # ifdef CONFIG_COMPAT
index 47e0e21d2272468bbc864e6e30ced36c18929534..8667f18be2385d7423da2efac3803671b500740e 100644 (file)
 #define __NR_sched_getattr     (__NR_Linux + 335)
 #define __NR_utimes            (__NR_Linux + 336)
 #define __NR_renameat2         (__NR_Linux + 337)
+#define __NR_seccomp           (__NR_Linux + 338)
+#define __NR_getrandom         (__NR_Linux + 339)
+#define __NR_memfd_create      (__NR_Linux + 340)
 
-#define __NR_Linux_syscalls    (__NR_renameat2 + 1)
+#define __NR_Linux_syscalls    (__NR_memfd_create + 1)
 
 
 #define __IGNORE_select                /* newselect */
index e842ee233db44ef902899280fbadd456175a4de3..3bab72462ab53f2cc74e205a7e43e45cc77b2a0f 100644 (file)
@@ -270,6 +270,12 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
        long ret = 0;
 
+       /* Do the secure computing check first. */
+       if (secure_computing(regs->gr[20])) {
+               /* seccomp failures shouldn't expose any additional code. */
+               return -1;
+       }
+
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                ret = -1L;
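For context, the userspace side this hook services; a self-contained sketch of enabling strict-mode seccomp via the prctl() already referenced in the Kconfig help text above (filter mode via the new seccomp syscall works analogously):

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/seccomp.h>

int main(void)
{
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0)) {
                perror("prctl(PR_SET_SECCOMP)");
                return 1;
        }
        /* From here on only read/write/exit/sigreturn are allowed; any other
         * syscall entering do_syscall_trace_enter() with TIF_SECCOMP set is
         * fatal to the task. */
        write(1, "still alive\n", 12);
        _exit(0);
}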
index 83878601103701df4497913c89d5702897249a38..7ef22e3387e09f8b63b2a6532b0c995bb12b6e96 100644 (file)
@@ -74,7 +74,7 @@ ENTRY(linux_gateway_page)
        /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
        /* Light-weight-syscall entry must always be located at 0xb0 */
        /* WARNING: Keep this number updated with table size changes */
-#define __NR_lws_entries (2)
+#define __NR_lws_entries (3)
 
 lws_entry:
        gate    lws_start, %r0          /* increase privilege */
@@ -502,7 +502,7 @@ lws_exit:
 
        
        /***************************************************
-               Implementing CAS as an atomic operation:
+               Implementing 32bit CAS as an atomic operation:
 
                %r26 - Address to examine
                %r25 - Old value to check (old)
@@ -659,6 +659,230 @@ cas_action:
        ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
 
 
+       /***************************************************
+               New CAS implementation which uses pointers and variable size
+               information. The values pointed to by old and new MUST NOT change
+               while performing CAS. The lock only protects the value at %r26.
+
+               %r26 - Address to examine
+               %r25 - Pointer to the value to check (old)
+               %r24 - Pointer to the value to set (new)
+               %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
+               %r28 - Return non-zero on failure
+               %r21 - Kernel error code
+
+               %r21 has the following meanings:
+
+               EAGAIN - CAS is busy, ldcw failed, try again.
+               EFAULT - Read or write failed.
+
+               Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
+
+       ****************************************************/
+
+       /* ELF32 Process entry path */
+lws_compare_and_swap_2:
+#ifdef CONFIG_64BIT
+       /* Clip the input registers */
+       depdi   0, 31, 32, %r26
+       depdi   0, 31, 32, %r25
+       depdi   0, 31, 32, %r24
+       depdi   0, 31, 32, %r23
+#endif
+
+       /* Check the validity of the size pointer */
+       subi,>>= 4, %r23, %r0
+       b,n     lws_exit_nosys
+
+       /* Jump to the functions which will load the old and new values into
+          registers depending on their size */
+       shlw    %r23, 2, %r29
+       blr     %r29, %r0
+       nop
+
+       /* 8bit load */
+4:     ldb     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+5:     ldb     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 16bit load */
+6:     ldh     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+7:     ldh     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 32bit load */
+8:     ldw     0(%sr3,%r25), %r25
+       b       cas2_lock_start
+9:     ldw     0(%sr3,%r24), %r24
+       nop
+       nop
+       nop
+       nop
+       nop
+
+       /* 64bit load */
+#ifdef CONFIG_64BIT
+10:    ldd     0(%sr3,%r25), %r25
+11:    ldd     0(%sr3,%r24), %r24
+#else
+       /* Load new value into r22/r23 - high/low */
+10:    ldw     0(%sr3,%r25), %r22
+11:    ldw     4(%sr3,%r25), %r23
+       /* Load new value into fr4 for atomic store later */
+12:    flddx   0(%sr3,%r24), %fr4
+#endif
+
+cas2_lock_start:
+       /* Load start of lock table */
+       ldil    L%lws_lock_start, %r20
+       ldo     R%lws_lock_start(%r20), %r28
+
+       /* Extract four bits from r26 and hash lock (Bits 4-7) */
+       extru  %r26, 27, 4, %r20
+
+       /* Find lock to use, the hash is either one of 0 to
+          15, multiplied by 16 (keep it 16-byte aligned)
+          and add to the lock table offset. */
+       shlw    %r20, 4, %r20
+       add     %r20, %r28, %r20
+
+       rsm     PSW_SM_I, %r0                   /* Disable interrupts */
+       /* COW breaks can cause contention on UP systems */
+       LDCW    0(%sr2,%r20), %r28              /* Try to acquire the lock */
+       cmpb,<>,n       %r0, %r28, cas2_action  /* Did we get it? */
+cas2_wouldblock:
+       ldo     2(%r0), %r28                    /* 2nd case */
+       ssm     PSW_SM_I, %r0
+       b       lws_exit                        /* Contended... */
+       ldo     -EAGAIN(%r0), %r21              /* Spin in userspace */
+
+       /*
+               prev = *addr;
+               if ( prev == old )
+                 *addr = new;
+               return prev;
+       */
+
+       /* NOTES:
+               This all works because intr_do_signal
+               and schedule both check the return iasq
+               and see that we are on the kernel page
+               so this process is never scheduled off
+               nor ever sent any signal of any sort,
+               thus it is wholly atomic from userspace's
+               perspective
+       */
+cas2_action:
+       /* Jump to the correct function */
+       blr     %r29, %r0
+       /* Set %r28 as non-zero for now */
+       ldo     1(%r0),%r28
+
+       /* 8bit CAS */
+13:    ldb,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+14:    stb,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 16bit CAS */
+15:    ldh,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+16:    sth,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 32bit CAS */
+17:    ldw,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+18:    stw,ma  %r24, 0(%sr3,%r26)
+       b       cas2_end
+       copy    %r0, %r28
+       nop
+       nop
+
+       /* 64bit CAS */
+#ifdef CONFIG_64BIT
+19:    ldd,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r25, %r0
+       b,n     cas2_end
+20:    std,ma  %r24, 0(%sr3,%r26)
+       copy    %r0, %r28
+#else
+       /* Compare first word */
+19:    ldw,ma  0(%sr3,%r26), %r29
+       sub,=   %r29, %r22, %r0
+       b,n     cas2_end
+       /* Compare second word */
+20:    ldw,ma  4(%sr3,%r26), %r29
+       sub,=   %r29, %r23, %r0
+       b,n     cas2_end
+       /* Perform the store */
+21:    fstdx   %fr4, 0(%sr3,%r26)
+       copy    %r0, %r28
+#endif
+
+cas2_end:
+       /* Free lock */
+       stw,ma  %r20, 0(%sr2,%r20)
+       /* Enable interrupts */
+       ssm     PSW_SM_I, %r0
+       /* Return to userspace, set no error */
+       b       lws_exit
+       copy    %r0, %r21
+
+22:
+       /* Error occurred on load or store */
+       /* Free lock */
+       stw     %r20, 0(%sr2,%r20)
+       ssm     PSW_SM_I, %r0
+       ldo     1(%r0),%r28
+       b       lws_exit
+       ldo     -EFAULT(%r0),%r21       /* set errno */
+       nop
+       nop
+       nop
+
+       /* Exception table entries, for the load and store, return EFAULT.
+          Each of the entries must be relocated. */
+       ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+       ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
+#endif
+
        /* Make sure nothing else is placed on this page */
        .align PAGE_SIZE
 END(linux_gateway_page)
@@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page)
        /* Light-weight-syscall table */
        /* Start of lws table. */
 ENTRY(lws_table)
-       LWS_ENTRY(compare_and_swap32)   /* 0 - ELF32 Atomic compare and swap */
-       LWS_ENTRY(compare_and_swap64)   /* 1 - ELF64 Atomic compare and swap */
+       LWS_ENTRY(compare_and_swap32)           /* 0 - ELF32 Atomic 32bit CAS */
+       LWS_ENTRY(compare_and_swap64)           /* 1 - ELF64 Atomic 32bit CAS */
+       LWS_ENTRY(compare_and_swap_2)           /* 2 - ELF32 Atomic 64bit CAS */
 END(lws_table)
        /* End of lws table */
 
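A C-level statement of the semantics the new gateway entry implements, shown for the 32-bit width; this is a sketch only: the real implementation runs on the kernel gateway page under a per-address hash lock, with %r23 selecting 8/16/32/64-bit operands, %r28 reporting success and %r21 carrying -EAGAIN/-EFAULT.

/* Sketch: cas2(addr, &old, &new) stores 'new' and returns 0 if *addr
 * still equals 'old', otherwise returns non-zero and leaves *addr alone. */
static int cas2_semantics_u32(volatile unsigned int *addr,
                              const unsigned int *old_val,
                              const unsigned int *new_val)
{
        if (*addr != *old_val)
                return 1;       /* compare failed (%r28 non-zero) */
        *addr = *new_val;
        return 0;               /* swapped (%r28 == 0) */
}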
index 84c5d3a58fa189b91e8671c29879d27238ccb160..b563d9c8268b153429a06cfc85924c236d00c688 100644 (file)
        ENTRY_SAME(sched_getattr)       /* 335 */
        ENTRY_COMP(utimes)
        ENTRY_SAME(renameat2)
+       ENTRY_SAME(seccomp)
+       ENTRY_SAME(getrandom)
+       ENTRY_SAME(memfd_create)        /* 340 */
 
        /* Nothing yet */
 
index 4bee1a6d41d04d0694ee6ad45ab98e9ebfd70b44..45fd06cdc3e86639d3ddc829c4d44c8db0069bd0 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
index 6d7b22f41b5077a6d0f614fceb58c596856a209d..77d7bf3ca2acb014def979d68d21c7f94401ed10 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
index 4b07bade1ba99942e88ea9cd4dd251996cbede70..269d6e47c67d2a998272f27545ec238766c68dc8 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_ALTIVEC=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=24
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
index 3c72fa615bd9e29e135df83b7944fa48c922db8f..7594c5ac64818f6b01537bb0c0be3519c73bd455 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_BLK_DEV_INITRD=y
index 95e545d9f25c94935c1cee35a9e2b45e7c0169cf..c8b6a9ddb21bf813c448dfc4f847a3f611c23dd1 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 # CONFIG_COMPAT_BRK is not set
index cec044a3ff69dd6c05dcfec122fcb53653b412f8..e5e7838af0088f04b8faf5f40d8204962bd0bb6f 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_ALTIVEC=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
index f26b267eb71fa947db544f202e6cd55b581b1821..f6c02f8cdc62bd6f92a231e2afad8985074b7351 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_VSX=y
 CONFIG_SMP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
index 438e813dc9cb1c18cf4605f5d937135100d39530..587f5514f9b1c4ad20a019c13596cb191bc308a7 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_PPC_BOOK3E_64=y
 CONFIG_SMP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_TASKSTATS=y
index fdee37fab81c5aa1f70263e7c4979fc0df7e9956..2e637c881d2b44be41b9db66145ac7863cbe1be1 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_LZMA=y
index a905063281cc329b376605fa583d1e3282ce36c2..50375f1f59e7a509641dcfd45036f328f7a267ee 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=2048
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
index 58e3dbf43ca419f6877c7f87ac856e4ace764560..4428ee428f4ea992f11a85ff233e93cbf3825488 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_NR_CPUS=2048
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
index 279b80f3bb293d0bceceecb9eb38ff344e26b8fb..c0c61fa9cd9e4d3440798c9c1c0f93cb9276fcfa 100644 (file)
                                 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
 #define STACK_FRAME_MARKER     12
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE   32
+#else
+#define STACK_FRAME_MIN_SIZE   STACK_FRAME_OVERHEAD
+#endif
+
 /* Size of dummy stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE     128
 #define __SIGNAL_FRAMESIZE32   64
@@ -60,6 +66,7 @@
 #define STACK_FRAME_REGS_MARKER        ASM_CONST(0x72656773)
 #define STACK_INT_FRAME_SIZE   (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
 #define STACK_FRAME_MARKER     2
+#define STACK_FRAME_MIN_SIZE   STACK_FRAME_OVERHEAD
 
 /* Size of stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE     64
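
The new STACK_FRAME_MIN_SIZE captures the fact that the ppc64 ELFv2 ABI permits stack frames as small as 32 bytes, whereas ELFv1 frames always carry the full STACK_FRAME_OVERHEAD (112 bytes on 64-bit); 32-bit simply keeps the old value. The backtrace fix in process.c further down uses it when judging whether a candidate stack pointer can really be a caller frame. A rough C restatement of that check (names invented here):

    /* Sketch: can "sp" be the caller frame above "prev_sp"?  With ELFv2 a
     * minimal frame is only 32 bytes, so insisting on the 112-byte ELFv1
     * overhead would wrongly cut backtraces short. */
    static int frame_plausible(unsigned long sp, unsigned long prev_sp,
                               unsigned long min_frame /* STACK_FRAME_MIN_SIZE */)
    {
        if (sp & 0xf)
            return 0;                       /* must stay 16-byte aligned */
        return sp >= prev_sp + min_frame;
    }
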
index 542bc0f0673fc8e792d984b8215511cf4b4566b4..7d8a600688058ec5025cfd3688b6eb369cdadea5 100644 (file)
@@ -362,3 +362,6 @@ SYSCALL(ni_syscall) /* sys_kcmp */
 SYSCALL_SPU(sched_setattr)
 SYSCALL_SPU(sched_getattr)
 SYSCALL_SPU(renameat2)
+SYSCALL_SPU(seccomp)
+SYSCALL_SPU(getrandom)
+SYSCALL_SPU(memfd_create)
index 5ce5552ab9f5ca24fe479bfd81ea4a915a478a90..4e9af3fd43e7d062755255d932c2123ea5f54c8a 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          358
+#define __NR_syscalls          361
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 2d526f7b48da121f60b2c3c7c069b07199a76ff6..0688fc06e18394268002ae2b7e419f183e6a7dc4 100644 (file)
 #define __NR_sched_setattr     355
 #define __NR_sched_getattr     356
 #define __NR_renameat2         357
+#define __NR_seccomp           358
+#define __NR_getrandom         359
+#define __NR_memfd_create      360
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 72c20bb16d266b4ed0aa2160382414f161ad0663..79294c4c5015ea83771e4ebc849a561bd376144d 100644 (file)
@@ -62,10 +62,10 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
        }
 
        kvm->arch.hpt_cma_alloc = 0;
-       page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
+       page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
        if (page) {
                hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-               memset((void *)hpt, 0, (1 << order));
+               memset((void *)hpt, 0, (1ul << order));
                kvm->arch.hpt_cma_alloc = 1;
        }
 
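The 1ul suffix matters because the HPT order can reach 31 and beyond for large guests: (1 << order) is evaluated in 32-bit int arithmetic and overflows, while 1ul keeps the shift in unsigned long. A minimal illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int order = 34;            /* e.g. a 16 GiB hashed page table */

        /* "1 << order" would be evaluated as a 32-bit int and is undefined
         * once order >= 31; forcing unsigned long keeps all 64 bits. */
        unsigned long npages = 1ul << (order - 12);   /* 12 ~ PAGE_SHIFT */
        unsigned long bytes  = 1ul << order;

        printf("%lu pages, %lu bytes\n", npages, bytes);
        return 0;
    }
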
index 74d1e780748b58f17f8987218a6184c87aae63da..2396dda282cdef0ed5c11c6ab7c3f4f479d0ac04 100644 (file)
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
                return 0;               /* must be 16-byte aligned */
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return 0;
-       if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+       if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
                return 1;
        /*
         * sp could decrease when we jump off an interrupt stack
index 97ac8dc33667f1319802fd08a8c1d9d56b8905b5..5e1ed1575aabe23c245edcdff06433cfb0a62327 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <asm/opal.h>
 #include <asm/cputable.h>
+#include <asm/machdep.h>
 
 static int opal_hmi_handler_nb_init;
 struct OpalHmiEvtNode {
@@ -185,4 +186,4 @@ static int __init opal_hmi_handler_init(void)
        }
        return 0;
 }
-subsys_initcall(opal_hmi_handler_init);
+machine_subsys_initcall(powernv, opal_hmi_handler_init);
index c904583baf4b94d46e5a0ae3b4f5dca134a4bc87..17ee193960a09aab4e5e4502525f4b55788bdcce 100644 (file)
@@ -113,7 +113,7 @@ out:
 static int pseries_remove_mem_node(struct device_node *np)
 {
        const char *type;
-       const unsigned int *regs;
+       const __be32 *regs;
        unsigned long base;
        unsigned int lmb_size;
        int ret = -EINVAL;
@@ -132,8 +132,8 @@ static int pseries_remove_mem_node(struct device_node *np)
        if (!regs)
                return ret;
 
-       base = *(unsigned long *)regs;
-       lmb_size = regs[3];
+       base = be64_to_cpu(*(unsigned long *)regs);
+       lmb_size = be32_to_cpu(regs[3]);
 
        pseries_remove_memblock(base, lmb_size);
        return 0;
@@ -153,7 +153,7 @@ static inline int pseries_remove_mem_node(struct device_node *np)
 static int pseries_add_mem_node(struct device_node *np)
 {
        const char *type;
-       const unsigned int *regs;
+       const __be32 *regs;
        unsigned long base;
        unsigned int lmb_size;
        int ret = -EINVAL;
@@ -172,8 +172,8 @@ static int pseries_add_mem_node(struct device_node *np)
        if (!regs)
                return ret;
 
-       base = *(unsigned long *)regs;
-       lmb_size = regs[3];
+       base = be64_to_cpu(*(unsigned long *)regs);
+       lmb_size = be32_to_cpu(regs[3]);
 
        /*
         * Update memory region to represent the memory add
@@ -187,14 +187,14 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
        struct of_drconf_cell *new_drmem, *old_drmem;
        unsigned long memblock_size;
        u32 entries;
-       u32 *p;
+       __be32 *p;
        int i, rc = -EINVAL;
 
        memblock_size = pseries_memory_block_size();
        if (!memblock_size)
                return -EINVAL;
 
-       p = (u32 *) pr->old_prop->value;
+       p = (__be32 *) pr->old_prop->value;
        if (!p)
                return -EINVAL;
 
@@ -203,28 +203,30 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
         * entries. Get the niumber of entries and skip to the array of
         * of_drconf_cell's.
         */
-       entries = *p++;
+       entries = be32_to_cpu(*p++);
        old_drmem = (struct of_drconf_cell *)p;
 
-       p = (u32 *)pr->prop->value;
+       p = (__be32 *)pr->prop->value;
        p++;
        new_drmem = (struct of_drconf_cell *)p;
 
        for (i = 0; i < entries; i++) {
-               if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) &&
-                   (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) {
-                       rc = pseries_remove_memblock(old_drmem[i].base_addr,
+               if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
+                   (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
+                       rc = pseries_remove_memblock(
+                               be64_to_cpu(old_drmem[i].base_addr),
                                                     memblock_size);
                        break;
-               } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) &&
-                          (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) {
-                       rc = memblock_add(old_drmem[i].base_addr,
+               } else if ((!(be32_to_cpu(old_drmem[i].flags) &
+                           DRCONF_MEM_ASSIGNED)) &&
+                           (be32_to_cpu(new_drmem[i].flags) &
+                           DRCONF_MEM_ASSIGNED)) {
+                       rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
                                          memblock_size);
                        rc = (rc < 0) ? -EINVAL : 0;
                        break;
                }
        }
-
        return rc;
 }
 
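These conversions are needed because device-tree property values are always stored big-endian; on little-endian ppc64le kernels the raw reg/drconf words must be swapped with be32_to_cpu()/be64_to_cpu() before being interpreted as addresses and sizes. The same idea in standalone userspace form, using be64toh() from <endian.h> (the two-cell address/size layout is assumed here purely for illustration):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Decode the first (base, size) pair of a big-endian "reg" property,
     * assuming two address cells and two size cells (i.e. 64 bits each). */
    static void parse_reg(const uint8_t *prop, size_t len)
    {
        uint64_t base, size;

        if (len < 16)
            return;
        memcpy(&base, prop, 8);
        memcpy(&size, prop + 8, 8);
        printf("base=0x%llx size=0x%llx\n",
               (unsigned long long)be64toh(base),
               (unsigned long long)be64toh(size));
    }
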
index 2fcccc0c997cc3102dcb67e5ea2f60fc4f4200b2..c81661e756a0396bb8e90dee214492b154081d91 100644 (file)
 #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
                              sizeof(struct ipl_block_fcp))
 
-#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8)
+#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
 
 #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
                              sizeof(struct ipl_block_ccw))
 
-#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8)
+#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
 
 #define IPL_MAX_SUPPORTED_VERSION (0)
 
@@ -38,10 +38,11 @@ struct ipl_list_hdr {
        u8  pbt;
        u8  flags;
        u16 reserved2;
+       u8  loadparm[8];
 } __attribute__((packed));
 
 struct ipl_block_fcp {
-       u8  reserved1[313-1];
+       u8  reserved1[305-1];
        u8  opt;
        u8  reserved2[3];
        u16 reserved3;
@@ -62,7 +63,6 @@ struct ipl_block_fcp {
                                 offsetof(struct ipl_block_fcp, scp_data)))
 
 struct ipl_block_ccw {
-       u8  load_parm[8];
        u8  reserved1[84];
        u8  reserved2[2];
        u16 devno;
index b76317c1f3eb5b542d3ee628326da48910866770..5efb2fe186e78275faa6ecc223b94a8c96c86a70 100644 (file)
@@ -1127,7 +1127,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
 {
        pgste_t pgste;
-       pte_t pte;
+       pte_t pte, oldpte;
        int young;
 
        if (mm_has_pgste(vma->vm_mm)) {
@@ -1135,12 +1135,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
        }
 
-       pte = *ptep;
+       oldpte = pte = *ptep;
        ptep_flush_direct(vma->vm_mm, addr, ptep);
        young = pte_young(pte);
        pte = pte_mkold(pte);
 
        if (mm_has_pgste(vma->vm_mm)) {
+               pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
                pgste = pgste_set_pte(ptep, pgste, pte);
                pgste_set_unlock(ptep, pgste);
        } else
@@ -1330,6 +1331,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
        ptep_flush_direct(vma->vm_mm, address, ptep);
 
        if (mm_has_pgste(vma->vm_mm)) {
+               pgste_set_key(ptep, pgste, entry, vma->vm_mm);
                pgste = pgste_set_pte(ptep, pgste, entry);
                pgste_set_unlock(ptep, pgste);
        } else
index 22aac5885ba23eca4590d9219aefc1fab34d9325..39badb9ca0b30c6b7b32ce720165e85c3c33319a 100644 (file)
@@ -455,22 +455,6 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
 DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
                   IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
 
-static struct attribute *ipl_fcp_attrs[] = {
-       &sys_ipl_type_attr.attr,
-       &sys_ipl_device_attr.attr,
-       &sys_ipl_fcp_wwpn_attr.attr,
-       &sys_ipl_fcp_lun_attr.attr,
-       &sys_ipl_fcp_bootprog_attr.attr,
-       &sys_ipl_fcp_br_lba_attr.attr,
-       NULL,
-};
-
-static struct attribute_group ipl_fcp_attr_group = {
-       .attrs = ipl_fcp_attrs,
-};
-
-/* CCW ipl device attributes */
-
 static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *page)
 {
@@ -487,6 +471,23 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
 static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
        __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
 
+static struct attribute *ipl_fcp_attrs[] = {
+       &sys_ipl_type_attr.attr,
+       &sys_ipl_device_attr.attr,
+       &sys_ipl_fcp_wwpn_attr.attr,
+       &sys_ipl_fcp_lun_attr.attr,
+       &sys_ipl_fcp_bootprog_attr.attr,
+       &sys_ipl_fcp_br_lba_attr.attr,
+       &sys_ipl_ccw_loadparm_attr.attr,
+       NULL,
+};
+
+static struct attribute_group ipl_fcp_attr_group = {
+       .attrs = ipl_fcp_attrs,
+};
+
+/* CCW ipl device attributes */
+
 static struct attribute *ipl_ccw_attrs_vm[] = {
        &sys_ipl_type_attr.attr,
        &sys_ipl_device_attr.attr,
@@ -765,28 +766,10 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
 DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
                   reipl_block_fcp->ipl_info.fcp.devno);
 
-static struct attribute *reipl_fcp_attrs[] = {
-       &sys_reipl_fcp_device_attr.attr,
-       &sys_reipl_fcp_wwpn_attr.attr,
-       &sys_reipl_fcp_lun_attr.attr,
-       &sys_reipl_fcp_bootprog_attr.attr,
-       &sys_reipl_fcp_br_lba_attr.attr,
-       NULL,
-};
-
-static struct attribute_group reipl_fcp_attr_group = {
-       .attrs = reipl_fcp_attrs,
-};
-
-/* CCW reipl device attributes */
-
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
-       reipl_block_ccw->ipl_info.ccw.devno);
-
 static void reipl_get_ascii_loadparm(char *loadparm,
                                     struct ipl_parameter_block *ibp)
 {
-       memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
+       memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
        EBCASC(loadparm, LOADPARM_LEN);
        loadparm[LOADPARM_LEN] = 0;
        strim(loadparm);
@@ -821,13 +804,50 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
                return -EINVAL;
        }
        /* initialize loadparm with blanks */
-       memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
+       memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
        /* copy and convert to ebcdic */
-       memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
-       ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
+       memcpy(ipb->hdr.loadparm, buf, lp_len);
+       ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
        return len;
 }
 
+/* FCP wrapper */
+static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj,
+                                      struct kobj_attribute *attr, char *page)
+{
+       return reipl_generic_loadparm_show(reipl_block_fcp, page);
+}
+
+static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj,
+                                       struct kobj_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       return reipl_generic_loadparm_store(reipl_block_fcp, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_fcp_loadparm_attr =
+       __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show,
+                                           reipl_fcp_loadparm_store);
+
+static struct attribute *reipl_fcp_attrs[] = {
+       &sys_reipl_fcp_device_attr.attr,
+       &sys_reipl_fcp_wwpn_attr.attr,
+       &sys_reipl_fcp_lun_attr.attr,
+       &sys_reipl_fcp_bootprog_attr.attr,
+       &sys_reipl_fcp_br_lba_attr.attr,
+       &sys_reipl_fcp_loadparm_attr.attr,
+       NULL,
+};
+
+static struct attribute_group reipl_fcp_attr_group = {
+       .attrs = reipl_fcp_attrs,
+};
+
+/* CCW reipl device attributes */
+
+DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+       reipl_block_ccw->ipl_info.ccw.devno);
+
 /* NSS wrapper */
 static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *page)
@@ -1125,11 +1145,10 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
        /* LOADPARM */
        /* check if read scp info worked and set loadparm */
        if (sclp_ipl_info.is_valid)
-               memcpy(ipb->ipl_info.ccw.load_parm,
-                               &sclp_ipl_info.loadparm, LOADPARM_LEN);
+               memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
        else
                /* read scp info failed: set empty loadparm (EBCDIC blanks) */
-               memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
+               memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
        ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
 
        /* VM PARM */
@@ -1251,9 +1270,16 @@ static int __init reipl_fcp_init(void)
                return rc;
        }
 
-       if (ipl_info.type == IPL_TYPE_FCP)
+       if (ipl_info.type == IPL_TYPE_FCP) {
                memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
-       else {
+               /*
+                * Fix loadparm: There are systems where the (SCSI) LOADPARM
+                * is invalid in the SCSI IPL parameter block, so take it
+                * always from sclp_ipl_info.
+                */
+               memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
+                      LOADPARM_LEN);
+       } else {
                reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
                reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
                reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
@@ -1864,7 +1890,23 @@ static void __init shutdown_actions_init(void)
 
 static int __init s390_ipl_init(void)
 {
+       char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
+
        sclp_get_ipl_info(&sclp_ipl_info);
+       /*
+        * Fix loadparm: There are systems where the (SCSI) LOADPARM
+        * returned by read SCP info is invalid (contains EBCDIC blanks)
+        * when the system has been booted via diag308. In that case we use
+        * the value from diag308, if available.
+        *
+        * There are also systems where diag308 store does not work in
+        * case the system is booted from HMC. Fortunately in this case
+        * READ SCP info provides the correct value.
+        */
+       if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 &&
+           diag308_set_works)
+               memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm,
+                      LOADPARM_LEN);
        shutdown_actions_init();
        shutdown_triggers_init();
        return 0;
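
The heuristic above only falls back to the diag308 LOADPARM when the SCLP-provided one is eight EBCDIC blanks (0x40), i.e. effectively unset. The test itself is trivial; a standalone sketch with an invented helper name:

    #include <stdbool.h>
    #include <string.h>

    #define LOADPARM_LEN 8

    /* EBCDIC space is 0x40; an all-blank LOADPARM carries no information. */
    static bool loadparm_is_blank(const unsigned char *lp)
    {
        static const unsigned char blanks[LOADPARM_LEN] = {
            0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
        };

        return memcmp(lp, blanks, LOADPARM_LEN) == 0;
    }
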
index 65fc3979c2f11bb037fd7f646c18a1188b4c4553..7cf18f8d4cb4fc9d281169ed8dee675f4c68b6be 100644 (file)
@@ -22,13 +22,11 @@ __kernel_clock_gettime:
        basr    %r5,0
 0:     al      %r5,21f-0b(%r5)                 /* get &_vdso_data */
        chi     %r2,__CLOCK_REALTIME
-       je      10f
+       je      11f
        chi     %r2,__CLOCK_MONOTONIC
        jne     19f
 
        /* CLOCK_MONOTONIC */
-       ltr     %r3,%r3
-       jz      9f                              /* tp == NULL */
 1:     l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     1b
@@ -67,12 +65,10 @@ __kernel_clock_gettime:
        j       6b
 8:     st      %r2,0(%r3)                      /* store tp->tv_sec */
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
-9:     lhi     %r2,0
+       lhi     %r2,0
        br      %r14
 
        /* CLOCK_REALTIME */
-10:    ltr     %r3,%r3                         /* tp == NULL */
-       jz      18f
 11:    l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     11b
@@ -111,7 +107,7 @@ __kernel_clock_gettime:
        j       15b
 17:    st      %r2,0(%r3)                      /* store tp->tv_sec */
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
-18:    lhi     %r2,0
+       lhi     %r2,0
        br      %r14
 
        /* Fallback to system call */
index 91940ed33a4ab21686f890481545cbfdd9404468..3f34e09db5f4d4d0e5f7a12b9e3acb89a72a6773 100644 (file)
@@ -21,7 +21,7 @@ __kernel_clock_gettime:
        .cfi_startproc
        larl    %r5,_vdso_data
        cghi    %r2,__CLOCK_REALTIME
-       je      4f
+       je      5f
        cghi    %r2,__CLOCK_THREAD_CPUTIME_ID
        je      9f
        cghi    %r2,-2          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
@@ -30,8 +30,6 @@ __kernel_clock_gettime:
        jne     12f
 
        /* CLOCK_MONOTONIC */
-       ltgr    %r3,%r3
-       jz      3f                              /* tp == NULL */
 0:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     0b
@@ -53,12 +51,10 @@ __kernel_clock_gettime:
        j       1b
 2:     stg     %r0,0(%r3)                      /* store tp->tv_sec */
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
-3:     lghi    %r2,0
+       lghi    %r2,0
        br      %r14
 
        /* CLOCK_REALTIME */
-4:     ltr     %r3,%r3                         /* tp == NULL */
-       jz      8f
 5:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     5b
@@ -80,7 +76,7 @@ __kernel_clock_gettime:
        j       6b
 7:     stg     %r0,0(%r3)                      /* store tp->tv_sec */
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
-8:     lghi    %r2,0
+       lghi    %r2,0
        br      %r14
 
        /* CLOCK_THREAD_CPUTIME_ID for this thread */
index ce81eb2ab76a207128069119acaac4e92918fa99..81b0e11521e444501ff5b1fc723965374ea3bd7e 100644 (file)
@@ -1317,19 +1317,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return -EINVAL;
        }
 
-       switch (kvm_run->exit_reason) {
-       case KVM_EXIT_S390_SIEIC:
-       case KVM_EXIT_UNKNOWN:
-       case KVM_EXIT_INTR:
-       case KVM_EXIT_S390_RESET:
-       case KVM_EXIT_S390_UCONTROL:
-       case KVM_EXIT_S390_TSCH:
-       case KVM_EXIT_DEBUG:
-               break;
-       default:
-               BUG();
-       }
-
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
index 19daa53a3da4a739f8f1b89cb7b88bf4dc0e5606..5404a6261db91a4fa204a9e2ad839b97530604c7 100644 (file)
@@ -986,11 +986,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
        pte_t *ptep;
 
        down_read(&mm->mmap_sem);
+retry:
        ptep = get_locked_pte(current->mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }
+       if (!(pte_val(*ptep) & _PAGE_INVALID) &&
+            (pte_val(*ptep) & _PAGE_PROTECT)) {
+                       pte_unmap_unlock(*ptep, ptl);
+                       if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
+                               up_read(&mm->mmap_sem);
+                               return -EFAULT;
+                       }
+                       goto retry;
+               }
 
        new = old = pgste_get_lock(ptep);
        pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
index bf8daf9d9c9b9b02b2c7e7a97e2a1f958b38708f..37458f38b22093d9bf1b6468577fb39de3f54013 100644 (file)
@@ -105,6 +105,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
+               __flush_anon_page(page, addr);
+               flush_dcache_page(page);
                pages[*nr] = page;
                (*nr)++;
 
index 778178f4c7d132c15f4f7700b0ad31a20ea7d24b..36327438caf0daffe826df5f4ad992885dd68cd8 100644 (file)
@@ -23,6 +23,7 @@ config X86
        def_bool y
        select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+       select ARCH_HAS_FAST_MULTIPLIER
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
        select HAVE_AOUT if X86_32
index afcd35d331de6348071f65ab52e373353e225786..cfe3b954d5e41cbd96be1fdb147c164b63581814 100644 (file)
@@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x)
 
 #include <asm-generic/bitops/sched.h>
 
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
 #include <asm/arch_hweight.h>
 
 #include <asm-generic/bitops/const_hweight.h>
index 5be9063545d278bd978d8c14e67fb7be08cc937b..3874693c0e53adc3593a21374154e99941944703 100644 (file)
@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
+extern pte_t level1_fixmap_pgt[512];
 extern pgd_t init_level4_pgt[];
 
 #define swapper_pg_dir init_level4_pgt
index e8a1201c3293bf074bedfaa7243f0e670064d457..16fb0099b7f295ed40c206e25478037f0b2ff651 100644 (file)
@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
  *
  * We can construct this by grafting the Xen provided pagetable into
  * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working.  We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- * NOTE: for PVH, the page tables are native.
+ * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
+ * kernel has a physical mapping to start with - but that's enough to
+ * get __va working.  We need to fill in the rest of the physical
+ * mapping once some sort of allocator has been set up.  NOTE: for
+ * PVH, the page tables are native.
  */
 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                /* L3_i[0] -> level2_ident_pgt */
                convert_pfn_mfn(level3_ident_pgt);
                /* L3_k[510] -> level2_kernel_pgt
-                * L3_i[511] -> level2_fixmap_pgt */
+                * L3_k[511] -> level2_fixmap_pgt */
                convert_pfn_mfn(level3_kernel_pgt);
+
+               /* L3_k[511][506] -> level1_fixmap_pgt */
+               convert_pfn_mfn(level2_fixmap_pgt);
        }
        /* We get [511][511] and have Xen's version of level2_kernel_pgt */
        l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        addr[1] = (unsigned long)l3;
        addr[2] = (unsigned long)l2;
        /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
-        * Both L4[272][0] and L4[511][511] have entries that point to the same
+        * Both L4[272][0] and L4[511][510] have entries that point to the same
         * L2 (PMD) tables. Meaning that if you modify it in __va space
         * it will be also modified in the __ka space! (But if you just
         * modify the PMD table to point to other PTE's or none, then you
         * are OK - which is what cleanup_highmap does) */
        copy_page(level2_ident_pgt, l2);
-       /* Graft it onto L4[511][511] */
+       /* Graft it onto L4[511][510] */
        copy_page(level2_kernel_pgt, l2);
 
-       /* Get [511][510] and graft that in level2_fixmap_pgt */
-       l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-       l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-       copy_page(level2_fixmap_pgt, l2);
-       /* Note that we don't do anything with level1_fixmap_pgt which
-        * we don't need. */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /* Make pagetable pieces RO */
                set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
 
                /* Pin down new L4 */
                pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
index 54535831f1e197a6a6c56a5cc8a4b08e90d4020b..77881798f7930bd4c1df4ea3cc4a0ab3f30ddc80 100644 (file)
 #include "blk.h"
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-                                            struct bio *bio)
+                                            struct bio *bio,
+                                            bool no_sg_merge)
 {
        struct bio_vec bv, bvprv = { NULL };
-       int cluster, high, highprv = 1, no_sg_merge;
+       int cluster, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
-       no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
        high = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
@@ -88,18 +88,23 @@ new_segment:
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-       rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+       bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
+                       &rq->q->queue_flags);
+
+       rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
+                       no_sg_merge);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+                       bio->bi_vcnt < queue_max_segments(q))
                bio->bi_phys_segments = bio->bi_vcnt;
        else {
                struct bio *nxt = bio->bi_next;
 
                bio->bi_next = NULL;
-               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }
 
index 4aac82615a46fd2363d03d28007fc5cf6be53929..383ea0cb1f0a295463789d956aae820b334f4a55 100644 (file)
@@ -1321,6 +1321,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
+                       tags->rqs[i] = NULL;
                }
        }
 
@@ -1354,8 +1355,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
        INIT_LIST_HEAD(&tags->page_list);
 
-       tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
-                                       GFP_KERNEL, set->numa_node);
+       tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                                set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
@@ -1379,8 +1381,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                        this_order--;
 
                do {
-                       page = alloc_pages_node(set->numa_node, GFP_KERNEL,
-                                               this_order);
+                       page = alloc_pages_node(set->numa_node,
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                               this_order);
                        if (page)
                                break;
                        if (!this_order--)
@@ -1404,8 +1407,10 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
-                                               set->numa_node))
+                                               set->numa_node)) {
+                                       tags->rqs[i] = NULL;
                                        goto fail;
+                               }
                        }
 
                        p += rq_size;
@@ -1416,7 +1421,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
        return tags;
 
 fail:
-       pr_warn("%s: failed to allocate requests\n", __func__);
        blk_mq_free_rq_map(set, tags, hctx_idx);
        return NULL;
 }
@@ -1936,6 +1940,61 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+       int i;
+
+       for (i = 0; i < set->nr_hw_queues; i++) {
+               set->tags[i] = blk_mq_init_rq_map(set, i);
+               if (!set->tags[i])
+                       goto out_unwind;
+       }
+
+       return 0;
+
+out_unwind:
+       while (--i >= 0)
+               blk_mq_free_rq_map(set, set->tags[i], i);
+
+       set->tags = NULL;
+       return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+       unsigned int depth;
+       int err;
+
+       depth = set->queue_depth;
+       do {
+               err = __blk_mq_alloc_rq_maps(set);
+               if (!err)
+                       break;
+
+               set->queue_depth >>= 1;
+               if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+                       err = -ENOMEM;
+                       break;
+               }
+       } while (set->queue_depth);
+
+       if (!set->queue_depth || err) {
+               pr_err("blk-mq: failed to allocate request map\n");
+               return -ENOMEM;
+       }
+
+       if (depth != set->queue_depth)
+               pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+                                               depth, set->queue_depth);
+
+       return 0;
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -1944,8 +2003,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-       int i;
-
        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth)
@@ -1966,23 +2023,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                                 sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
-               goto out;
+               return -ENOMEM;
 
-       for (i = 0; i < set->nr_hw_queues; i++) {
-               set->tags[i] = blk_mq_init_rq_map(set, i);
-               if (!set->tags[i])
-                       goto out_unwind;
-       }
+       if (blk_mq_alloc_rq_maps(set))
+               goto enomem;
 
        mutex_init(&set->tag_list_lock);
        INIT_LIST_HEAD(&set->tag_list);
 
        return 0;
-
-out_unwind:
-       while (--i >= 0)
-               blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+       kfree(set->tags);
+       set->tags = NULL;
        return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -1997,6 +2049,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
        }
 
        kfree(set->tags);
+       set->tags = NULL;
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
 
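blk_mq_alloc_rq_maps() now degrades gracefully instead of failing outright: when the per-hardware-queue request maps cannot be allocated it halves set->queue_depth and retries until it reaches reserved_tags + BLK_MQ_TAG_MIN, and the __GFP_NOWARN | __GFP_NORETRY flags keep the oversized attempts cheap and silent. The same back-off pattern in plain C, with hypothetical names and sizes:

    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_DEPTH 4                     /* stand-in for reserved_tags + BLK_MQ_TAG_MIN */

    /* Try to allocate "depth" request slots, halving the request on failure.
     * Returns the depth actually obtained (buffer via *out), or 0. */
    static unsigned int alloc_with_backoff(unsigned int depth, void **out)
    {
        while (depth >= MIN_DEPTH) {
            void *p = calloc(depth, 64);    /* 64 bytes per pretend request */
            if (p) {
                *out = p;
                return depth;
            }
            depth >>= 1;                    /* memory is tight: ask for less */
        }
        *out = NULL;
        return 0;
    }
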
index 4db5abf96b9ec1595a822efebfb9df9776af6477..17f5c84ce7bfb588a5a34bc244a7ac38067b4391 100644 (file)
@@ -554,8 +554,10 @@ int blk_register_queue(struct gendisk *disk)
         * Initialization must be complete by now.  Finish the initial
         * bypass from queue allocation.
         */
-       queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
-       blk_queue_bypass_end(q);
+       if (!blk_queue_init_done(q)) {
+               queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+               blk_queue_bypass_end(q);
+       }
 
        ret = blk_trace_init_sysfs(dev);
        if (ret)
index 791f419431322882a915f88995df7b72552d5507..09da5e4a8e03bb77e33de4b9822ba26bbff8973f 100644 (file)
@@ -28,10 +28,10 @@ struct kobject *block_depr;
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT            (1 << MINORBITS)
 
-/* For extended devt allocation.  ext_devt_mutex prevents look up
+/* For extended devt allocation.  ext_devt_lock prevents look up
  * results from going away underneath its user.
  */
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
        }
 
        /* allocate ext devt */
-       mutex_lock(&ext_devt_mutex);
-       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
-       mutex_unlock(&ext_devt_mutex);
+       idr_preload(GFP_KERNEL);
+
+       spin_lock(&ext_devt_lock);
+       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+       spin_unlock(&ext_devt_lock);
+
+       idr_preload_end();
        if (idx < 0)
                return idx == -ENOSPC ? -EBUSY : idx;
 
@@ -447,9 +451,9 @@ void blk_free_devt(dev_t devt)
                return;
 
        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-               mutex_lock(&ext_devt_mutex);
+               spin_lock(&ext_devt_lock);
                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               mutex_unlock(&ext_devt_mutex);
+               spin_unlock(&ext_devt_lock);
        }
 }
 
@@ -665,7 +669,6 @@ void del_gendisk(struct gendisk *disk)
                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
        device_del(disk_to_dev(disk));
-       blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -690,13 +693,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
        } else {
                struct hd_struct *part;
 
-               mutex_lock(&ext_devt_mutex);
+               spin_lock(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
                if (part && get_disk(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
-               mutex_unlock(&ext_devt_mutex);
+               spin_unlock(&ext_devt_lock);
        }
 
        return disk;
@@ -1098,6 +1101,7 @@ static void disk_release(struct device *dev)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
+       blk_free_devt(dev->devt);
        disk_release_events(disk);
        kfree(disk->random);
        disk_replace_part_tbl(disk, NULL);
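
Because ext_devt_lock is now a spinlock, the idr insertion may no longer sleep; the diff therefore uses the standard idr_preload()/idr_preload_end() pattern, doing the sleeping GFP_KERNEL preallocation outside the lock and the actual idr_alloc() with GFP_NOWAIT inside it. Schematically (a generic sketch, not tied to genhd.c):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    /* Generic preload pattern: the sleeping GFP_KERNEL work happens in
     * idr_preload(), so the idr_alloc() under the spinlock may use
     * GFP_NOWAIT without risking a sleep-while-atomic. */
    static int alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
    {
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock(lock);
        id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
        spin_unlock(lock);
        idr_preload_end();

        return id;                          /* >= 0 on success, -errno on failure */
    }
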
index 789cdea05893bb8e3420ede5d1aa65e7af408e1b..0d9e5f97f0a8afaa397530a77684bacc7b22ab26 100644 (file)
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
 static void part_release(struct device *dev)
 {
        struct hd_struct *p = dev_to_part(dev);
+       blk_free_devt(dev->devt);
        free_part_stats(p);
        free_part_info(p);
        kfree(p);
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
        rcu_assign_pointer(ptbl->last_lookup, NULL);
        kobject_put(part->holder_dir);
        device_del(part_to_dev(part));
-       blk_free_devt(part_devt(part));
 
        hd_struct_put(part);
 }
index 97eb001960b97774e89643f65711d2a68e6a3a8f..2f6e4fb1a1ea14c4a1bcba18618b0bdaf6e1cd9c 100644 (file)
@@ -121,6 +121,7 @@ static int public_key_verify_signature_2(const struct key *key,
 struct asymmetric_key_subtype public_key_subtype = {
        .owner                  = THIS_MODULE,
        .name                   = "public_key",
+       .name_len               = sizeof("public_key") - 1,
        .describe               = public_key_describe,
        .destroy                = public_key_destroy,
        .verify_signature       = public_key_verify_signature_2,
index 79175e6ea0b28493f26079d5a17926e0aab63984..2421f46184ce873076fe3cfc0f195a2d7bf4f902 100644 (file)
@@ -128,6 +128,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
 {
        struct win_certificate wrapper;
        const u8 *pkcs7;
+       unsigned len;
 
        if (ctx->sig_len < sizeof(wrapper)) {
                pr_debug("Signature wrapper too short\n");
@@ -154,33 +155,49 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
                return -ENOTSUPP;
        }
 
-       /* Looks like actual pkcs signature length is in wrapper->length.
-        * size obtained from data dir entries lists the total size of
-        * certificate table which is also aligned to octawrod boundary.
-        *
-        * So set signature length field appropriately.
+       /* It looks like the pkcs signature length in wrapper->length and the
+        * size obtained from the data dir entries, which lists the total size
+        * of certificate table, are both aligned to an octaword boundary, so
+        * we may have to deal with some padding.
         */
        ctx->sig_len = wrapper.length;
        ctx->sig_offset += sizeof(wrapper);
        ctx->sig_len -= sizeof(wrapper);
-       if (ctx->sig_len == 0) {
+       if (ctx->sig_len < 4) {
                pr_debug("Signature data missing\n");
                return -EKEYREJECTED;
        }
 
-       /* What's left should a PKCS#7 cert */
+       /* What's left should be a PKCS#7 cert */
        pkcs7 = pebuf + ctx->sig_offset;
-       if (pkcs7[0] == (ASN1_CONS_BIT | ASN1_SEQ)) {
-               if (pkcs7[1] == 0x82 &&
-                   pkcs7[2] == (((ctx->sig_len - 4) >> 8) & 0xff) &&
-                   pkcs7[3] ==  ((ctx->sig_len - 4)       & 0xff))
-                       return 0;
-               if (pkcs7[1] == 0x80)
-                       return 0;
-               if (pkcs7[1] > 0x82)
-                       return -EMSGSIZE;
+       if (pkcs7[0] != (ASN1_CONS_BIT | ASN1_SEQ))
+               goto not_pkcs7;
+
+       switch (pkcs7[1]) {
+       case 0 ... 0x7f:
+               len = pkcs7[1] + 2;
+               goto check_len;
+       case ASN1_INDEFINITE_LENGTH:
+               return 0;
+       case 0x81:
+               len = pkcs7[2] + 3;
+               goto check_len;
+       case 0x82:
+               len = ((pkcs7[2] << 8) | pkcs7[3]) + 4;
+               goto check_len;
+       case 0x83 ... 0xff:
+               return -EMSGSIZE;
+       default:
+               goto not_pkcs7;
        }
 
+check_len:
+       if (len <= ctx->sig_len) {
+               /* There may be padding */
+               ctx->sig_len = len;
+               return 0;
+       }
+not_pkcs7:
        pr_debug("Signature data not PKCS#7\n");
        return -ELIBBAD;
 }
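
The rewritten check decodes the DER length octets of the outer SEQUENCE explicitly: short form (first length byte <= 0x7f), indefinite form (0x80), and the one- and two-byte long forms (0x81/0x82); longer forms cannot describe anything that fits in a PE signature blob, and a decoded length smaller than sig_len simply means octaword padding follows. A standalone sketch of the same decoding:

    #include <stddef.h>
    #include <stdint.h>

    /* Total DER object size (tag + length octets + content) for a buffer that
     * should start with a constructed SEQUENCE (0x30); 0 means "not decodable".
     * Indefinite length (0x80) is treated as "use the whole buffer". */
    static size_t der_total_len(const uint8_t *p, size_t avail)
    {
        if (avail < 2 || p[0] != 0x30)
            return 0;
        if (p[1] <= 0x7f)                           /* short form */
            return (size_t)p[1] + 2;
        if (p[1] == 0x80)                           /* indefinite form */
            return avail;
        if (p[1] == 0x81 && avail >= 3)             /* long form, 1 length byte */
            return (size_t)p[2] + 3;
        if (p[1] == 0x82 && avail >= 4)             /* long form, 2 length bytes */
            return (((size_t)p[2] << 8) | p[3]) + 4;
        return 0;                                   /* longer forms: too big here */
    }
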
index 2da8660262e5ccefa00e7608958adaf8c7328e9a..81dc75033f1590111d488216302f475c42b42137 100644 (file)
@@ -33,7 +33,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
                      void *handler_context, void *region_context)
 {
        int i;
-       u8 *value = (u8 *)&value64;
+       u8 *value = (u8 *)value64;
 
        if (address > 0xff || !value64)
                return AE_BAD_PARAMETER;
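
The one-character change above fixes a classic indirection bug: value64 already points at the caller's 64-bit buffer, so (u8 *)&value64 made value point at the local pointer variable itself, and the CMOS RTC bytes were read from and written to the wrong memory. In miniature:

    #include <stdint.h>
    #include <stdio.h>

    static void fill(uint64_t *value64)
    {
        /* WRONG: &value64 is the address of this local pointer variable,
         * not the caller's buffer:
         *     uint8_t *value = (uint8_t *)&value64;
         * RIGHT: value64 already points at the caller's 64-bit buffer. */
        uint8_t *value = (uint8_t *)value64;

        for (int i = 0; i < 8; i++)
            value[i] = (uint8_t)i;          /* pretend CMOS bytes */
    }

    int main(void)
    {
        uint64_t v = 0;

        fill(&v);
        printf("0x%016llx\n", (unsigned long long)v);
        return 0;
    }
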
index 9dfec48dd4e503b82994e50d2ae7d06768f84d61..fddc1e86f9d0e73648a26de5cdc1fd82ec9f9b03 100644 (file)
@@ -610,7 +610,7 @@ static int acpi_lpss_suspend_late(struct device *dev)
        return acpi_dev_suspend_late(dev);
 }
 
-static int acpi_lpss_restore_early(struct device *dev)
+static int acpi_lpss_resume_early(struct device *dev)
 {
        int ret = acpi_dev_resume_early(dev);
 
@@ -650,15 +650,15 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 static struct dev_pm_domain acpi_lpss_pm_domain = {
        .ops = {
 #ifdef CONFIG_PM_SLEEP
-               .suspend_late = acpi_lpss_suspend_late,
-               .restore_early = acpi_lpss_restore_early,
                .prepare = acpi_subsys_prepare,
                .complete = acpi_subsys_complete,
                .suspend = acpi_subsys_suspend,
-               .resume_early = acpi_subsys_resume_early,
+               .suspend_late = acpi_lpss_suspend_late,
+               .resume_early = acpi_lpss_resume_early,
                .freeze = acpi_subsys_freeze,
                .poweroff = acpi_subsys_suspend,
-               .poweroff_late = acpi_subsys_suspend_late,
+               .poweroff_late = acpi_lpss_suspend_late,
+               .restore_early = acpi_lpss_resume_early,
 #endif
 #ifdef CONFIG_PM_RUNTIME
                .runtime_suspend = acpi_lpss_runtime_suspend,
index 68f725839eb6d61bf129664922b1b22d7bdb0d05..1b13b921dda933e373b3e237886e55269df94aa7 100644 (file)
@@ -316,6 +316,45 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
                    acpi_ns_check_package_list(info, package, elements, count);
                break;
 
+       case ACPI_PTYPE2_UUID_PAIR:
+
+               /* The package must contain pairs of (UUID + type) */
+
+               if (count & 1) {
+                       expected_count = count + 1;
+                       goto package_too_small;
+               }
+
+               while (count > 0) {
+                       status = acpi_ns_check_object_type(info, elements,
+                                                          package->ret_info.
+                                                          object_type1, 0);
+                       if (ACPI_FAILURE(status)) {
+                               return (status);
+                       }
+
+                       /* Validate length of the UUID buffer */
+
+                       if ((*elements)->buffer.length != 16) {
+                               ACPI_WARN_PREDEFINED((AE_INFO,
+                                                     info->full_pathname,
+                                                     info->node_flags,
+                                                     "Invalid length for UUID Buffer"));
+                               return (AE_AML_OPERAND_VALUE);
+                       }
+
+                       status = acpi_ns_check_object_type(info, elements + 1,
+                                                          package->ret_info.
+                                                          object_type2, 0);
+                       if (ACPI_FAILURE(status)) {
+                               return (status);
+                       }
+
+                       elements += 2;
+                       count -= 2;
+               }
+               break;
+
        default:
 
                /* Should not get here if predefined info table is correct */
index 1c162e7be045405f267ef8d8140795508f8ea1c0..5fdfe65fe165d0a95b72e258a6c4584682f7019a 100644 (file)
@@ -534,20 +534,6 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
                        " invalid.\n");
        }
 
-       /*
-        * When fully charged, some batteries wrongly report
-        * capacity_now = design_capacity instead of = full_charge_capacity
-        */
-       if (battery->capacity_now > battery->full_charge_capacity
-           && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
-               if (battery->capacity_now != battery->design_capacity)
-                       printk_once(KERN_WARNING FW_BUG
-                               "battery: reported current charge level (%d) "
-                               "is higher than reported maximum charge level (%d).\n",
-                               battery->capacity_now, battery->full_charge_capacity);
-               battery->capacity_now = battery->full_charge_capacity;
-       }
-
        if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
            && battery->capacity_now >= 0 && battery->capacity_now <= 100)
                battery->capacity_now = (battery->capacity_now *
index 9922cc46b15c44d75171b38f57518562829586da..cb6066c809ea03dddec42b0871e22a67ef242726 100644 (file)
@@ -1030,6 +1030,10 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
        DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
        DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
        {
+       ec_flag_msi, "Clevo W350etq", {
+       DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."),
+       DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL},
+       {
        ec_validate_ecdt, "ASUS hardware", {
        DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
        {
index 3dca36d4ad26ef22d6ecea66e166ae22cde9ab33..17f9ec501972ef208b505ed881173e2dac0cc2ea 100644 (file)
@@ -1071,9 +1071,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 
        if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
 
-               cpuidle_pause_and_lock();
                /* Protect against cpu-hotplug */
                get_online_cpus();
+               cpuidle_pause_and_lock();
 
                /* Disable all cpuidle devices */
                for_each_online_cpu(cpu) {
@@ -1100,8 +1100,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
                                cpuidle_enable_device(dev);
                        }
                }
-               put_online_cpus();
                cpuidle_resume_and_unlock();
+               put_online_cpus();
        }
 
        return 0;
index 9a9298994e262cdbb17ee883009bbae6d2d082d3..3bf7764659a4f5643557eb2aee4b5b327da4f88f 100644 (file)
@@ -667,8 +667,14 @@ static ssize_t
 acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
                     char *buf) {
        struct acpi_device *acpi_dev = to_acpi_device(dev);
+       acpi_status status;
+       unsigned long long sun;
+
+       status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
 
-       return sprintf(buf, "%lu\n", acpi_dev->pnp.sun);
+       return sprintf(buf, "%llu\n", sun);
 }
 static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
 
@@ -690,7 +696,6 @@ static int acpi_device_setup_files(struct acpi_device *dev)
 {
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        acpi_status status;
-       unsigned long long sun;
        int result = 0;
 
        /*
@@ -731,14 +736,10 @@ static int acpi_device_setup_files(struct acpi_device *dev)
        if (dev->pnp.unique_id)
                result = device_create_file(&dev->dev, &dev_attr_uid);
 
-       status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun);
-       if (ACPI_SUCCESS(status)) {
-               dev->pnp.sun = (unsigned long)sun;
+       if (acpi_has_method(dev->handle, "_SUN")) {
                result = device_create_file(&dev->dev, &dev_attr_sun);
                if (result)
                        goto end;
-       } else {
-               dev->pnp.sun = (unsigned long)-1;
        }
 
        if (acpi_has_method(dev->handle, "_STA")) {
index 826884392e6b4b9df067623cfad758c29ea954a7..fcbda105616eb703b87e50ee88650cd4dde72d78 100644 (file)
@@ -82,9 +82,9 @@ module_param(allow_duplicates, bool, 0644);
  * For Windows 8 systems: used to decide if video module
  * should skip registering backlight interface of its own.
  */
-static int use_native_backlight_param = 1;
+static int use_native_backlight_param = -1;
 module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
-static bool use_native_backlight_dmi = false;
+static bool use_native_backlight_dmi = true;
 
 static int register_count;
 static struct mutex video_list_lock;
@@ -417,6 +417,12 @@ static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init video_disable_native_backlight(const struct dmi_system_id *d)
+{
+       use_native_backlight_dmi = false;
+       return 0;
+}
+
 static struct dmi_system_id video_dmi_table[] __initdata = {
        /*
         * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -720,6 +726,41 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
                },
        },
+
+       /*
+        * These models have a working acpi_video backlight control, and using
+        * native backlight causes a regression where backlight does not work
+        * when userspace is not handling brightness key events. Disable
+        * native_backlight on these to fix this:
+        * https://bugzilla.kernel.org/show_bug.cgi?id=81691
+        */
+       {
+        .callback = video_disable_native_backlight,
+        .ident = "ThinkPad T420",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"),
+               },
+       },
+       {
+        .callback = video_disable_native_backlight,
+        .ident = "ThinkPad T520",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"),
+               },
+       },
+
+       /* The native backlight controls do not work on some older machines */
+       {
+        /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */
+        .callback = video_disable_native_backlight,
+        .ident = "HP ENVY 15 Notebook",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
+               },
+       },
        {}
 };
 
index a29f8012fb08483cfb67e47b9d104db1e7db4954..a0cc0edafc78e27a9b1cee2031d1a74e523425d2 100644 (file)
@@ -305,6 +305,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
+       { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+       { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -442,6 +450,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9182 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 on some Gigabyte */
@@ -1329,6 +1339,18 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
                ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
 
+       /*
+        * The JMicron chip 361/363 contains one SATA controller and one
+        * PATA controller,for powering on these both controllers, we must
+        * follow the sequence one by one, otherwise one of them can not be
+        * powered on successfully, so here we disable the async suspend
+        * method for these chips.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+               (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+               pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+               device_disable_async_suspend(&pdev->dev);
+
        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
index f1fef74e503cd94716dfe090ab397d83118065f5..032904402c9509af14eafdc36993e8d6eb39ac88 100644 (file)
  */
 
 #include <linux/ahci_platform.h>
-#include <linux/reset.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/fuse.h>
 #include <soc/tegra/pmc.h>
+
 #include "ahci.h"
 
 #define SATA_CONFIGURATION_0                           0x180
@@ -180,9 +183,12 @@ static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv)
 
        /* Pad calibration */
 
-       /* FIXME Always use calibration 0. Change this to read the calibration
-        * fuse once the fuse driver has landed. */
-       val = 0;
+       ret = tegra_fuse_readl(FUSE_SATA_CALIB, &val);
+       if (ret) {
+               dev_err(&tegra->pdev->dev,
+                       "failed to read calibration fuse: %d\n", ret);
+               return ret;
+       }
 
        calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK];
 
index c6962300b93c7c6292ed91d62611db6678af1f49..f03aab187f4da1cf16d3f2cb34642f4fcaad7434 100644 (file)
@@ -78,6 +78,9 @@
 #define CFG_MEM_RAM_SHUTDOWN           0x00000070
 #define BLOCK_MEM_RDY                  0x00000074
 
+/* Max retry for link down */
+#define MAX_LINK_DOWN_RETRY 3
+
 struct xgene_ahci_context {
        struct ahci_host_priv *hpriv;
        struct device *dev;
@@ -145,6 +148,14 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
        return rc;
 }
 
+static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
+{
+       void __iomem *diagcsr = ctx->csr_diag;
+
+       return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
+               readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
+}
+
 /**
  * xgene_ahci_read_id - Read ID data from the specified device
  * @dev: device
@@ -229,8 +240,11 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
  * and Gen1 (1.5Gbps). Otherwise during long IO stress test, the PHY will
  * report disparity error and etc. In addition, during COMRESET, there can
  * be error reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
- * SERR_10B_8B_ERR, the PHY receiver line must be reseted. The following
- * algorithm is followed to proper configure the hardware PHY during COMRESET:
+ * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
+ * reboot cycle regression testing, the PHY sometimes reports link down even
+ * though a device is present, because speed negotiation failed; the COMRESET
+ * must then be retried to bring the link up. The following algorithm is used
+ * to properly configure the hardware PHY during COMRESET:
  *
  * Alg Part 1:
  * 1. Start the PHY at Gen3 speed (default setting)
@@ -246,9 +260,15 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
  * Alg Part 2:
  * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error
  *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
- * 2. Go to Alg Part 3
+ * 2. Go to Alg Part 4
  *
  * Alg Part 3:
+ * 1. Check PORT_SCR_STAT: if device presence is detected but PHY
+ *    communication establishment failed, and the number of link down
+ *    attempts is less than the maximum (3), go to Alg Part 1.
+ * 2. Go to Alg Part 4.
+ *
+ * Alg Part 4:
  * 1. Clear any pending from register PORT_SCR_ERR.
  *
  * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition
@@ -267,19 +287,27 @@ static int xgene_ahci_do_hardreset(struct ata_link *link,
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ata_taskfile tf;
+       int link_down_retry = 0;
        int rc;
-       u32 val;
-
-       /* clear D2H reception area to properly wait for D2H FIS */
-       ata_tf_init(link->device, &tf);
-       tf.command = ATA_BUSY;
-       ata_tf_to_fis(&tf, 0, 0, d2h_fis);
-       rc = sata_link_hardreset(link, timing, deadline, online,
+       u32 val, sstatus;
+
+       do {
+               /* clear D2H reception area to properly wait for D2H FIS */
+               ata_tf_init(link->device, &tf);
+               tf.command = ATA_BUSY;
+               ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+               rc = sata_link_hardreset(link, timing, deadline, online,
                                 ahci_check_ready);
+               if (*online) {
+                       val = readl(port_mmio + PORT_SCR_ERR);
+                       if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+                               dev_warn(ctx->dev, "link has error\n");
+                       break;
+               }
 
-       val = readl(port_mmio + PORT_SCR_ERR);
-       if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
-               dev_warn(ctx->dev, "link has error\n");
+               sata_scr_read(link, SCR_STATUS, &sstatus);
+       } while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
+                (sstatus & 0xff) == 0x1);
 
        /* clear all errors if any pending */
        val = readl(port_mmio + PORT_SCR_ERR);
@@ -467,6 +495,11 @@ static int xgene_ahci_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       if (xgene_ahci_is_memram_inited(ctx)) {
+               dev_info(dev, "skip clock and PHY initialization\n");
+               goto skip_clk_phy;
+       }
+
        /* Due to errata, HW requires full toggle transition */
        rc = ahci_platform_enable_clks(hpriv);
        if (rc)
@@ -479,7 +512,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
 
        /* Configure the host controller */
        xgene_ahci_hw_init(hpriv);
-
+skip_clk_phy:
        hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
 
        rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
index 893e30e9a9efab5662d5361e83051f9ac1c650c0..ffbe625e6fd2738726e77d42313aa9a09866ec4b 100644 (file)
@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
        /* SATA Controller IDE (Coleto Creek) */
        { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+       /* SATA Controller IDE (9 Series) */
+       { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 
        { }     /* terminate list */
 };
index 4d1a5d2c4287f2979c34a2913c0cb4018f573776..47e418b8c8baa0eee13b0a2843c25371a83bd338 100644 (file)
@@ -143,6 +143,18 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
        };
        const struct ata_port_info *ppi[] = { &info, NULL };
 
+       /*
+        * The JMicron 361/363 chips contain one SATA controller and one
+        * PATA controller. To power on both controllers, they must be
+        * brought up one after the other in sequence, otherwise one of
+        * them cannot be powered on successfully, so disable the async
+        * suspend method for these chips.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+               (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+               pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+               device_disable_async_suspend(&pdev->dev);
+
        return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
 }
 
index 7d1326985bee8b4d3e9dcb326bf795622901bf49..bfc90b8547f23f1c243a4f1c6d7c00fc95f632a6 100644 (file)
@@ -146,6 +146,9 @@ struct regcache_ops {
        enum regcache_type type;
        int (*init)(struct regmap *map);
        int (*exit)(struct regmap *map);
+#ifdef CONFIG_DEBUG_FS
+       void (*debugfs_init)(struct regmap *map);
+#endif
        int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
        int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
        int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
index 6a7e4fa12854c8ac365edbe6dbf918c04d2ccd59..f3e8fe0cc65030a8fea51c00994a343df8220659 100644 (file)
@@ -194,10 +194,6 @@ static void rbtree_debugfs_init(struct regmap *map)
 {
        debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
 }
-#else
-static void rbtree_debugfs_init(struct regmap *map)
-{
-}
 #endif
 
 static int regcache_rbtree_init(struct regmap *map)
@@ -222,8 +218,6 @@ static int regcache_rbtree_init(struct regmap *map)
                        goto err;
        }
 
-       rbtree_debugfs_init(map);
-
        return 0;
 
 err:
@@ -532,6 +526,9 @@ struct regcache_ops regcache_rbtree_ops = {
        .name = "rbtree",
        .init = regcache_rbtree_init,
        .exit = regcache_rbtree_exit,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_init = rbtree_debugfs_init,
+#endif
        .read = regcache_rbtree_read,
        .write = regcache_rbtree_write,
        .sync = regcache_rbtree_sync,
index 29b4128da0b08ce6b8eda048557ed9093b07efa0..5617da6dc898b3edeff6f8b6cdca38fbab849900 100644 (file)
@@ -698,7 +698,7 @@ int regcache_sync_block(struct regmap *map, void *block,
                        unsigned int block_base, unsigned int start,
                        unsigned int end)
 {
-       if (regmap_can_raw_write(map))
+       if (regmap_can_raw_write(map) && !map->use_single_rw)
                return regcache_sync_block_raw(map, block, cache_present,
                                               block_base, start, end);
        else
index 45d812c0ea7751868d3d14d7691da6f74493b54a..65ea7b256b3eab603100bfe451383bb1fa339ae8 100644 (file)
@@ -538,6 +538,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 
                next = rb_next(&range_node->node);
        }
+
+       if (map->cache_ops && map->cache_ops->debugfs_init)
+               map->cache_ops->debugfs_init(map);
 }
 
 void regmap_debugfs_exit(struct regmap *map)
index 78f43fb2fe84647d5e97d5c9f6468cf6c8704187..1cf427bc0d4a8f0ddd9571b69f5dcc14a2167bdb 100644 (file)
@@ -109,7 +109,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
 
 bool regmap_volatile(struct regmap *map, unsigned int reg)
 {
-       if (!regmap_readable(map, reg))
+       if (!map->format.format_write && !regmap_readable(map, reg))
                return false;
 
        if (map->volatile_reg)
index 294a7dd2519028a21260579f6c25b5a9aff011f7..f032ed6dd459564ece3a6cf119070c72ca2991cc 100644 (file)
@@ -282,6 +282,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xA8DB */
        { 0, },
 };
 MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
index db1e9560d8a721c5165955651462c11d5b306f0a..5c8e7fe077458f31e83b0bcf65137b255e4a8336 100644 (file)
@@ -3918,7 +3918,6 @@ skip_create_disk:
        if (rv) {
                dev_err(&dd->pdev->dev,
                        "Unable to allocate request queue\n");
-               rv = -ENOMEM;
                goto block_queue_alloc_init_error;
        }
 
index a3b042c4d448dcb4ad0a21272dd8ff83527f3b9d..00d469c7f9f79742c659011088b144d5bf4b6bef 100644 (file)
@@ -462,17 +462,21 @@ static int null_add_dev(void)
        struct gendisk *disk;
        struct nullb *nullb;
        sector_t size;
+       int rv;
 
        nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
-       if (!nullb)
+       if (!nullb) {
+               rv = -ENOMEM;
                goto out;
+       }
 
        spin_lock_init(&nullb->lock);
 
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
                submit_queues = nr_online_nodes;
 
-       if (setup_queues(nullb))
+       rv = setup_queues(nullb);
+       if (rv)
                goto out_free_nullb;
 
        if (queue_mode == NULL_Q_MQ) {
@@ -484,22 +488,29 @@ static int null_add_dev(void)
                nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
                nullb->tag_set.driver_data = nullb;
 
-               if (blk_mq_alloc_tag_set(&nullb->tag_set))
+               rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+               if (rv)
                        goto out_cleanup_queues;
 
                nullb->q = blk_mq_init_queue(&nullb->tag_set);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_tags;
+               }
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_queues;
+               }
                blk_queue_make_request(nullb->q, null_queue_bio);
                init_driver_queues(nullb);
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
-               if (!nullb->q)
+               if (!nullb->q) {
+                       rv = -ENOMEM;
                        goto out_cleanup_queues;
+               }
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
                init_driver_queues(nullb);
@@ -509,8 +520,10 @@ static int null_add_dev(void)
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 
        disk = nullb->disk = alloc_disk_node(1, home_node);
-       if (!disk)
+       if (!disk) {
+               rv = -ENOMEM;
                goto out_cleanup_blk_queue;
+       }
 
        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
@@ -544,7 +557,7 @@ out_cleanup_queues:
 out_free_nullb:
        kfree(nullb);
 out:
-       return -ENOMEM;
+       return rv;
 }
 
 static int __init null_init(void)
index 623c84145b792b9ddaa852e45c2cdbf80b1f08d5..4b97baf8afa317347ca7ff531f2d8828394084a2 100644 (file)
@@ -5087,9 +5087,11 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
 
-       rbd_dev->rq_wq = alloc_workqueue(rbd_dev->disk->disk_name, 0, 0);
-       if (!rbd_dev->rq_wq)
+       rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
+       if (!rbd_dev->rq_wq) {
+               ret = -ENOMEM;
                goto err_out_mapping;
+       }
 
        ret = rbd_bus_add_dev(rbd_dev);
        if (ret)
index 6f550d9e7a2dbd1258a2ba833bcda6fb97a003a2..a60f26400705e11aae4ef6abcb8c840e94a33806 100644 (file)
@@ -586,6 +586,30 @@ static int arm_ccn_pmu_type_eq(u32 a, u32 b)
        return 0;
 }
 
+static void arm_ccn_pmu_event_destroy(struct perf_event *event)
+{
+       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+       struct hw_perf_event *hw = &event->hw;
+
+       if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+               clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
+       } else {
+               struct arm_ccn_component *source =
+                               ccn->dt.pmu_counters[hw->idx].source;
+
+               if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
+                               CCN_CONFIG_EVENT(event->attr.config) ==
+                               CCN_EVENT_WATCHPOINT)
+                       clear_bit(hw->config_base, source->xp.dt_cmp_mask);
+               else
+                       clear_bit(hw->config_base, source->pmu_events_mask);
+               clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+       }
+
+       ccn->dt.pmu_counters[hw->idx].source = NULL;
+       ccn->dt.pmu_counters[hw->idx].event = NULL;
+}
+
 static int arm_ccn_pmu_event_init(struct perf_event *event)
 {
        struct arm_ccn *ccn;
@@ -599,6 +623,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
                return -ENOENT;
 
        ccn = pmu_to_arm_ccn(event->pmu);
+       event->destroy = arm_ccn_pmu_event_destroy;
 
        if (hw->sample_period) {
                dev_warn(ccn->dev, "Sampling not supported!\n");
@@ -731,30 +756,6 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
        return 0;
 }
 
-static void arm_ccn_pmu_event_free(struct perf_event *event)
-{
-       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
-       struct hw_perf_event *hw = &event->hw;
-
-       if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
-               clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
-       } else {
-               struct arm_ccn_component *source =
-                               ccn->dt.pmu_counters[hw->idx].source;
-
-               if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
-                               CCN_CONFIG_EVENT(event->attr.config) ==
-                               CCN_EVENT_WATCHPOINT)
-                       clear_bit(hw->config_base, source->xp.dt_cmp_mask);
-               else
-                       clear_bit(hw->config_base, source->pmu_events_mask);
-               clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
-       }
-
-       ccn->dt.pmu_counters[hw->idx].source = NULL;
-       ccn->dt.pmu_counters[hw->idx].event = NULL;
-}
-
 static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
 {
        u64 res;
@@ -1027,8 +1028,6 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
 {
        arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
-
-       arm_ccn_pmu_event_free(event);
 }
 
 static void arm_ccn_pmu_event_read(struct perf_event *event)
index 27c542ba9e73491d3acec9667bf9ad2c67c798b2..8f3c04a91511637010e86957d00ed383e5deae49 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_COMMON_CLK)        += clk-gate.o
 obj-$(CONFIG_COMMON_CLK)       += clk-mux.o
 obj-$(CONFIG_COMMON_CLK)       += clk-composite.o
 obj-$(CONFIG_COMMON_CLK)       += clk-fractional-divider.o
+obj-$(CONFIG_COMMON_CLK)       += clk-gpio-gate.o
 ifeq ($(CONFIG_OF), y)
 obj-$(CONFIG_COMMON_CLK)       += clk-conf.o
 endif
@@ -50,6 +51,7 @@ obj-$(CONFIG_ARCH_MMP)                        += mmp/
 endif
 obj-$(CONFIG_PLAT_ORION)               += mvebu/
 obj-$(CONFIG_ARCH_MXS)                 += mxs/
+obj-$(CONFIG_ARCH_PXA)                 += pxa/
 obj-$(CONFIG_COMMON_CLK_QCOM)          += qcom/
 obj-$(CONFIG_ARCH_ROCKCHIP)            += rockchip/
 obj-$(CONFIG_COMMON_CLK_SAMSUNG)       += samsung/
index 4a58c55255bd884f3f1e7deea52048712b297729..51fd87fb7ba691e8a52e40ddc1ee4524ecd3e781 100644 (file)
@@ -45,7 +45,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
 {
        struct clk_gate *gate = to_clk_gate(hw);
        int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
-       unsigned long flags = 0;
+       unsigned long uninitialized_var(flags);
        u32 reg;
 
        set ^= enable;
diff --git a/drivers/clk/clk-gpio-gate.c b/drivers/clk/clk-gpio-gate.c
new file mode 100644 (file)
index 0000000..08e4322
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Gpio gated clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/device.h>
+
+/**
+ * DOC: basic gpio gated clock which can be enabled and disabled
+ *      with gpio output
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gpio
+ * rate - inherits rate from parent.  No clk_set_rate support
+ * parent - fixed parent.  No clk_set_parent support
+ */
+
+#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
+
+static int clk_gpio_gate_enable(struct clk_hw *hw)
+{
+       struct clk_gpio *clk = to_clk_gpio(hw);
+
+       gpiod_set_value(clk->gpiod, 1);
+
+       return 0;
+}
+
+static void clk_gpio_gate_disable(struct clk_hw *hw)
+{
+       struct clk_gpio *clk = to_clk_gpio(hw);
+
+       gpiod_set_value(clk->gpiod, 0);
+}
+
+static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
+{
+       struct clk_gpio *clk = to_clk_gpio(hw);
+
+       return gpiod_get_value(clk->gpiod);
+}
+
+const struct clk_ops clk_gpio_gate_ops = {
+       .enable = clk_gpio_gate_enable,
+       .disable = clk_gpio_gate_disable,
+       .is_enabled = clk_gpio_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
+
+/**
+ * clk_register_gpio_gate - register a gpio-gated clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @gpiod: gpio descriptor to gate this clock
+ * @flags: clock framework flags for this clock
+ */
+struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
+               const char *parent_name, struct gpio_desc *gpiod,
+               unsigned long flags)
+{
+       struct clk_gpio *clk_gpio = NULL;
+       struct clk *clk = ERR_PTR(-EINVAL);
+       struct clk_init_data init = { NULL };
+       unsigned long gpio_flags;
+       int err;
+
+       if (gpiod_is_active_low(gpiod))
+               gpio_flags = GPIOF_OUT_INIT_HIGH;
+       else
+               gpio_flags = GPIOF_OUT_INIT_LOW;
+
+       if (dev)
+               err = devm_gpio_request_one(dev, desc_to_gpio(gpiod),
+                                           gpio_flags, name);
+       else
+               err = gpio_request_one(desc_to_gpio(gpiod), gpio_flags, name);
+
+       if (err) {
+               pr_err("%s: %s: Error requesting clock control gpio %u\n",
+                      __func__, name, desc_to_gpio(gpiod));
+               return ERR_PTR(err);
+       }
+
+       if (dev)
+               clk_gpio = devm_kzalloc(dev, sizeof(struct clk_gpio),
+                                       GFP_KERNEL);
+       else
+               clk_gpio = kzalloc(sizeof(struct clk_gpio), GFP_KERNEL);
+
+       if (!clk_gpio) {
+               clk = ERR_PTR(-ENOMEM);
+               goto clk_register_gpio_gate_err;
+       }
+
+       init.name = name;
+       init.ops = &clk_gpio_gate_ops;
+       init.flags = flags | CLK_IS_BASIC;
+       init.parent_names = (parent_name ? &parent_name : NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
+       clk_gpio->gpiod = gpiod;
+       clk_gpio->hw.init = &init;
+
+       clk = clk_register(dev, &clk_gpio->hw);
+
+       if (!IS_ERR(clk))
+               return clk;
+
+       if (!dev)
+               kfree(clk_gpio);
+
+clk_register_gpio_gate_err:
+       gpiod_put(gpiod);
+
+       return clk;
+}
+EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
+
+#ifdef CONFIG_OF
+/**
+ * The clk_register_gpio_gate() call has to be delayed, because EPROBE_DEFER
+ * cannot be handled properly at of_clk_init() call time.
+ */
+
+struct clk_gpio_gate_delayed_register_data {
+       struct device_node *node;
+       struct mutex lock;
+       struct clk *clk;
+};
+
+static struct clk *of_clk_gpio_gate_delayed_register_get(
+               struct of_phandle_args *clkspec,
+               void *_data)
+{
+       struct clk_gpio_gate_delayed_register_data *data = _data;
+       struct clk *clk;
+       const char *clk_name = data->node->name;
+       const char *parent_name;
+       struct gpio_desc *gpiod;
+       int gpio;
+
+       mutex_lock(&data->lock);
+
+       if (data->clk) {
+               mutex_unlock(&data->lock);
+               return data->clk;
+       }
+
+       gpio = of_get_named_gpio_flags(data->node, "enable-gpios", 0, NULL);
+       if (gpio < 0) {
+               mutex_unlock(&data->lock);
+               if (gpio != -EPROBE_DEFER)
+                       pr_err("%s: %s: Can't get 'enable-gpios' DT property\n",
+                              __func__, clk_name);
+               return ERR_PTR(gpio);
+       }
+       gpiod = gpio_to_desc(gpio);
+
+       parent_name = of_clk_get_parent_name(data->node, 0);
+
+       clk = clk_register_gpio_gate(NULL, clk_name, parent_name, gpiod, 0);
+       if (IS_ERR(clk)) {
+               mutex_unlock(&data->lock);
+               return clk;
+       }
+
+       data->clk = clk;
+       mutex_unlock(&data->lock);
+
+       return clk;
+}
+
+/**
+ * of_gpio_gate_clk_setup() - Setup function for gpio controlled clock
+ */
+void __init of_gpio_gate_clk_setup(struct device_node *node)
+{
+       struct clk_gpio_gate_delayed_register_data *data;
+
+       data = kzalloc(sizeof(struct clk_gpio_gate_delayed_register_data),
+                      GFP_KERNEL);
+       if (!data)
+               return;
+
+       data->node = node;
+       mutex_init(&data->lock);
+
+       of_clk_add_provider(node, of_clk_gpio_gate_delayed_register_get, data);
+}
+EXPORT_SYMBOL_GPL(of_gpio_gate_clk_setup);
+CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup);
+#endif
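As an illustration of the new clk_register_gpio_gate() helper, here is a minimal, hypothetical board-code sketch; it assumes the prototype is exposed via linux/clk-provider.h, and the GPIO number 42, the "osc26m" parent and the "codec_mclk" name are made up:

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>

/* Hypothetical board code: gate an assumed "osc26m" parent clock with
 * GPIO 42 and expose the result as "codec_mclk". */
static struct clk *board_register_codec_mclk(struct device *dev)
{
        struct gpio_desc *gpiod = gpio_to_desc(42);     /* example GPIO */

        if (!gpiod)
                return ERR_PTR(-EINVAL);

        return clk_register_gpio_gate(dev, "codec_mclk", "osc26m",
                                      gpiod, 0);
}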
index 52d58279a6121dbf8a5b67c263c4188074979ba2..4896ae9e23da0c23ed52872cf1992f95609e1b59 100644 (file)
@@ -119,11 +119,11 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
        if (!c)
                return;
 
-       seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu\n",
+       seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
                   level * 3 + 1, "",
                   30 - level * 3, c->name,
                   c->enable_count, c->prepare_count, clk_get_rate(c),
-                  clk_get_accuracy(c));
+                  clk_get_accuracy(c), clk_get_phase(c));
 }
 
 static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
@@ -145,8 +145,8 @@ static int clk_summary_show(struct seq_file *s, void *data)
        struct clk *c;
        struct hlist_head **lists = (struct hlist_head **)s->private;
 
-       seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy\n");
-       seq_puts(s, "--------------------------------------------------------------------------------\n");
+       seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
+       seq_puts(s, "----------------------------------------------------------------------------------------\n");
 
        clk_prepare_lock();
 
@@ -182,6 +182,7 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
        seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
        seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
        seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
+       seq_printf(s, "\"phase\": %d", clk_get_phase(c));
 }
 
 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
@@ -266,6 +267,11 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
        if (!d)
                goto err_out;
 
+       d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry,
+                       (u32 *)&clk->phase);
+       if (!d)
+               goto err_out;
+
        d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
                        (u32 *)&clk->flags);
        if (!d)
@@ -1725,6 +1731,77 @@ out:
 }
 EXPORT_SYMBOL_GPL(clk_set_parent);
 
+/**
+ * clk_set_phase - adjust the phase shift of a clock signal
+ * @clk: clock signal source
+ * @degrees: number of degrees the signal is shifted
+ *
+ * Shifts the phase of a clock signal by the specified
+ * degrees. Returns 0 on success, a negative error code otherwise.
+ *
+ * This function makes no distinction about the input or reference
+ * signal that we adjust the clock signal phase against. For example
+ * phase locked-loop clock signal generators we may shift phase with
+ * respect to feedback clock signal input, but for other cases the
+ * clock phase may be shifted with respect to some other, unspecified
+ * signal.
+ *
+ * Additionally the concept of phase shift does not propagate through
+ * the clock tree hierarchy, which sets it apart from clock rates and
+ * clock accuracy. A parent clock phase attribute does not have an
+ * impact on the phase attribute of a child clock.
+ */
+int clk_set_phase(struct clk *clk, int degrees)
+{
+       int ret = 0;
+
+       if (!clk)
+               goto out;
+
+       /* sanity check degrees */
+       degrees %= 360;
+       if (degrees < 0)
+               degrees += 360;
+
+       clk_prepare_lock();
+
+       if (!clk->ops->set_phase)
+               goto out_unlock;
+
+       ret = clk->ops->set_phase(clk->hw, degrees);
+
+       if (!ret)
+               clk->phase = degrees;
+
+out_unlock:
+       clk_prepare_unlock();
+
+out:
+       return ret;
+}
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * a negative error code.
+ */
+int clk_get_phase(struct clk *clk)
+{
+       int ret = 0;
+
+       if (!clk)
+               goto out;
+
+       clk_prepare_lock();
+       ret = clk->phase;
+       clk_prepare_unlock();
+
+out:
+       return ret;
+}
+
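A short consumer-side sketch of the clk_set_phase()/clk_get_phase() helpers added above may help; it assumes the declarations are exposed through linux/clk.h, and the clock name "i2s_sclk" and the 90 degree shift are purely illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: shift an assumed "i2s_sclk" clock by 90 degrees
 * and read the programmed phase back for verification. */
static int example_shift_i2s_phase(struct device *dev)
{
        struct clk *clk = devm_clk_get(dev, "i2s_sclk");
        int ret;

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_set_phase(clk, 90);
        if (ret)
                return ret;

        dev_dbg(dev, "phase is now %d degrees\n", clk_get_phase(clk));
        return 0;
}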
 /**
  * __clk_init - initialize the data structures in a struct clk
  * @dev:       device initializing this clk, placeholder for now
@@ -1843,6 +1920,16 @@ int __clk_init(struct device *dev, struct clk *clk)
        else
                clk->accuracy = 0;
 
+       /*
+        * Set clk's phase.
+        * Since a phase is by definition relative to its parent, just
+        * query the current clock phase, or just assume it's in phase.
+        */
+       if (clk->ops->get_phase)
+               clk->phase = clk->ops->get_phase(clk->hw);
+       else
+               clk->phase = 0;
+
        /*
         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
         * simple clocks and lazy developers the default fallback is to use the
index e5fcfb4e32efa2b8bb24d32bb7ac19fec74b2c55..3f369c60fe56a9f6d449ef82c51ffca141cf8aee 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <linux/of_address.h>
 #include <dt-bindings/clock/hix5hd2-clock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
 #include "clk.h"
 
 static struct hisi_fixed_rate_clock hix5hd2_fixed_rate_clks[] __initdata = {
@@ -48,9 +50,9 @@ static const char *sfc_mux_p[] __initconst = {
                "24m", "150m", "200m", "100m", "75m", };
 static u32 sfc_mux_table[] = {0, 4, 5, 6, 7};
 
-static const char *sdio1_mux_p[] __initconst = {
+static const char *sdio_mux_p[] __initconst = {
                "75m", "100m", "50m", "15m", };
-static u32 sdio1_mux_table[] = {0, 1, 2, 3};
+static u32 sdio_mux_table[] = {0, 1, 2, 3};
 
 static const char *fephy_mux_p[] __initconst = { "25m", "125m"};
 static u32 fephy_mux_table[] = {0, 1};
@@ -59,28 +61,243 @@ static u32 fephy_mux_table[] = {0, 1};
 static struct hisi_mux_clock hix5hd2_mux_clks[] __initdata = {
        { HIX5HD2_SFC_MUX, "sfc_mux", sfc_mux_p, ARRAY_SIZE(sfc_mux_p),
                CLK_SET_RATE_PARENT, 0x5c, 8, 3, 0, sfc_mux_table, },
-       { HIX5HD2_MMC_MUX, "mmc_mux", sdio1_mux_p, ARRAY_SIZE(sdio1_mux_p),
-               CLK_SET_RATE_PARENT, 0xa0, 8, 2, 0, sdio1_mux_table, },
+       { HIX5HD2_MMC_MUX, "mmc_mux", sdio_mux_p, ARRAY_SIZE(sdio_mux_p),
+               CLK_SET_RATE_PARENT, 0xa0, 8, 2, 0, sdio_mux_table, },
+       { HIX5HD2_SD_MUX, "sd_mux", sdio_mux_p, ARRAY_SIZE(sdio_mux_p),
+               CLK_SET_RATE_PARENT, 0x9c, 8, 2, 0, sdio_mux_table, },
        { HIX5HD2_FEPHY_MUX, "fephy_mux",
                fephy_mux_p, ARRAY_SIZE(fephy_mux_p),
                CLK_SET_RATE_PARENT, 0x120, 8, 2, 0, fephy_mux_table, },
 };
 
 static struct hisi_gate_clock hix5hd2_gate_clks[] __initdata = {
-       /*sfc*/
+       /* sfc */
        { HIX5HD2_SFC_CLK, "clk_sfc", "sfc_mux",
                CLK_SET_RATE_PARENT, 0x5c, 0, 0, },
        { HIX5HD2_SFC_RST, "rst_sfc", "clk_sfc",
                CLK_SET_RATE_PARENT, 0x5c, 4, CLK_GATE_SET_TO_DISABLE, },
-       /*sdio1*/
+       /* sdio0 */
+       { HIX5HD2_SD_BIU_CLK, "clk_sd_biu", "200m",
+               CLK_SET_RATE_PARENT, 0x9c, 0, 0, },
+       { HIX5HD2_SD_CIU_CLK, "clk_sd_ciu", "sd_mux",
+               CLK_SET_RATE_PARENT, 0x9c, 1, 0, },
+       { HIX5HD2_SD_CIU_RST, "rst_sd_ciu", "clk_sd_ciu",
+               CLK_SET_RATE_PARENT, 0x9c, 4, CLK_GATE_SET_TO_DISABLE, },
+       /* sdio1 */
        { HIX5HD2_MMC_BIU_CLK, "clk_mmc_biu", "200m",
                CLK_SET_RATE_PARENT, 0xa0, 0, 0, },
        { HIX5HD2_MMC_CIU_CLK, "clk_mmc_ciu", "mmc_mux",
                CLK_SET_RATE_PARENT, 0xa0, 1, 0, },
        { HIX5HD2_MMC_CIU_RST, "rst_mmc_ciu", "clk_mmc_ciu",
                CLK_SET_RATE_PARENT, 0xa0, 4, CLK_GATE_SET_TO_DISABLE, },
+       /* gsf */
+       { HIX5HD2_FWD_BUS_CLK, "clk_fwd_bus", NULL, 0, 0xcc, 0, 0, },
+       { HIX5HD2_FWD_SYS_CLK, "clk_fwd_sys", "clk_fwd_bus", 0, 0xcc, 5, 0, },
+       { HIX5HD2_MAC0_PHY_CLK, "clk_fephy", "clk_fwd_sys",
+                CLK_SET_RATE_PARENT, 0x120, 0, 0, },
+       /* wdg0 */
+       { HIX5HD2_WDG0_CLK, "clk_wdg0", "24m",
+               CLK_SET_RATE_PARENT, 0x178, 0, 0, },
+       { HIX5HD2_WDG0_RST, "rst_wdg0", "clk_wdg0",
+               CLK_SET_RATE_PARENT, 0x178, 4, CLK_GATE_SET_TO_DISABLE, },
+       /* I2C */
+       {HIX5HD2_I2C0_CLK, "clk_i2c0", "100m",
+                CLK_SET_RATE_PARENT, 0x06c, 4, 0, },
+       {HIX5HD2_I2C0_RST, "rst_i2c0", "clk_i2c0",
+                CLK_SET_RATE_PARENT, 0x06c, 5, CLK_GATE_SET_TO_DISABLE, },
+       {HIX5HD2_I2C1_CLK, "clk_i2c1", "100m",
+                CLK_SET_RATE_PARENT, 0x06c, 8, 0, },
+       {HIX5HD2_I2C1_RST, "rst_i2c1", "clk_i2c1",
+                CLK_SET_RATE_PARENT, 0x06c, 9, CLK_GATE_SET_TO_DISABLE, },
+       {HIX5HD2_I2C2_CLK, "clk_i2c2", "100m",
+                CLK_SET_RATE_PARENT, 0x06c, 12, 0, },
+       {HIX5HD2_I2C2_RST, "rst_i2c2", "clk_i2c2",
+                CLK_SET_RATE_PARENT, 0x06c, 13, CLK_GATE_SET_TO_DISABLE, },
+       {HIX5HD2_I2C3_CLK, "clk_i2c3", "100m",
+                CLK_SET_RATE_PARENT, 0x06c, 16, 0, },
+       {HIX5HD2_I2C3_RST, "rst_i2c3", "clk_i2c3",
+                CLK_SET_RATE_PARENT, 0x06c, 17, CLK_GATE_SET_TO_DISABLE, },
+       {HIX5HD2_I2C4_CLK, "clk_i2c4", "100m",
+                CLK_SET_RATE_PARENT, 0x06c, 20, 0, },
+       {HIX5HD2_I2C4_RST, "rst_i2c4", "clk_i2c4",
+                CLK_SET_RATE_PARENT, 0x06c, 21, CLK_GATE_SET_TO_DISABLE, },
+       {HIX5HD2_I2C5_CLK, "clk_i2c5", "100m",
+                CLK_SET_RATE_PARENT, 0x06c, 0, 0, },
+       {HIX5HD2_I2C5_RST, "rst_i2c5", "clk_i2c5",
+                CLK_SET_RATE_PARENT, 0x06c, 1, CLK_GATE_SET_TO_DISABLE, },
 };
 
+enum hix5hd2_clk_type {
+       TYPE_COMPLEX,
+       TYPE_ETHER,
+};
+
+struct hix5hd2_complex_clock {
+       const char      *name;
+       const char      *parent_name;
+       u32             id;
+       u32             ctrl_reg;
+       u32             ctrl_clk_mask;
+       u32             ctrl_rst_mask;
+       u32             phy_reg;
+       u32             phy_clk_mask;
+       u32             phy_rst_mask;
+       enum hix5hd2_clk_type type;
+};
+
+struct hix5hd2_clk_complex {
+       struct clk_hw   hw;
+       u32             id;
+       void __iomem    *ctrl_reg;
+       u32             ctrl_clk_mask;
+       u32             ctrl_rst_mask;
+       void __iomem    *phy_reg;
+       u32             phy_clk_mask;
+       u32             phy_rst_mask;
+};
+
+static struct hix5hd2_complex_clock hix5hd2_complex_clks[] __initdata = {
+       {"clk_mac0", "clk_fephy", HIX5HD2_MAC0_CLK,
+               0xcc, 0xa, 0x500, 0x120, 0, 0x10, TYPE_ETHER},
+       {"clk_mac1", "clk_fwd_sys", HIX5HD2_MAC1_CLK,
+               0xcc, 0x14, 0xa00, 0x168, 0x2, 0, TYPE_ETHER},
+       {"clk_sata", NULL, HIX5HD2_SATA_CLK,
+               0xa8, 0x1f, 0x300, 0xac, 0x1, 0x0, TYPE_COMPLEX},
+       {"clk_usb", NULL, HIX5HD2_USB_CLK,
+               0xb8, 0xff, 0x3f000, 0xbc, 0x7, 0x3f00, TYPE_COMPLEX},
+};
+
+#define to_complex_clk(_hw) container_of(_hw, struct hix5hd2_clk_complex, hw)
+
+static int clk_ether_prepare(struct clk_hw *hw)
+{
+       struct hix5hd2_clk_complex *clk = to_complex_clk(hw);
+       u32 val;
+
+       val = readl_relaxed(clk->ctrl_reg);
+       val |= clk->ctrl_clk_mask | clk->ctrl_rst_mask;
+       writel_relaxed(val, clk->ctrl_reg);
+       val &= ~(clk->ctrl_rst_mask);
+       writel_relaxed(val, clk->ctrl_reg);
+
+       val = readl_relaxed(clk->phy_reg);
+       val |= clk->phy_clk_mask;
+       val &= ~(clk->phy_rst_mask);
+       writel_relaxed(val, clk->phy_reg);
+       mdelay(10);
+
+       val &= ~(clk->phy_clk_mask);
+       val |= clk->phy_rst_mask;
+       writel_relaxed(val, clk->phy_reg);
+       mdelay(10);
+
+       val |= clk->phy_clk_mask;
+       val &= ~(clk->phy_rst_mask);
+       writel_relaxed(val, clk->phy_reg);
+       mdelay(30);
+       return 0;
+}
+
+static void clk_ether_unprepare(struct clk_hw *hw)
+{
+       struct hix5hd2_clk_complex *clk = to_complex_clk(hw);
+       u32 val;
+
+       val = readl_relaxed(clk->ctrl_reg);
+       val &= ~(clk->ctrl_clk_mask);
+       writel_relaxed(val, clk->ctrl_reg);
+}
+
+static struct clk_ops clk_ether_ops = {
+       .prepare = clk_ether_prepare,
+       .unprepare = clk_ether_unprepare,
+};
+
+static int clk_complex_enable(struct clk_hw *hw)
+{
+       struct hix5hd2_clk_complex *clk = to_complex_clk(hw);
+       u32 val;
+
+       val = readl_relaxed(clk->ctrl_reg);
+       val |= clk->ctrl_clk_mask;
+       val &= ~(clk->ctrl_rst_mask);
+       writel_relaxed(val, clk->ctrl_reg);
+
+       val = readl_relaxed(clk->phy_reg);
+       val |= clk->phy_clk_mask;
+       val &= ~(clk->phy_rst_mask);
+       writel_relaxed(val, clk->phy_reg);
+
+       return 0;
+}
+
+static void clk_complex_disable(struct clk_hw *hw)
+{
+       struct hix5hd2_clk_complex *clk = to_complex_clk(hw);
+       u32 val;
+
+       val = readl_relaxed(clk->ctrl_reg);
+       val |= clk->ctrl_rst_mask;
+       val &= ~(clk->ctrl_clk_mask);
+       writel_relaxed(val, clk->ctrl_reg);
+
+       val = readl_relaxed(clk->phy_reg);
+       val |= clk->phy_rst_mask;
+       val &= ~(clk->phy_clk_mask);
+       writel_relaxed(val, clk->phy_reg);
+}
+
+static struct clk_ops clk_complex_ops = {
+       .enable = clk_complex_enable,
+       .disable = clk_complex_disable,
+};
+
+void __init hix5hd2_clk_register_complex(struct hix5hd2_complex_clock *clks,
+                                        int nums, struct hisi_clock_data *data)
+{
+       void __iomem *base = data->base;
+       int i;
+
+       for (i = 0; i < nums; i++) {
+               struct hix5hd2_clk_complex *p_clk;
+               struct clk *clk;
+               struct clk_init_data init;
+
+               p_clk = kzalloc(sizeof(*p_clk), GFP_KERNEL);
+               if (!p_clk)
+                       return;
+
+               init.name = clks[i].name;
+               if (clks[i].type == TYPE_ETHER)
+                       init.ops = &clk_ether_ops;
+               else
+                       init.ops = &clk_complex_ops;
+
+               init.flags = CLK_IS_BASIC;
+               init.parent_names =
+                       (clks[i].parent_name ? &clks[i].parent_name : NULL);
+               init.num_parents = (clks[i].parent_name ? 1 : 0);
+
+               p_clk->ctrl_reg = base + clks[i].ctrl_reg;
+               p_clk->ctrl_clk_mask = clks[i].ctrl_clk_mask;
+               p_clk->ctrl_rst_mask = clks[i].ctrl_rst_mask;
+               p_clk->phy_reg = base + clks[i].phy_reg;
+               p_clk->phy_clk_mask = clks[i].phy_clk_mask;
+               p_clk->phy_rst_mask = clks[i].phy_rst_mask;
+               p_clk->hw.init = &init;
+
+               clk = clk_register(NULL, &p_clk->hw);
+               if (IS_ERR(clk)) {
+                       kfree(p_clk);
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+
+               data->clk_data.clks[clks[i].id] = clk;
+       }
+}
+
 static void __init hix5hd2_clk_init(struct device_node *np)
 {
        struct hisi_clock_data *clk_data;
@@ -96,6 +313,9 @@ static void __init hix5hd2_clk_init(struct device_node *np)
                                        clk_data);
        hisi_clk_register_gate(hix5hd2_gate_clks,
                        ARRAY_SIZE(hix5hd2_gate_clks), clk_data);
+       hix5hd2_clk_register_complex(hix5hd2_complex_clks,
+                                    ARRAY_SIZE(hix5hd2_complex_clks),
+                                    clk_data);
 }
 
 CLK_OF_DECLARE(hix5hd2_clk, "hisilicon,hix5hd2-clock", hix5hd2_clk_init);
index bef198a83863aaa79e21ca1c4e1401238cae31b7..756f0f39d6a3d9f4d3ffcd19a9eae6166c7ba3e3 100644 (file)
@@ -23,6 +23,7 @@
  */
 
 #define SARL                           0       /* Low part [0:31] */
+#define         SARL_A370_SSCG_ENABLE          BIT(10)
 #define         SARL_A370_PCLK_FREQ_OPT        11
 #define         SARL_A370_PCLK_FREQ_OPT_MASK   0xF
 #define         SARL_A370_FAB_FREQ_OPT         15
@@ -133,10 +134,17 @@ static void __init a370_get_clk_ratio(
        }
 }
 
+static bool a370_is_sscg_enabled(void __iomem *sar)
+{
+       return !(readl(sar) & SARL_A370_SSCG_ENABLE);
+}
+
 static const struct coreclk_soc_desc a370_coreclks = {
        .get_tclk_freq = a370_get_tclk_freq,
        .get_cpu_freq = a370_get_cpu_freq,
        .get_clk_ratio = a370_get_clk_ratio,
+       .is_sscg_enabled = a370_is_sscg_enabled,
+       .fix_sscg_deviation = kirkwood_fix_sscg_deviation,
        .ratios = a370_coreclk_ratios,
        .num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
 };
index c991a4d95e108d594f7ab0930a202e7eb7ea7fe3..c7af2242b796866a9367b02c0a74827499901a1f 100644 (file)
  * all modified at the same time, and not separately as for the Armada
  * 370 or the Armada XP SoCs.
  *
- * SAR0[21:17]   : CPU frequency    DDR frequency   L2 frequency
+ * SAR1[21:17]   : CPU frequency    DDR frequency   L2 frequency
  *              6   =  400 MHz     400 MHz         200 MHz
  *              15  =  600 MHz     600 MHz         300 MHz
  *              21  =  800 MHz     534 MHz         400 MHz
  *              25  = 1000 MHz     500 MHz         500 MHz
  *              others reserved.
  *
- * SAR0[22]   : TCLK frequency
+ * SAR1[22]   : TCLK frequency
  *              0 = 166 MHz
  *              1 = 200 MHz
  */
index 8145c4efc381ffa211da406ca57ce124aec982c4..7f8a33ab265b5f799318b44bca0351cc707d74c3 100644 (file)
  * Core Clocks
  */
 
+#define SSCG_CONF_MODE(reg)    (((reg) >> 16) & 0x3)
+#define SSCG_SPREAD_DOWN       0x0
+#define SSCG_SPREAD_UP         0x1
+#define SSCG_SPREAD_CENTRAL    0x2
+#define SSCG_CONF_LOW(reg)     (((reg) >> 8) & 0xFF)
+#define SSCG_CONF_HIGH(reg)    ((reg) & 0xFF)
+
 static struct clk_onecell_data clk_data;
 
+/*
+ * This function can be used by the Kirkwood, the Armada 370, the
+ * Armada XP and the Armada 375 SoC. The name of the function was
+ * chosen following the dt convention: using the first known SoC
+ * compatible with it.
+ */
+u32 kirkwood_fix_sscg_deviation(struct device_node *np, u32 system_clk)
+{
+       struct device_node *sscg_np = NULL;
+       void __iomem *sscg_map;
+       u32 sscg_reg;
+       s32 low_bound, high_bound;
+       u64 freq_swing_half;
+
+       sscg_np = of_find_node_by_name(np, "sscg");
+       if (sscg_np == NULL) {
+               pr_err("cannot get SSCG register node\n");
+               return system_clk;
+       }
+
+       sscg_map = of_iomap(sscg_np, 0);
+       if (sscg_map == NULL) {
+               pr_err("cannot map SSCG register\n");
+               goto out;
+       }
+
+       sscg_reg = readl(sscg_map);
+       high_bound = SSCG_CONF_HIGH(sscg_reg);
+       low_bound = SSCG_CONF_LOW(sscg_reg);
+
+       if ((high_bound - low_bound) <= 0)
+               goto out;
+       /*
+        * A Marvell engineer provided the following formula (when
+        * this code was written, the datasheet was erroneous):
+        * Spread percentage = 1/96 * (H - L) / H
+        * H = SSCG_High_Boundary
+        * L = SSCG_Low_Boundary
+        *
+        * As the deviation is half of the spread, this leads to the
+        * formula used in the code below.
+        *
+        * To avoid an overflow while not losing any significant digit,
+        * we have to use a 64 bit integer.
+        */
+
+       freq_swing_half = (((u64)high_bound - (u64)low_bound)
+                       * (u64)system_clk);
+       do_div(freq_swing_half, (2 * 96 * high_bound));
+
+       switch (SSCG_CONF_MODE(sscg_reg)) {
+       case SSCG_SPREAD_DOWN:
+               system_clk -= freq_swing_half;
+               break;
+       case SSCG_SPREAD_UP:
+               system_clk += freq_swing_half;
+               break;
+       case SSCG_SPREAD_CENTRAL:
+       default:
+               break;
+       }
+
+       iounmap(sscg_map);
+
+out:
+       of_node_put(sscg_np);
+
+       return system_clk;
+}
+
 void __init mvebu_coreclk_setup(struct device_node *np,
                                const struct coreclk_soc_desc *desc)
 {
@@ -62,6 +139,11 @@ void __init mvebu_coreclk_setup(struct device_node *np,
        of_property_read_string_index(np, "clock-output-names", 1,
                                      &cpuclk_name);
        rate = desc->get_cpu_freq(base);
+
+       if (desc->is_sscg_enabled && desc->fix_sscg_deviation
+               && desc->is_sscg_enabled(base))
+               rate = desc->fix_sscg_deviation(np, rate);
+
        clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
                                                   CLK_IS_ROOT, rate);
        WARN_ON(IS_ERR(clk_data.clks[1]));
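To make the deviation formula in kirkwood_fix_sscg_deviation() above concrete, a worked example with made-up boundary values (not taken from any datasheet): with H = 96, L = 90 and system_clk = 1 GHz,

    freq_swing_half = (H - L) * system_clk / (2 * 96 * H)
                    = 6 * 1000000000 / 18432
                    ~= 325520 Hz (integer division)

so in spread-down mode the reported CPU clock drops to roughly 999.67 MHz, in spread-up mode it rises to roughly 1000.33 MHz, and central spread leaves it unchanged, matching the switch statement in the code.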
index 8cd28e47471c3234e22610721fbcb563917737e5..8f8db7eac3f653c66ccc2cbb45e8629530361494 100644 (file)
@@ -30,6 +30,8 @@ struct coreclk_soc_desc {
        u32 (*get_tclk_freq)(void __iomem *sar);
        u32 (*get_cpu_freq)(void __iomem *sar);
        void (*get_clk_ratio)(void __iomem *sar, int id, int *mult, int *div);
+       bool (*is_sscg_enabled)(void __iomem *sar);
+       u32 (*fix_sscg_deviation)(struct device_node *np, u32 system_clk);
        const struct coreclk_ratio *ratios;
        int num_ratios;
 };
@@ -47,4 +49,9 @@ void __init mvebu_coreclk_setup(struct device_node *np,
 void __init mvebu_clk_gating_setup(struct device_node *np,
                                   const struct clk_gating_soc_desc *desc);
 
+/*
+ * This function is shared among the Kirkwood, Armada 370, Armada XP
+ * and Armada 375 SoC
+ */
+u32 kirkwood_fix_sscg_deviation(struct device_node *np, u32 system_clk);
 #endif
diff --git a/drivers/clk/pxa/Makefile b/drivers/clk/pxa/Makefile
new file mode 100644 (file)
index 0000000..4ff2abc
--- /dev/null
@@ -0,0 +1,2 @@
+obj-y                          += clk-pxa.o
+obj-$(CONFIG_PXA27x)           += clk-pxa27x.o
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
new file mode 100644 (file)
index 0000000..ef3c053
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Marvell PXA family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Common clock code for PXA clocks ("CKEN" type clocks + DT)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+DEFINE_SPINLOCK(lock);
+
+static struct clk *pxa_clocks[CLK_MAX];
+static struct clk_onecell_data onecell_data = {
+       .clks = pxa_clocks,
+       .clk_num = CLK_MAX,
+};
+
+#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk_cken, hw)
+
+static unsigned long cken_recalc_rate(struct clk_hw *hw,
+                                     unsigned long parent_rate)
+{
+       struct pxa_clk_cken *pclk = to_pxa_clk(hw);
+       struct clk_fixed_factor *fix;
+
+       if (!pclk->is_in_low_power || pclk->is_in_low_power())
+               fix = &pclk->lp;
+       else
+               fix = &pclk->hp;
+       fix->hw.clk = hw->clk;
+       return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
+}
+
+static struct clk_ops cken_rate_ops = {
+       .recalc_rate = cken_recalc_rate,
+};
+
+static u8 cken_get_parent(struct clk_hw *hw)
+{
+       struct pxa_clk_cken *pclk = to_pxa_clk(hw);
+
+       if (!pclk->is_in_low_power)
+               return 0;
+       return pclk->is_in_low_power() ? 0 : 1;
+}
+
+static struct clk_ops cken_mux_ops = {
+       .get_parent = cken_get_parent,
+       .set_parent = dummy_clk_set_parent,
+};
+
+void __init clkdev_pxa_register(int ckid, const char *con_id,
+                               const char *dev_id, struct clk *clk)
+{
+       if (!IS_ERR(clk) && (ckid != CLK_NONE))
+               pxa_clocks[ckid] = clk;
+       if (!IS_ERR(clk))
+               clk_register_clkdev(clk, con_id, dev_id);
+}
+
+int __init clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks)
+{
+       int i;
+       struct pxa_clk_cken *pclk;
+       struct clk *clk;
+
+       for (i = 0; i < nb_clks; i++) {
+               pclk = clks + i;
+               pclk->gate.lock = &lock;
+               clk = clk_register_composite(NULL, pclk->name,
+                                            pclk->parent_names, 2,
+                                            &pclk->hw, &cken_mux_ops,
+                                            &pclk->hw, &cken_rate_ops,
+                                            &pclk->gate.hw, &clk_gate_ops,
+                                            pclk->flags);
+               clkdev_pxa_register(pclk->ckid, pclk->con_id, pclk->dev_id,
+                                   clk);
+       }
+       return 0;
+}
+
+static void __init pxa_dt_clocks_init(struct device_node *np)
+{
+       of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
+}
+CLK_OF_DECLARE(pxa_clks, "marvell,pxa-clocks", pxa_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
new file mode 100644 (file)
index 0000000..5fe219d
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Marvell PXA family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Common clock code for PXA clocks ("CKEN" type clocks + DT)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+#ifndef _CLK_PXA_
+#define _CLK_PXA_
+
+#define PARENTS(name) \
+       static const char *name ## _parents[] __initconst
+#define MUX_RO_RATE_RO_OPS(name, clk_name)                     \
+       static struct clk_hw name ## _mux_hw;                   \
+       static struct clk_hw name ## _rate_hw;                  \
+       static struct clk_ops name ## _mux_ops = {              \
+               .get_parent = name ## _get_parent,              \
+               .set_parent = dummy_clk_set_parent,             \
+       };                                                      \
+       static struct clk_ops name ## _rate_ops = {             \
+               .recalc_rate = name ## _get_rate,               \
+       };                                                      \
+       static struct clk *clk_register_ ## name(void)          \
+       {                                                       \
+               return clk_register_composite(NULL, clk_name,   \
+                       name ## _parents,                       \
+                       ARRAY_SIZE(name ## _parents),           \
+                       &name ## _mux_hw, &name ## _mux_ops,    \
+                       &name ## _rate_hw, &name ## _rate_ops,  \
+                       NULL, NULL, CLK_GET_RATE_NOCACHE);      \
+       }
+
+#define RATE_RO_OPS(name, clk_name)                    \
+       static struct clk_hw name ## _rate_hw;                  \
+       static struct clk_ops name ## _rate_ops = {             \
+               .recalc_rate = name ## _get_rate,               \
+       };                                                      \
+       static struct clk *clk_register_ ## name(void)          \
+       {                                                       \
+               return clk_register_composite(NULL, clk_name,   \
+                       name ## _parents,                       \
+                       ARRAY_SIZE(name ## _parents),           \
+                       NULL, NULL,                             \
+                       &name ## _rate_hw, &name ## _rate_ops,  \
+                       NULL, NULL, CLK_GET_RATE_NOCACHE);      \
+       }
+
+/*
+ * CKEN clock type
+ * This clock takes it source from 2 possible parents :
+ *  - a low power parent
+ *  - a normal parent
+ *
+ *  +------------+     +-----------+
+ *  |  Low Power | --- | x mult_lp |
+ *  |    Clock   |     | / div_lp  |\
+ *  +------------+     +-----------+ \+-----+   +-----------+
+ *                                    | Mux |---| CKEN gate |
+ *  +------------+     +-----------+ /+-----+   +-----------+
+ *  | High Power |     | x mult_hp |/
+ *  |    Clock   | --- | / div_hp  |
+ *  +------------+     +-----------+
+ */
+struct pxa_clk_cken {
+       struct clk_hw hw;
+       int ckid;
+       const char *name;
+       const char *dev_id;
+       const char *con_id;
+       const char **parent_names;
+       struct clk_fixed_factor lp;
+       struct clk_fixed_factor hp;
+       struct clk_gate gate;
+       bool (*is_in_low_power)(void);
+       const unsigned long flags;
+};
+
+#define PXA_CKEN(_dev_id, _con_id, _name, parents, _mult_lp, _div_lp,  \
+                _mult_hp, _div_hp, is_lp, _cken_reg, _cken_bit, flag)  \
+       { .ckid = CLK_ ## _name, .name = #_name,                        \
+         .dev_id = _dev_id, .con_id = _con_id, .parent_names = parents,\
+         .lp = { .mult = _mult_lp, .div = _div_lp },                   \
+         .hp = { .mult = _mult_hp, .div = _div_hp },                   \
+         .is_in_low_power = is_lp,                                     \
+         .gate = { .reg = (void __iomem *)_cken_reg, .bit_idx = _cken_bit }, \
+         .flags = flag,                                                \
+       }
+#define PXA_CKEN_1RATE(dev_id, con_id, name, parents, cken_reg,                \
+                           cken_bit, flag)                             \
+       PXA_CKEN(dev_id, con_id, name, parents, 1, 1, 1, 1,             \
+                NULL, cken_reg, cken_bit, flag)
+
+static int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+       return 0;
+}
+
+extern void clkdev_pxa_register(int ckid, const char *con_id,
+                               const char *dev_id, struct clk *clk);
+extern int clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks);
+
+#endif
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
new file mode 100644 (file)
index 0000000..b345cc7
--- /dev/null
@@ -0,0 +1,370 @@
+/*
+ * Marvell PXA27x family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Heavily inspired from former arch/arm/mach-pxa/clock.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+#include <linux/clk-provider.h>
+#include <mach/pxa2xx-regs.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+enum {
+       PXA_CORE_13Mhz = 0,
+       PXA_CORE_RUN,
+       PXA_CORE_TURBO,
+};
+
+enum {
+       PXA_BUS_13Mhz = 0,
+       PXA_BUS_RUN,
+};
+
+enum {
+       PXA_LCD_13Mhz = 0,
+       PXA_LCD_RUN,
+};
+
+enum {
+       PXA_MEM_13Mhz = 0,
+       PXA_MEM_SYSTEM_BUS,
+       PXA_MEM_RUN,
+};
+
+static const char * const get_freq_khz[] = {
+       "core", "run", "cpll", "memory",
+       "system_bus"
+};
+
+/*
+ * Get the clock frequency as reflected by CCSR and the turbo flag.
+ * We assume these values have been applied via a fcs.
+ * If info is not 0 we also display the current settings.
+ */
+unsigned int pxa27x_get_clk_frequency_khz(int info)
+{
+       struct clk *clk;
+       unsigned long clks[5];
+       int i;
+
+       for (i = 0; i < 5; i++) {
+               clk = clk_get(NULL, get_freq_khz[i]);
+               if (IS_ERR(clk)) {
+                       clks[i] = 0;
+               } else {
+                       clks[i] = clk_get_rate(clk);
+                       clk_put(clk);
+               }
+       }
+       if (info) {
+               pr_info("Run Mode clock: %ld.%02ldMHz\n",
+                       clks[1] / 1000000, (clks[1] % 1000000) / 10000);
+               pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
+                       clks[2] / 1000000, (clks[2] % 1000000) / 10000);
+               pr_info("Memory clock: %ld.%02ldMHz\n",
+                       clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+               pr_info("System bus clock: %ld.%02ldMHz\n",
+                       clks[4] / 1000000, (clks[4] % 1000000) / 10000);
+       }
+       return (unsigned int)clks[0];
+}
+
+bool pxa27x_is_ppll_disabled(void)
+{
+       unsigned long ccsr = CCSR;
+
+       return ccsr & (1 << CCCR_PPDIS_BIT);
+}
+
+#define PXA27X_CKEN(dev_id, con_id, parents, mult_hp, div_hp,          \
+                   bit, is_lp, flags)                                  \
+       PXA_CKEN(dev_id, con_id, bit, parents, 1, 1, mult_hp, div_hp,   \
+                is_lp,  &CKEN, CKEN_ ## bit, flags)
+#define PXA27X_PBUS_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)  \
+       PXA27X_CKEN(dev_id, con_id, pxa27x_pbus_parents, mult_hp,       \
+                   div_hp, bit, pxa27x_is_ppll_disabled, 0)
+
+PARENTS(pxa27x_pbus) = { "osc_13mhz", "ppll_312mhz" };
+PARENTS(pxa27x_sbus) = { "system_bus", "system_bus" };
+PARENTS(pxa27x_32Mhz_bus) = { "osc_32_768khz", "osc_32_768khz" };
+PARENTS(pxa27x_lcd_bus) = { "lcd_base", "lcd_base" };
+PARENTS(pxa27x_membus) = { "lcd_base", "lcd_base" };
+
+#define PXA27X_CKEN_1RATE(dev_id, con_id, bit, parents, delay)         \
+       PXA_CKEN_1RATE(dev_id, con_id, bit, parents,                    \
+                      &CKEN, CKEN_ ## bit, 0)
+#define PXA27X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay)      \
+       PXA_CKEN_1RATE(dev_id, con_id, bit, parents,                    \
+                      &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
+
+static struct pxa_clk_cken pxa27x_clocks[] = {
+       PXA27X_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 2, 42, 1),
+       PXA27X_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 2, 42, 1),
+       PXA27X_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 2, 42, 1),
+       PXA27X_PBUS_CKEN("pxa2xx-i2s", NULL, I2S, 2, 51, 0),
+       PXA27X_PBUS_CKEN("pxa2xx-i2c.0", NULL, I2C, 2, 19, 0),
+       PXA27X_PBUS_CKEN("pxa27x-udc", NULL, USB, 2, 13, 5),
+       PXA27X_PBUS_CKEN("pxa2xx-mci.0", NULL, MMC, 2, 32, 0),
+       PXA27X_PBUS_CKEN("pxa2xx-ir", "FICPCLK", FICP, 2, 13, 0),
+       PXA27X_PBUS_CKEN("pxa27x-ohci", NULL, USBHOST, 2, 13, 0),
+       PXA27X_PBUS_CKEN("pxa2xx-i2c.1", NULL, PWRI2C, 1, 24, 0),
+       PXA27X_PBUS_CKEN("pxa27x-ssp.0", NULL, SSP1, 1, 24, 0),
+       PXA27X_PBUS_CKEN("pxa27x-ssp.1", NULL, SSP2, 1, 24, 0),
+       PXA27X_PBUS_CKEN("pxa27x-ssp.2", NULL, SSP3, 1, 24, 0),
+       PXA27X_PBUS_CKEN("pxa27x-pwm.0", NULL, PWM0, 1, 24, 0),
+       PXA27X_PBUS_CKEN("pxa27x-pwm.1", NULL, PWM1, 1, 24, 0),
+       PXA27X_PBUS_CKEN(NULL, "MSLCLK", MSL, 2, 13, 0),
+       PXA27X_PBUS_CKEN(NULL, "USIMCLK", USIM, 2, 13, 0),
+       PXA27X_PBUS_CKEN(NULL, "MSTKCLK", MEMSTK, 2, 32, 0),
+       PXA27X_PBUS_CKEN(NULL, "AC97CLK", AC97, 1, 1, 0),
+       PXA27X_PBUS_CKEN(NULL, "AC97CONFCLK", AC97CONF, 1, 1, 0),
+       PXA27X_PBUS_CKEN(NULL, "OSTIMER0", OSTIMER, 1, 96, 0),
+
+       PXA27X_CKEN_1RATE("pxa27x-keypad", NULL, KEYPAD,
+                         pxa27x_32Mhz_bus_parents, 0),
+       PXA27X_CKEN_1RATE(NULL, "IMCLK", IM, pxa27x_sbus_parents, 0),
+       PXA27X_CKEN_1RATE("pxa2xx-fb", NULL, LCD, pxa27x_lcd_bus_parents, 0),
+       PXA27X_CKEN_1RATE("pxa27x-camera.0", NULL, CAMERA,
+                         pxa27x_lcd_bus_parents, 0),
+       PXA27X_CKEN_1RATE_AO("pxa2xx-pcmcia", NULL, MEMC,
+                            pxa27x_membus_parents, 0),
+
+};
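/*
 * Rough illustration of the table above: the _hp mult/div pair scales the
 * 312 MHz peripheral PLL, e.g. the FFUART entry gives
 * 312 MHz * 2 / 42 ~= 14.86 MHz, while the _lp pair (1, 1) applies when the
 * 13 MHz oscillator is selected because the peripheral PLL is disabled.
 */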
+
+static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
+       unsigned long parent_rate)
+{
+       unsigned long clkcfg;
+       unsigned int t, ht;
+       unsigned int l, L, n2, N;
+       unsigned long ccsr = CCSR;
+
+       asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+       t  = clkcfg & (1 << 0);
+       ht = clkcfg & (1 << 2);
+
+       l  = ccsr & CCSR_L_MASK;
+       n2 = (ccsr & CCSR_N2_MASK) >> CCSR_N2_SHIFT;
+       L  = l * parent_rate;
+       N  = (L * n2) / 2;
+
+       return t ? N : L;
+}
+PARENTS(clk_pxa27x_cpll) = { "osc_13mhz" };
+RATE_RO_OPS(clk_pxa27x_cpll, "cpll");
+
+static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       unsigned int l, osc_forced;
+       unsigned long ccsr = CCSR;
+       unsigned long cccr = CCCR;
+
+       l  = ccsr & CCSR_L_MASK;
+       osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
+       if (osc_forced) {
+               if (cccr & (1 << CCCR_LCD_26_BIT))
+                       return parent_rate * 2;
+               else
+                       return parent_rate;
+       }
+
+       if (l <= 7)
+               return parent_rate;
+       if (l <= 16)
+               return parent_rate / 2;
+       return parent_rate / 4;
+}
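/*
 * Rough illustration: with the oscillator not forced, lcd_base is fed by
 * the run clock and divided according to CCSR.L; e.g. L = 16 selects
 * divide-by-two, so a 208 MHz run clock (an assumed example rate) would
 * yield a 104 MHz lcd_base.
 */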
+
+static u8 clk_pxa27x_lcd_base_get_parent(struct clk_hw *hw)
+{
+       unsigned int osc_forced;
+       unsigned long ccsr = CCSR;
+
+       osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
+       if (osc_forced)
+               return PXA_LCD_13Mhz;
+       else
+               return PXA_LCD_RUN;
+}
+
+PARENTS(clk_pxa27x_lcd_base) = { "osc_13mhz", "run" };
+MUX_RO_RATE_RO_OPS(clk_pxa27x_lcd_base, "lcd_base");
+
+static void __init pxa27x_register_plls(void)
+{
+       clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               13 * MHz);
+       clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               32768 * KHz);
+       clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+       clk_register_fixed_factor(NULL, "ppll_312mhz", "osc_13mhz", 0, 24, 1);
+}
+
+static unsigned long clk_pxa27x_core_get_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       unsigned long clkcfg;
+       unsigned int t, ht, b, osc_forced;
+       unsigned long ccsr = CCSR;
+
+       osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
+       asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+       t  = clkcfg & (1 << 0);
+       ht = clkcfg & (1 << 2);
+       b  = clkcfg & (1 << 3);
+
+       if (osc_forced)
+               return parent_rate;
+       if (ht)
+               return parent_rate / 2;
+       else
+               return parent_rate;
+}
+
+static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
+{
+       unsigned long clkcfg;
+       unsigned int t, ht, b, osc_forced;
+       unsigned long ccsr = CCSR;
+
+       osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
+       if (osc_forced)
+               return PXA_CORE_13Mhz;
+
+       asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+       t  = clkcfg & (1 << 0);
+       ht = clkcfg & (1 << 2);
+       b  = clkcfg & (1 << 3);
+
+       if (ht || t)
+               return PXA_CORE_TURBO;
+       return PXA_CORE_RUN;
+}
+PARENTS(clk_pxa27x_core) = { "osc_13mhz", "run", "cpll" };
+MUX_RO_RATE_RO_OPS(clk_pxa27x_core, "core");
+
+static unsigned long clk_pxa27x_run_get_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       unsigned long ccsr = CCSR;
+       unsigned int n2 = (ccsr & CCSR_N2_MASK) >> CCSR_N2_SHIFT;
+
+       return (parent_rate / n2) * 2;
+}
+PARENTS(clk_pxa27x_run) = { "cpll" };
+RATE_RO_OPS(clk_pxa27x_run, "run");
+
+static void __init pxa27x_register_core(void)
+{
+       clk_register_clk_pxa27x_cpll();
+       clk_register_clk_pxa27x_run();
+
+       clkdev_pxa_register(CLK_CORE, "core", NULL,
+                           clk_register_clk_pxa27x_core());
+}
+
+static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
+                                                   unsigned long parent_rate)
+{
+       unsigned long clkcfg;
+       unsigned int b, osc_forced;
+       unsigned long ccsr = CCSR;
+
+       osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
+       asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+       b  = clkcfg & (1 << 3);
+
+       if (osc_forced)
+               return parent_rate;
+       if (b)
+               return parent_rate / 2;
+       else
+               return parent_rate;
+}
+
+static u8 clk_pxa27x_system_bus_get_parent(struct clk_hw *hw)
+{
+       unsigned int osc_forced;
+       unsigned long ccsr = CCSR;
+
+       osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
+       if (osc_forced)
+               return PXA_BUS_13Mhz;
+       else
+               return PXA_BUS_RUN;
+}
+
+PARENTS(clk_pxa27x_system_bus) = { "osc_13mhz", "run" };
+MUX_RO_RATE_RO_OPS(clk_pxa27x_system_bus, "system_bus");
+
+static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       unsigned int a, l, osc_forced;
+       unsigned long cccr = CCCR;
+       unsigned long ccsr = CCSR;
+
+       osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
+       a = cccr & CCCR_A_BIT;
+       l  = ccsr & CCSR_L_MASK;
+
+       if (osc_forced || a)
+               return parent_rate;
+       if (l <= 10)
+               return parent_rate;
+       if (l <= 20)
+               return parent_rate / 2;
+       return parent_rate / 4;
+}
+
+static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
+{
+       unsigned int osc_forced, a;
+       unsigned long cccr = CCCR;
+       unsigned long ccsr = CCSR;
+
+       osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
+       a = cccr & CCCR_A_BIT;
+       if (osc_forced)
+               return PXA_MEM_13Mhz;
+       if (a)
+               return PXA_MEM_SYSTEM_BUS;
+       else
+               return PXA_MEM_RUN;
+}
+
+PARENTS(clk_pxa27x_memory) = { "osc_13mhz", "system_bus", "run" };
+MUX_RO_RATE_RO_OPS(clk_pxa27x_memory, "memory");
+
+static void __init pxa27x_base_clocks_init(void)
+{
+       pxa27x_register_plls();
+       pxa27x_register_core();
+       clk_register_clk_pxa27x_system_bus();
+       clk_register_clk_pxa27x_memory();
+       clk_register_clk_pxa27x_lcd_base();
+}
+
+static int __init pxa27x_clocks_init(void)
+{
+       pxa27x_base_clocks_init();
+       return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks));
+}
+postcore_initcall(pxa27x_clocks_init);
index 9db03d3b1657302b5b4ef8e5cca4b19a87edd7e3..b823bc3b625067c3b64b32b79a0b8b3e17541096 100644 (file)
@@ -97,7 +97,7 @@ static unsigned long
 clk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 {
        struct clk_pll *pll = to_clk_pll(hw);
-       u32 l, m, n;
+       u32 l, m, n, config;
        unsigned long rate;
        u64 tmp;
 
@@ -116,13 +116,79 @@ clk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
                do_div(tmp, n);
                rate += tmp;
        }
+       if (pll->post_div_width) {
+               regmap_read(pll->clkr.regmap, pll->config_reg, &config);
+               config >>= pll->post_div_shift;
+               config &= BIT(pll->post_div_width) - 1;
+               rate /= config + 1;
+       }
+
        return rate;
 }
 
+static const
+struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
+{
+       if (!f)
+               return NULL;
+
+       for (; f->freq; f++)
+               if (rate <= f->freq)
+                       return f;
+
+       return NULL;
+}
+
+static long
+clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
+                      unsigned long *p_rate, struct clk **p)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+       const struct pll_freq_tbl *f;
+
+       f = find_freq(pll->freq_tbl, rate);
+       if (!f)
+               return clk_pll_recalc_rate(hw, *p_rate);
+
+       return f->freq;
+}
+
+static int
+clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long p_rate)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+       const struct pll_freq_tbl *f;
+       bool enabled;
+       u32 mode;
+       u32 enable_mask = PLL_OUTCTRL | PLL_BYPASSNL | PLL_RESET_N;
+
+       f = find_freq(pll->freq_tbl, rate);
+       if (!f)
+               return -EINVAL;
+
+       regmap_read(pll->clkr.regmap, pll->mode_reg, &mode);
+       enabled = (mode & enable_mask) == enable_mask;
+
+       if (enabled)
+               clk_pll_disable(hw);
+
+       regmap_update_bits(pll->clkr.regmap, pll->l_reg, 0x3ff, f->l);
+       regmap_update_bits(pll->clkr.regmap, pll->m_reg, 0x7ffff, f->m);
+       regmap_update_bits(pll->clkr.regmap, pll->n_reg, 0x7ffff, f->n);
+       regmap_write(pll->clkr.regmap, pll->config_reg, f->ibits);
+
+       if (enabled)
+               clk_pll_enable(hw);
+
+       return 0;
+}
+
 const struct clk_ops clk_pll_ops = {
        .enable = clk_pll_enable,
        .disable = clk_pll_disable,
        .recalc_rate = clk_pll_recalc_rate,
+       .determine_rate = clk_pll_determine_rate,
+       .set_rate = clk_pll_set_rate,
 };
 EXPORT_SYMBOL_GPL(clk_pll_ops);
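As a rough illustration of the post-divider handling added above:
clk_pll_recalc_rate() computes rate = parent_rate * L + parent_rate * M / N and
then divides by (post_div + 1), so assuming a 25 MHz reference with L = 48,
M = 0 and a post-divider field of 1, the reported rate would be
25 MHz * 48 / (1 + 1) = 600 MHz.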
 
index 3003e9962472f3ae98ae55709d2d4f347d2ccf40..c9c0cda306d04e45548daa0d5b50f802ab2c04a9 100644 (file)
 #include <linux/clk-provider.h>
 #include "clk-regmap.h"
 
+/**
+ * struct pll_freq_tbl - PLL frequency table
+ * @freq: output frequency of this entry
+ * @l: L value
+ * @m: M value
+ * @n: N value
+ * @ibits: internal values
+ */
+struct pll_freq_tbl {
+       unsigned long freq;
+       u16 l;
+       u16 m;
+       u16 n;
+       u32 ibits;
+};
+
 /**
  * struct clk_pll - phase locked loop (PLL)
  * @l_reg: L register
@@ -26,6 +41,7 @@
  * @mode_reg: mode register
  * @status_reg: status register
  * @status_bit: ANDed with @status_reg to determine if PLL is enabled
+ * @freq_tbl: PLL frequency table
  * @hw: handle between common and hardware-specific interfaces
  */
 struct clk_pll {
@@ -36,6 +52,10 @@ struct clk_pll {
        u32     mode_reg;
        u32     status_reg;
        u8      status_bit;
+       u8      post_div_width;
+       u8      post_div_shift;
+
+       const struct pll_freq_tbl *freq_tbl;
 
        struct clk_regmap clkr;
 };
index b638c5846dbfb20e2cc20046afe9823927713921..b6e6959e89aafed9248ba6ddd882e0ece7a4deb1 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/div64.h>
 
 #include "clk-rcg.h"
+#include "common.h"
 
 static u32 ns_to_src(struct src_sel *s, u32 ns)
 {
@@ -67,16 +68,16 @@ static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
 {
        struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
        int num_parents = __clk_get_num_parents(hw->clk);
-       u32 ns, ctl;
+       u32 ns, reg;
        int bank;
        int i;
        struct src_sel *s;
 
-       regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
-       bank = reg_to_bank(rcg, ctl);
+       regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+       bank = reg_to_bank(rcg, reg);
        s = &rcg->s[bank];
 
-       regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+       regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
        ns = ns_to_src(s, ns);
 
        for (i = 0; i < num_parents; i++)
@@ -192,90 +193,93 @@ static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val)
 
 static void configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
 {
-       u32 ns, md, ctl, *regp;
+       u32 ns, md, reg;
        int bank, new_bank;
        struct mn *mn;
        struct pre_div *p;
        struct src_sel *s;
        bool enabled;
-       u32 md_reg;
-       u32 bank_reg;
+       u32 md_reg, ns_reg;
        bool banked_mn = !!rcg->mn[1].width;
+       bool banked_p = !!rcg->p[1].pre_div_width;
        struct clk_hw *hw = &rcg->clkr.hw;
 
        enabled = __clk_is_enabled(hw->clk);
 
-       regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
-       regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
-
-       if (banked_mn) {
-               regp = &ctl;
-               bank_reg = rcg->clkr.enable_reg;
-       } else {
-               regp = &ns;
-               bank_reg = rcg->ns_reg;
-       }
-
-       bank = reg_to_bank(rcg, *regp);
+       regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+       bank = reg_to_bank(rcg, reg);
        new_bank = enabled ? !bank : bank;
 
+       ns_reg = rcg->ns_reg[new_bank];
+       regmap_read(rcg->clkr.regmap, ns_reg, &ns);
+
        if (banked_mn) {
                mn = &rcg->mn[new_bank];
                md_reg = rcg->md_reg[new_bank];
 
                ns |= BIT(mn->mnctr_reset_bit);
-               regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+               regmap_write(rcg->clkr.regmap, ns_reg, ns);
 
                regmap_read(rcg->clkr.regmap, md_reg, &md);
                md = mn_to_md(mn, f->m, f->n, md);
                regmap_write(rcg->clkr.regmap, md_reg, md);
 
                ns = mn_to_ns(mn, f->m, f->n, ns);
-               regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+               regmap_write(rcg->clkr.regmap, ns_reg, ns);
 
-               ctl = mn_to_reg(mn, f->m, f->n, ctl);
-               regmap_write(rcg->clkr.regmap, rcg->clkr.enable_reg, ctl);
+               /* Two NS registers mean the mode control is in the NS register */
+               if (rcg->ns_reg[0] != rcg->ns_reg[1]) {
+                       ns = mn_to_reg(mn, f->m, f->n, ns);
+                       regmap_write(rcg->clkr.regmap, ns_reg, ns);
+               } else {
+                       reg = mn_to_reg(mn, f->m, f->n, reg);
+                       regmap_write(rcg->clkr.regmap, rcg->bank_reg, reg);
+               }
 
                ns &= ~BIT(mn->mnctr_reset_bit);
-               regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
-       } else {
+               regmap_write(rcg->clkr.regmap, ns_reg, ns);
+       }
+
+       if (banked_p) {
                p = &rcg->p[new_bank];
                ns = pre_div_to_ns(p, f->pre_div - 1, ns);
        }
 
        s = &rcg->s[new_bank];
        ns = src_to_ns(s, s->parent_map[f->src], ns);
-       regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+       regmap_write(rcg->clkr.regmap, ns_reg, ns);
 
        if (enabled) {
-               *regp ^= BIT(rcg->mux_sel_bit);
-               regmap_write(rcg->clkr.regmap, bank_reg, *regp);
+               regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+               reg ^= BIT(rcg->mux_sel_bit);
+               regmap_write(rcg->clkr.regmap, rcg->bank_reg, reg);
        }
 }
 
 static int clk_dyn_rcg_set_parent(struct clk_hw *hw, u8 index)
 {
        struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
-       u32 ns, ctl, md, reg;
+       u32 ns, md, reg;
        int bank;
        struct freq_tbl f = { 0 };
        bool banked_mn = !!rcg->mn[1].width;
+       bool banked_p = !!rcg->p[1].pre_div_width;
 
-       regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
-       regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
-       reg = banked_mn ? ctl : ns;
-
+       regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
        bank = reg_to_bank(rcg, reg);
 
+       regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
+
        if (banked_mn) {
                regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
                f.m = md_to_m(&rcg->mn[bank], md);
                f.n = ns_m_to_n(&rcg->mn[bank], ns, f.m);
-       } else {
-               f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1;
        }
-       f.src = index;
 
+       if (banked_p)
+               f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1;
+
+       f.src = index;
        configure_bank(rcg, &f);
 
        return 0;
@@ -336,41 +340,30 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
        u32 m, n, pre_div, ns, md, mode, reg;
        int bank;
        struct mn *mn;
+       bool banked_p = !!rcg->p[1].pre_div_width;
        bool banked_mn = !!rcg->mn[1].width;
 
-       regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
-
-       if (banked_mn)
-               regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &reg);
-       else
-               reg = ns;
-
+       regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
        bank = reg_to_bank(rcg, reg);
 
+       regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
+       m = n = pre_div = mode = 0;
+
        if (banked_mn) {
                mn = &rcg->mn[bank];
                regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
                m = md_to_m(mn, md);
                n = ns_m_to_n(mn, ns, m);
+               /* Two NS registers mean the mode control is in the NS register */
+               if (rcg->ns_reg[0] != rcg->ns_reg[1])
+                       reg = ns;
                mode = reg_to_mnctr_mode(mn, reg);
-               return calc_rate(parent_rate, m, n, mode, 0);
-       } else {
-               pre_div = ns_to_pre_div(&rcg->p[bank], ns);
-               return calc_rate(parent_rate, 0, 0, 0, pre_div);
        }
-}
 
-static const
-struct freq_tbl *find_freq(const struct freq_tbl *f, unsigned long rate)
-{
-       if (!f)
-               return NULL;
-
-       for (; f->freq; f++)
-               if (rate <= f->freq)
-                       return f;
+       if (banked_p)
+               pre_div = ns_to_pre_div(&rcg->p[bank], ns);
 
-       return NULL;
+       return calc_rate(parent_rate, m, n, mode, pre_div);
 }
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
@@ -379,7 +372,7 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 {
        unsigned long clk_flags;
 
-       f = find_freq(f, rate);
+       f = qcom_find_freq(f, rate);
        if (!f)
                return -EINVAL;
 
@@ -477,7 +470,7 @@ static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
        struct clk_rcg *rcg = to_clk_rcg(hw);
        const struct freq_tbl *f;
 
-       f = find_freq(rcg->freq_tbl, rate);
+       f = qcom_find_freq(rcg->freq_tbl, rate);
        if (!f)
                return -EINVAL;
 
@@ -497,7 +490,7 @@ static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
        struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
        const struct freq_tbl *f;
 
-       f = find_freq(rcg->freq_tbl, rate);
+       f = qcom_find_freq(rcg->freq_tbl, rate);
        if (!f)
                return -EINVAL;
 
index ba0523cefd2e4ad04fcb6a853e727263011333ee..687e41f91d7c9f90d99eb80acbd98086d237e4e5 100644 (file)
@@ -103,8 +103,9 @@ extern const struct clk_ops clk_rcg_bypass_ops;
  * struct clk_dyn_rcg - root clock generator with glitch free mux
  *
  * @mux_sel_bit: bit to switch glitch free mux
- * @ns_reg: NS register
+ * @ns_reg: NS0 and NS1 register
  * @md_reg: MD0 and MD1 register
+ * @bank_reg: register to XOR @mux_sel_bit into to switch glitch free mux
  * @mn: mn counter (banked)
  * @s: source selector (banked)
  * @freq_tbl: frequency table
@@ -113,8 +114,9 @@ extern const struct clk_ops clk_rcg_bypass_ops;
  *
  */
 struct clk_dyn_rcg {
-       u32     ns_reg;
+       u32     ns_reg[2];
        u32     md_reg[2];
+       u32     bank_reg;
 
        u8      mux_sel_bit;
 
index cd185d5cc67a3710f4a825c7ffd8bf4f66ae7f45..cfa9eb4fe9ca60c1420e7821f7d927e1fc82fb10 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/div64.h>
 
 #include "clk-rcg.h"
+#include "common.h"
 
 #define CMD_REG                        0x0
 #define CMD_UPDATE             BIT(0)
@@ -172,27 +173,13 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
        return calc_rate(parent_rate, m, n, mode, hid_div);
 }
 
-static const
-struct freq_tbl *find_freq(const struct freq_tbl *f, unsigned long rate)
-{
-       if (!f)
-               return NULL;
-
-       for (; f->freq; f++)
-               if (rate <= f->freq)
-                       return f;
-
-       /* Default to our fastest rate */
-       return f - 1;
-}
-
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
                const struct freq_tbl *f, unsigned long rate,
                unsigned long *p_rate, struct clk **p)
 {
        unsigned long clk_flags;
 
-       f = find_freq(f, rate);
+       f = qcom_find_freq(f, rate);
        if (!f)
                return -EINVAL;
 
@@ -268,7 +255,7 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        const struct freq_tbl *f;
 
-       f = find_freq(rcg->freq_tbl, rate);
+       f = qcom_find_freq(rcg->freq_tbl, rate);
        if (!f)
                return -EINVAL;
 
index eeb3eea01f4ca751944df67da5d4df65cabbb048..e20d947db3e50516a132f9e6cc17717da0abd23f 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/reset-controller.h>
 
 #include "common.h"
+#include "clk-rcg.h"
 #include "clk-regmap.h"
 #include "reset.h"
 
@@ -27,6 +28,21 @@ struct qcom_cc {
        struct clk *clks[];
 };
 
+const
+struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
+{
+       if (!f)
+               return NULL;
+
+       for (; f->freq; f++)
+               if (rate <= f->freq)
+                       return f;
+
+       /* Default to our fastest rate */
+       return f - 1;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq);
+
 struct regmap *
 qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 {
index 2765e9d3da97ef5c23f976dd0e62fa4c34edf1d5..f519322acdf3be21d4ada6d014a517b459f956a6 100644 (file)
@@ -18,6 +18,7 @@ struct regmap_config;
 struct clk_regmap;
 struct qcom_reset_map;
 struct regmap;
+struct freq_tbl;
 
 struct qcom_cc_desc {
        const struct regmap_config *config;
@@ -27,6 +28,9 @@ struct qcom_cc_desc {
        size_t num_resets;
 };
 
+extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
+                                            unsigned long rate);
+
 extern struct regmap *qcom_cc_map(struct platform_device *pdev,
                                  const struct qcom_cc_desc *desc);
 extern int qcom_cc_really_probe(struct platform_device *pdev,
index 3b83b7dd78c7297b612c2c3720e8c86568ce447e..5cd62a709ac7d5baa494865b6a63aa2d533eb516 100644 (file)
 #include "clk-branch.h"
 #include "reset.h"
 
+static struct clk_pll pll0 = {
+       .l_reg = 0x30c4,
+       .m_reg = 0x30c8,
+       .n_reg = 0x30cc,
+       .config_reg = 0x30d4,
+       .mode_reg = 0x30c0,
+       .status_reg = 0x30d8,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll0",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_regmap pll0_vote = {
+       .enable_reg = 0x34c0,
+       .enable_mask = BIT(0),
+       .hw.init = &(struct clk_init_data){
+               .name = "pll0_vote",
+               .parent_names = (const char *[]){ "pll0" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
 static struct clk_pll pll3 = {
        .l_reg = 0x3164,
        .m_reg = 0x3168,
@@ -154,7 +181,7 @@ static const u8 gcc_pxo_pll8_pll0[] = {
 static const char *gcc_pxo_pll8_pll0_map[] = {
        "pxo",
        "pll8_vote",
-       "pll0",
+       "pll0_vote",
 };
 
 static struct freq_tbl clk_tbl_gsbi_uart[] = {
@@ -2133,6 +2160,8 @@ static struct clk_branch usb_fs1_h_clk = {
 };
 
 static struct clk_regmap *gcc_ipq806x_clks[] = {
+       [PLL0] = &pll0.clkr,
+       [PLL0_VOTE] = &pll0_vote,
        [PLL3] = &pll3.clkr,
        [PLL8] = &pll8.clkr,
        [PLL8_VOTE] = &pll8_vote,
index 68da36cd7ed0e1f026b8a80cf047894ff17975ce..e8b33bbc362f9aa552959fbd56d231dba86ee1f0 100644 (file)
@@ -773,9 +773,11 @@ static struct freq_tbl clk_tbl_gfx2d[] = {
 };
 
 static struct clk_dyn_rcg gfx2d0_src = {
-       .ns_reg = 0x0070,
+       .ns_reg[0] = 0x0070,
+       .ns_reg[1] = 0x0070,
        .md_reg[0] = 0x0064,
        .md_reg[1] = 0x0068,
+       .bank_reg = 0x0060,
        .mn[0] = {
                .mnctr_en_bit = 8,
                .mnctr_reset_bit = 25,
@@ -831,9 +833,11 @@ static struct clk_branch gfx2d0_clk = {
 };
 
 static struct clk_dyn_rcg gfx2d1_src = {
-       .ns_reg = 0x007c,
+       .ns_reg[0] = 0x007c,
+       .ns_reg[1] = 0x007c,
        .md_reg[0] = 0x0078,
        .md_reg[1] = 0x006c,
+       .bank_reg = 0x0074,
        .mn[0] = {
                .mnctr_en_bit = 8,
                .mnctr_reset_bit = 25,
@@ -930,9 +934,11 @@ static struct freq_tbl clk_tbl_gfx3d_8064[] = {
 };
 
 static struct clk_dyn_rcg gfx3d_src = {
-       .ns_reg = 0x008c,
+       .ns_reg[0] = 0x008c,
+       .ns_reg[1] = 0x008c,
        .md_reg[0] = 0x0084,
        .md_reg[1] = 0x0088,
+       .bank_reg = 0x0080,
        .mn[0] = {
                .mnctr_en_bit = 8,
                .mnctr_reset_bit = 25,
@@ -1006,9 +1012,11 @@ static struct freq_tbl clk_tbl_vcap[] = {
 };
 
 static struct clk_dyn_rcg vcap_src = {
-       .ns_reg = 0x021c,
+       .ns_reg[0] = 0x021c,
+       .ns_reg[1] = 0x021c,
        .md_reg[0] = 0x01ec,
        .md_reg[1] = 0x0218,
+       .bank_reg = 0x0178,
        .mn[0] = {
                .mnctr_en_bit = 8,
                .mnctr_reset_bit = 23,
@@ -1211,9 +1219,11 @@ static struct freq_tbl clk_tbl_mdp[] = {
 };
 
 static struct clk_dyn_rcg mdp_src = {
-       .ns_reg = 0x00d0,
+       .ns_reg[0] = 0x00d0,
+       .ns_reg[1] = 0x00d0,
        .md_reg[0] = 0x00c4,
        .md_reg[1] = 0x00c8,
+       .bank_reg = 0x00c0,
        .mn[0] = {
                .mnctr_en_bit = 8,
                .mnctr_reset_bit = 31,
@@ -1318,7 +1328,9 @@ static struct freq_tbl clk_tbl_rot[] = {
 };
 
 static struct clk_dyn_rcg rot_src = {
-       .ns_reg = 0x00e8,
+       .ns_reg[0] = 0x00e8,
+       .ns_reg[1] = 0x00e8,
+       .bank_reg = 0x00e8,
        .p[0] = {
                .pre_div_shift = 22,
                .pre_div_width = 4,
@@ -1542,9 +1554,11 @@ static struct freq_tbl clk_tbl_vcodec[] = {
 };
 
 static struct clk_dyn_rcg vcodec_src = {
-       .ns_reg = 0x0100,
+       .ns_reg[0] = 0x0100,
+       .ns_reg[1] = 0x0100,
        .md_reg[0] = 0x00fc,
        .md_reg[1] = 0x0128,
+       .bank_reg = 0x00f8,
        .mn[0] = {
                .mnctr_en_bit = 5,
                .mnctr_reset_bit = 31,
index dc85f8e7a2d77530ae89036a23e6737291ef6d33..6e6cca3920829b5a7dc0685a071389a2d201d780 100644 (file)
@@ -110,7 +110,14 @@ enum exynos3250_plls {
        nr_plls
 };
 
+/* list of PLLs in DMC block to be registered */
+enum exynos3250_dmc_plls {
+       bpll, epll,
+       nr_dmc_plls
+};
+
 static void __iomem *reg_base;
+static void __iomem *dmc_reg_base;
 
 /*
  * Support for CMU save/restore across system suspends
@@ -266,6 +273,7 @@ PNAME(group_sclk_cam_blk_p) = { "xxti", "xusbxti",
                                    "none", "none", "none",
                                    "none", "div_mpll_pre",
                                    "mout_epll_user", "mout_vpll",
+                                   "none", "none", "none",
                                    "div_cam_blk_320", };
 PNAME(group_sclk_fimd0_p)      = { "xxti", "xusbxti",
                                    "m_bitclkhsdiv4_2l", "none",
@@ -353,8 +361,8 @@ static struct samsung_mux_clock mux_clks[] __initdata = {
 
        /* SRC_FSYS */
        MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4),
-       MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 3),
-       MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 3),
+       MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 4),
+       MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 4),
 
        /* SRC_PERIL0 */
        MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4),
@@ -423,7 +431,7 @@ static struct samsung_div_clock div_clks[] __initdata = {
        DIV(CLK_DIV_SPI1_ISP, "div_spi1_isp", "mout_spi1_isp", DIV_ISP, 16, 4),
        DIV_F(CLK_DIV_SPI0_ISP_PRE, "div_spi0_isp_pre", "div_spi0_isp",
                DIV_ISP, 8, 8, CLK_SET_RATE_PARENT, 0),
-       DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 0, 4),
+       DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 4, 4),
 
        /* DIV_FSYS0 */
        DIV_F(CLK_DIV_TSADC_PRE, "div_tsadc_pre", "div_tsadc", DIV_FSYS0, 8, 8,
@@ -724,6 +732,25 @@ static struct samsung_pll_rate_table exynos3250_pll_rates[] = {
        { /* sentinel */ }
 };
 
+/* EPLL */
+static struct samsung_pll_rate_table exynos3250_epll_rates[] = {
+       PLL_36XX_RATE(800000000, 200, 3, 1,     0),
+       PLL_36XX_RATE(288000000,  96, 2, 2,     0),
+       PLL_36XX_RATE(192000000, 128, 2, 3,     0),
+       PLL_36XX_RATE(144000000,  96, 2, 3,     0),
+       PLL_36XX_RATE( 96000000, 128, 2, 4,     0),
+       PLL_36XX_RATE( 84000000, 112, 2, 4,     0),
+       PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
+       PLL_36XX_RATE( 73728000,  98, 2, 4, 19923),
+       PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
+       PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
+       PLL_36XX_RATE( 50000000, 200, 3, 5,     0),
+       PLL_36XX_RATE( 49152002, 131, 2, 5,  4719),
+       PLL_36XX_RATE( 48000000, 128, 2, 5,     0),
+       PLL_36XX_RATE( 45158401, 180, 3, 5, 41524),
+       { /* sentinel */ }
+};
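/*
 * Rough illustration (assuming the usual 24 MHz fin_pll): a pll_36xx output
 * is FOUT = (MDIV + K / 65536) * FIN / (PDIV * 2^SDIV), so the 288 MHz entry
 * above corresponds to 96 * 24 MHz / (2 * 2^2).
 */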
+
 /* VPLL */
 static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
        PLL_36XX_RATE(600000000, 100, 2, 1,     0),
@@ -821,3 +848,172 @@ static void __init exynos3250_cmu_init(struct device_node *np)
        samsung_clk_of_add_provider(np, ctx);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
+
+/*
+ * CMU DMC
+ */
+
+#define BPLL_LOCK              0x0118
+#define BPLL_CON0              0x0218
+#define BPLL_CON1              0x021c
+#define BPLL_CON2              0x0220
+#define SRC_DMC                        0x0300
+#define DIV_DMC1               0x0504
+#define GATE_BUS_DMC0          0x0700
+#define GATE_BUS_DMC1          0x0704
+#define GATE_BUS_DMC2          0x0708
+#define GATE_BUS_DMC3          0x070c
+#define GATE_SCLK_DMC          0x0800
+#define GATE_IP_DMC0           0x0900
+#define GATE_IP_DMC1           0x0904
+#define EPLL_LOCK              0x1110
+#define EPLL_CON0              0x1114
+#define EPLL_CON1              0x1118
+#define EPLL_CON2              0x111c
+#define SRC_EPLL               0x1120
+
+/*
+ * Support for CMU save/restore across system suspends
+ */
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *exynos3250_dmc_clk_regs;
+
+static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
+       BPLL_LOCK,
+       BPLL_CON0,
+       BPLL_CON1,
+       BPLL_CON2,
+       SRC_DMC,
+       DIV_DMC1,
+       GATE_BUS_DMC0,
+       GATE_BUS_DMC1,
+       GATE_BUS_DMC2,
+       GATE_BUS_DMC3,
+       GATE_SCLK_DMC,
+       GATE_IP_DMC0,
+       GATE_IP_DMC1,
+       EPLL_LOCK,
+       EPLL_CON0,
+       EPLL_CON1,
+       EPLL_CON2,
+       SRC_EPLL,
+};
+
+static int exynos3250_dmc_clk_suspend(void)
+{
+       samsung_clk_save(dmc_reg_base, exynos3250_dmc_clk_regs,
+                               ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
+       return 0;
+}
+
+static void exynos3250_dmc_clk_resume(void)
+{
+       samsung_clk_restore(dmc_reg_base, exynos3250_dmc_clk_regs,
+                               ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
+}
+
+static struct syscore_ops exynos3250_dmc_clk_syscore_ops = {
+       .suspend = exynos3250_dmc_clk_suspend,
+       .resume = exynos3250_dmc_clk_resume,
+};
+
+static void exynos3250_dmc_clk_sleep_init(void)
+{
+       exynos3250_dmc_clk_regs =
+               samsung_clk_alloc_reg_dump(exynos3250_cmu_dmc_clk_regs,
+                                  ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
+       if (!exynos3250_dmc_clk_regs) {
+               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
+               goto err;
+       }
+
+       register_syscore_ops(&exynos3250_dmc_clk_syscore_ops);
+       return;
+err:
+       kfree(exynos3250_dmc_clk_regs);
+}
+#else
+static inline void exynos3250_dmc_clk_sleep_init(void) { }
+#endif
+
+PNAME(mout_epll_p)     = { "fin_pll", "fout_epll", };
+PNAME(mout_bpll_p)     = { "fin_pll", "fout_bpll", };
+PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", };
+PNAME(mout_dphy_p)     = { "mout_mpll_mif", "mout_bpll", };
+
+static struct samsung_mux_clock dmc_mux_clks[] __initdata = {
+       /*
+        * NOTE: Following table is sorted by register address in ascending
+        * order and then bitfield shift in descending order, as it is done
+        * in the User's Manual. When adding new entries, please make sure
+        * that the order is preserved, to avoid merge conflicts and make
+        * further work with defined data easier.
+        */
+
+       /* SRC_DMC */
+       MUX(CLK_MOUT_MPLL_MIF, "mout_mpll_mif", mout_mpll_mif_p, SRC_DMC, 12, 1),
+       MUX(CLK_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_DMC, 10, 1),
+       MUX(CLK_MOUT_DPHY, "mout_dphy", mout_dphy_p, SRC_DMC, 8, 1),
+       MUX(CLK_MOUT_DMC_BUS, "mout_dmc_bus", mout_dphy_p, SRC_DMC,  4, 1),
+
+       /* SRC_EPLL */
+       MUX(CLK_MOUT_EPLL, "mout_epll", mout_epll_p, SRC_EPLL, 4, 1),
+};
+
+static struct samsung_div_clock dmc_div_clks[] __initdata = {
+       /*
+        * NOTE: Following table is sorted by register address in ascending
+        * order and then bitfield shift in descending order, as it is done
+        * in the User's Manual. When adding new entries, please make sure
+        * that the order is preserved, to avoid merge conflicts and make
+        * further work with defined data easier.
+        */
+
+       /* DIV_DMC1 */
+       DIV(CLK_DIV_DMC, "div_dmc", "div_dmc_pre", DIV_DMC1, 27, 3),
+       DIV(CLK_DIV_DPHY, "div_dphy", "mout_dphy", DIV_DMC1, 23, 3),
+       DIV(CLK_DIV_DMC_PRE, "div_dmc_pre", "mout_dmc_bus", DIV_DMC1, 19, 2),
+       DIV(CLK_DIV_DMCP, "div_dmcp", "div_dmcd", DIV_DMC1, 15, 3),
+       DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
+};
+
+static struct samsung_pll_clock exynos3250_dmc_plls[nr_dmc_plls] __initdata = {
+       [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
+                       BPLL_LOCK, BPLL_CON0, NULL),
+       [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+                       EPLL_LOCK, EPLL_CON0, NULL),
+};
+
+static void __init exynos3250_cmu_dmc_init(struct device_node *np)
+{
+       struct samsung_clk_provider *ctx;
+
+       dmc_reg_base = of_iomap(np, 0);
+       if (!dmc_reg_base)
+               panic("%s: failed to map registers\n", __func__);
+
+       ctx = samsung_clk_init(np, dmc_reg_base, NR_CLKS_DMC);
+       if (!ctx)
+               panic("%s: unable to allocate context.\n", __func__);
+
+       exynos3250_dmc_plls[bpll].rate_table = exynos3250_pll_rates;
+       exynos3250_dmc_plls[epll].rate_table = exynos3250_epll_rates;
+
+       pr_debug("CLK registering epll bpll: %d, %d, %d, %d\n",
+                       exynos3250_dmc_plls[bpll].rate_table[0].rate,
+                       exynos3250_dmc_plls[bpll].rate_table[0].mdiv,
+                       exynos3250_dmc_plls[bpll].rate_table[0].pdiv,
+                       exynos3250_dmc_plls[bpll].rate_table[0].sdiv
+             );
+       samsung_clk_register_pll(ctx, exynos3250_dmc_plls,
+                               ARRAY_SIZE(exynos3250_dmc_plls), dmc_reg_base);
+
+       samsung_clk_register_mux(ctx, dmc_mux_clks, ARRAY_SIZE(dmc_mux_clks));
+       samsung_clk_register_div(ctx, dmc_div_clks, ARRAY_SIZE(dmc_div_clks));
+
+       exynos3250_dmc_clk_sleep_init();
+
+       samsung_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
+               exynos3250_cmu_dmc_init);
index ac163d7f5bc35e89f5b37c7525833dfdcdbf72a6..940f02837b824869932d3e6b63462942577a708c 100644 (file)
@@ -517,7 +517,7 @@ static struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initdata
        FFACTOR(0, "sclk_apll_div_2", "sclk_apll", 1, 2, 0),
        FFACTOR(0, "fout_mpll_div_2", "fout_mpll", 1, 2, 0),
        FFACTOR(0, "fout_apll_div_2", "fout_apll", 1, 2, 0),
-       FFACTOR(0, "arm_clk_div_2", "arm_clk", 1, 2, 0),
+       FFACTOR(0, "arm_clk_div_2", "div_core2", 1, 2, 0),
 };
 
 static struct samsung_fixed_factor_clock exynos4210_fixed_factor_clks[] __initdata = {
@@ -535,7 +535,7 @@ static struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initda
 static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
        MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
                        CLK_SET_RATE_PARENT, 0, "mout_apll"),
-       MUX(0, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
+       MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
        MUX(0, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
        MUX(0, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
        MUX_F(CLK_MOUT_G3D1, "mout_g3d1", sclk_evpll_p, SRC_G3D, 4, 1,
@@ -569,7 +569,7 @@ static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
        MUX(0, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
        MUX(0, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
        MUX(0, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
-       MUX(0, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
+       MUX(CLK_MOUT_MIXER, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
        MUX(0, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
        MUX(0, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
        MUX(0, "mout_g2d1", sclk_evpll_p, E4210_SRC_IMAGE, 4, 1),
@@ -719,7 +719,7 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
        DIV(0, "div_periph", "div_core2", DIV_CPU0, 12, 3),
        DIV(0, "div_atb", "mout_core", DIV_CPU0, 16, 3),
        DIV(0, "div_pclk_dbg", "div_atb", DIV_CPU0, 20, 3),
-       DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
+       DIV(CLK_ARM_CLK, "div_core2", "div_core", DIV_CPU0, 28, 3),
        DIV(0, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
        DIV(0, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
        DIV(0, "div_clkout_cpu", "mout_clkout_cpu", CLKOUT_CMU_CPU, 8, 6),
@@ -733,8 +733,7 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
        DIV(0, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
        DIV(0, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
        DIV(CLK_SCLK_MFC, "sclk_mfc", "mout_mfc", DIV_MFC, 0, 4),
-       DIV_F(0, "div_g3d", "mout_g3d", DIV_G3D, 0, 4,
-                       CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_SCLK_G3D, "sclk_g3d", "mout_g3d", DIV_G3D, 0, 4),
        DIV(0, "div_fimd0", "mout_fimd0", DIV_LCD0, 0, 4),
        DIV(0, "div_mipi0", "mout_mipi0", DIV_LCD0, 16, 4),
        DIV(0, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
@@ -769,7 +768,6 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
        DIV(0, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
        DIV(0, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
        DIV(0, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
-       DIV(CLK_ARM_CLK, "arm_clk", "div_core2", DIV_CPU0, 28, 3),
        DIV(CLK_SCLK_APLL, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
        DIV_F(0, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
                        CLK_SET_RATE_PARENT, 0),
@@ -857,8 +855,7 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
                0),
        GATE(CLK_TSI, "tsi", "aclk133", GATE_IP_FSYS, 4, 0, 0),
        GATE(CLK_SROMC, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0),
-       GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0,
-                       CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_G3D, "g3d", "aclk200", GATE_IP_G3D, 0, 0, 0),
        GATE(CLK_PPMUG3D, "ppmug3d", "aclk200", GATE_IP_G3D, 1, 0, 0),
        GATE(CLK_USB_DEVICE, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0),
        GATE(CLK_ONENAND, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0),
@@ -1183,6 +1180,7 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
        GATE(CLK_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
                        CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+       GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", GATE_IP_DMC, 24, 0, 0),
        GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
                0),
 };
@@ -1486,7 +1484,7 @@ static void __init exynos4_clk_init(struct device_node *np,
                exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
                _get_rate("sclk_apll"), _get_rate("sclk_mpll"),
                _get_rate("sclk_epll"), _get_rate("sclk_vpll"),
-               _get_rate("arm_clk"));
+               _get_rate("div_core2"));
 }
 
 
index ce3de97e5f11299fd1d852f8ec4f1d6612cbf571..2527e39aadcfbfafab2a0b5c97d4aab2e4809458 100644 (file)
@@ -1581,7 +1581,7 @@ struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = {
        FRATE(PHYCLK_HDMI_LINK_O_TMDS_CLKHI, "phyclk_hdmi_link_o_tmds_clkhi",
                        NULL, CLK_IS_ROOT, 125000000),
        FRATE(PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS,
-                       "phyclk_mipi_dphy_4l_m_txbyteclkhs" , NULL,
+                       "phyclk_mipi_dphy_4l_m_txbyte_clkhs" , NULL,
                        CLK_IS_ROOT, 187500000),
        FRATE(PHYCLK_DPTX_PHY_O_REF_CLK_24M, "phyclk_dptx_phy_o_ref_clk_24m",
                        NULL, CLK_IS_ROOT, 24000000),
index 6850cba35871a7b6a4fe7383c0412f2b74754786..7ddc2b5538466737fbdc0e05ca3c58a153094b26 100644 (file)
@@ -5,6 +5,8 @@
 obj-y += clk-sunxi.o clk-factors.o
 obj-y += clk-a10-hosc.o
 obj-y += clk-a20-gmac.o
+obj-y += clk-mod0.o
+obj-y += clk-sun8i-mbus.o
 
 obj-$(CONFIG_MFD_SUN6I_PRCM) += \
        clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
index 2057c8ac648f6a381fd137ce3b5db66dca8afe0a..f83ba097126c6e72c22517a66f17034474e8ebf2 100644 (file)
@@ -9,18 +9,18 @@
  */
 
 #include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/err.h>
 #include <linux/string.h>
 
-#include <linux/delay.h>
-
 #include "clk-factors.h"
 
 /*
- * DOC: basic adjustable factor-based clock that cannot gate
+ * DOC: basic adjustable factor-based clock
  *
  * Traits of this clock:
  * prepare - clk_prepare only ensures that parents are prepared
@@ -32,6 +32,8 @@
 
 #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
 
+#define FACTORS_MAX_PARENTS            5
+
 #define SETMASK(len, pos)              (((1U << (len)) - 1) << (pos))
 #define CLRMASK(len, pos)              (~(SETMASK(len, pos)))
 #define FACTOR_GET(bit, len, reg)      (((reg) & SETMASK(len, bit)) >> (bit))
@@ -147,9 +149,96 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
-const struct clk_ops clk_factors_ops = {
+static const struct clk_ops clk_factors_ops = {
        .determine_rate = clk_factors_determine_rate,
        .recalc_rate = clk_factors_recalc_rate,
        .round_rate = clk_factors_round_rate,
        .set_rate = clk_factors_set_rate,
 };
+
+struct clk * __init sunxi_factors_register(struct device_node *node,
+                                          const struct factors_data *data,
+                                          spinlock_t *lock)
+{
+       struct clk *clk;
+       struct clk_factors *factors;
+       struct clk_gate *gate = NULL;
+       struct clk_mux *mux = NULL;
+       struct clk_hw *gate_hw = NULL;
+       struct clk_hw *mux_hw = NULL;
+       const char *clk_name = node->name;
+       const char *parents[FACTORS_MAX_PARENTS];
+       void __iomem *reg;
+       int i = 0;
+
+       reg = of_iomap(node, 0);
+
+       /* if we have a mux, we will have >1 parents */
+       while (i < FACTORS_MAX_PARENTS &&
+              (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+               i++;
+
+       /*
+        * some factor clocks, such as pll5 and pll6, may have multiple
+        * outputs, and have their name designated in factors_data
+        */
+       if (data->name)
+               clk_name = data->name;
+       else
+               of_property_read_string(node, "clock-output-names", &clk_name);
+
+       factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
+       if (!factors)
+               return NULL;
+
+       /* set up factors properties */
+       factors->reg = reg;
+       factors->config = data->table;
+       factors->get_factors = data->getter;
+       factors->lock = lock;
+
+       /* Add a gate if this factor clock can be gated */
+       if (data->enable) {
+               gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+               if (!gate) {
+                       kfree(factors);
+                       return NULL;
+               }
+
+               /* set up gate properties */
+               gate->reg = reg;
+               gate->bit_idx = data->enable;
+               gate->lock = factors->lock;
+               gate_hw = &gate->hw;
+       }
+
+       /* Add a mux if this factor clock can be muxed */
+       if (data->mux) {
+               mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+               if (!mux) {
+                       kfree(factors);
+                       kfree(gate);
+                       return NULL;
+               }
+
+               /* set up mux properties */
+               mux->reg = reg;
+               mux->shift = data->mux;
+               mux->mask = SUNXI_FACTORS_MUX_MASK;
+               mux->lock = factors->lock;
+               mux_hw = &mux->hw;
+       }
+
+       clk = clk_register_composite(NULL, clk_name,
+                       parents, i,
+                       mux_hw, &clk_mux_ops,
+                       &factors->hw, &clk_factors_ops,
+                       gate_hw, &clk_gate_ops, 0);
+
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               clk_register_clkdev(clk, clk_name, NULL);
+       }
+
+       return clk;
+}
index d2d0efa39379985f284387d87bba2159627480b1..9913840018d3fdb4f4717a4636b68f8dc6f4a6e6 100644 (file)
@@ -3,9 +3,12 @@
 
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
+#include <linux/spinlock.h>
 
 #define SUNXI_FACTORS_NOT_APPLICABLE   (0)
 
+#define SUNXI_FACTORS_MUX_MASK 0x3
+
 struct clk_factors_config {
        u8 nshift;
        u8 nwidth;
@@ -18,6 +21,14 @@ struct clk_factors_config {
        u8 n_start;
 };
 
+struct factors_data {
+       int enable;
+       int mux;
+       struct clk_factors_config *table;
+       void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
+       const char *name;
+};
+
 struct clk_factors {
        struct clk_hw hw;
        void __iomem *reg;
@@ -26,5 +37,8 @@ struct clk_factors {
        spinlock_t *lock;
 };
 
-extern const struct clk_ops clk_factors_ops;
+struct clk * __init sunxi_factors_register(struct device_node *node,
+                                          const struct factors_data *data,
+                                          spinlock_t *lock);
+
 #endif
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
new file mode 100644 (file)
index 0000000..4a56385
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2013 Emilio López
+ *
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+
+#include "clk-factors.h"
+
+/**
+ * sun4i_a10_get_mod0_factors() - calculates m, p factors for MOD0-style clocks
+ * MOD0 rate is calculated as follows:
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+
+static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
+                                      u8 *n, u8 *k, u8 *m, u8 *p)
+{
+       u8 div, calcm, calcp;
+
+       /* These clocks can only divide, so we will never be able to achieve
+        * frequencies higher than the parent frequency */
+       if (*freq > parent_rate)
+               *freq = parent_rate;
+
+       div = DIV_ROUND_UP(parent_rate, *freq);
+
+       if (div < 16)
+               calcp = 0;
+       else if (div / 2 < 16)
+               calcp = 1;
+       else if (div / 4 < 16)
+               calcp = 2;
+       else
+               calcp = 3;
+
+       calcm = DIV_ROUND_UP(div, 1 << calcp);
+
+       *freq = (parent_rate >> calcp) / calcm;
+
+       /* we were called to round the frequency, we can now return */
+       if (n == NULL)
+               return;
+
+       *m = calcm - 1;
+       *p = calcp;
+}
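/*
 * Rough illustration (assumed rates): asking for 400 kHz from a 24 MHz
 * parent gives div = 60, hence calcp = 2 (60 / 4 < 16) and
 * calcm = DIV_ROUND_UP(60, 4) = 15, i.e. (24 MHz >> 2) / 15 = 400 kHz.
 */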
+
+/* user manual says "n" but it's really "p" */
+static struct clk_factors_config sun4i_a10_mod0_config = {
+       .mshift = 0,
+       .mwidth = 4,
+       .pshift = 16,
+       .pwidth = 2,
+};
+
+static const struct factors_data sun4i_a10_mod0_data __initconst = {
+       .enable = 31,
+       .mux = 24,
+       .table = &sun4i_a10_mod0_config,
+       .getter = sun4i_a10_get_mod0_factors,
+};
+
+static DEFINE_SPINLOCK(sun4i_a10_mod0_lock);
+
+static void __init sun4i_a10_mod0_setup(struct device_node *node)
+{
+       sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun4i_a10_mod0_lock);
+}
+CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup);
+
+static DEFINE_SPINLOCK(sun5i_a13_mbus_lock);
+
+static void __init sun5i_a13_mbus_setup(struct device_node *node)
+{
+       struct clk *mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun5i_a13_mbus_lock);
+
+       /* The MBUS clock always needs to be enabled */
+       __clk_get(mbus);
+       clk_prepare_enable(mbus);
+}
+CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup);
+
+struct mmc_phase_data {
+       u8      offset;
+};
+
+struct mmc_phase {
+       struct clk_hw           hw;
+       void __iomem            *reg;
+       struct mmc_phase_data   *data;
+       spinlock_t              *lock;
+};
+
+#define to_mmc_phase(_hw) container_of(_hw, struct mmc_phase, hw)
+
+static int mmc_get_phase(struct clk_hw *hw)
+{
+       struct clk *mmc, *mmc_parent, *clk = hw->clk;
+       struct mmc_phase *phase = to_mmc_phase(hw);
+       unsigned int mmc_rate, mmc_parent_rate;
+       u16 step, mmc_div;
+       u32 value;
+       u8 delay;
+
+       value = readl(phase->reg);
+       delay = (value >> phase->data->offset) & 0x3;
+
+       if (!delay)
+               return 180;
+
+       /* Get the main MMC clock */
+       mmc = clk_get_parent(clk);
+       if (!mmc)
+               return -EINVAL;
+
+       /* And its rate */
+       mmc_rate = clk_get_rate(mmc);
+       if (!mmc_rate)
+               return -EINVAL;
+
+       /* Now, get the MMC parent (most likely some PLL) */
+       mmc_parent = clk_get_parent(mmc);
+       if (!mmc_parent)
+               return -EINVAL;
+
+       /* And its rate */
+       mmc_parent_rate = clk_get_rate(mmc_parent);
+       if (!mmc_parent_rate)
+               return -EINVAL;
+
+       /* Get MMC clock divider */
+       mmc_div = mmc_parent_rate / mmc_rate;
+
+       step = DIV_ROUND_CLOSEST(360, mmc_div);
+       return delay * step;
+}
+
+static int mmc_set_phase(struct clk_hw *hw, int degrees)
+{
+       struct clk *mmc, *mmc_parent, *clk = hw->clk;
+       struct mmc_phase *phase = to_mmc_phase(hw);
+       unsigned int mmc_rate, mmc_parent_rate;
+       unsigned long flags;
+       u32 value;
+       u8 delay;
+
+       /* Get the main MMC clock */
+       mmc = clk_get_parent(clk);
+       if (!mmc)
+               return -EINVAL;
+
+       /* And its rate */
+       mmc_rate = clk_get_rate(mmc);
+       if (!mmc_rate)
+               return -EINVAL;
+
+       /* Now, get the MMC parent (most likely some PLL) */
+       mmc_parent = clk_get_parent(mmc);
+       if (!mmc_parent)
+               return -EINVAL;
+
+       /* And its rate */
+       mmc_parent_rate = clk_get_rate(mmc_parent);
+       if (!mmc_parent_rate)
+               return -EINVAL;
+
+       if (degrees != 180) {
+               u16 step, mmc_div;
+
+               /* Get MMC clock divider */
+               mmc_div = mmc_parent_rate / mmc_rate;
+
+               /*
+                * We can only outphase the clocks by multiples of the
+                * PLL's period.
+                *
+                * Since the MMC clock is only a divider, the formula to
+                * get the outphasing in degrees is
+                * deg = 360 * delta / period.
+                *
+                * If we simplify this formula, we can see that the only
+                * things we're concerned about are the number of periods
+                * we want to outphase our clock by, and the divider set
+                * by the MMC clock.
+                */
+               step = DIV_ROUND_CLOSEST(360, mmc_div);
+               delay = DIV_ROUND_CLOSEST(degrees, step);
+       } else {
+               delay = 0;
+       }
+
+       spin_lock_irqsave(phase->lock, flags);
+       value = readl(phase->reg);
+       value &= ~GENMASK(phase->data->offset + 3, phase->data->offset);
+       value |= delay << phase->data->offset;
+       writel(value, phase->reg);
+       spin_unlock_irqrestore(phase->lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops mmc_clk_ops = {
+       .get_phase      = mmc_get_phase,
+       .set_phase      = mmc_set_phase,
+};
+
+static void __init sun4i_a10_mmc_phase_setup(struct device_node *node,
+                                            struct mmc_phase_data *data)
+{
+       const char *parent_names[1] = { of_clk_get_parent_name(node, 0) };
+       struct clk_init_data init = {
+               .num_parents    = 1,
+               .parent_names   = parent_names,
+               .ops            = &mmc_clk_ops,
+       };
+
+       struct mmc_phase *phase;
+       struct clk *clk;
+
+       phase = kmalloc(sizeof(*phase), GFP_KERNEL);
+       if (!phase)
+               return;
+
+       phase->hw.init = &init;
+
+       phase->reg = of_iomap(node, 0);
+       if (!phase->reg)
+               goto err_free;
+
+       phase->data = data;
+       phase->lock = &sun4i_a10_mod0_lock;
+
+       if (of_property_read_string(node, "clock-output-names", &init.name))
+               init.name = node->name;
+
+       clk = clk_register(NULL, &phase->hw);
+       if (IS_ERR(clk))
+               goto err_unmap;
+
+       of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+       return;
+
+err_unmap:
+       iounmap(phase->reg);
+err_free:
+       kfree(phase);
+}
+
+
+static struct mmc_phase_data mmc_output_clk = {
+       .offset = 8,
+};
+
+static struct mmc_phase_data mmc_sample_clk = {
+       .offset = 20,
+};
+
+static void __init sun4i_a10_mmc_output_setup(struct device_node *node)
+{
+       sun4i_a10_mmc_phase_setup(node, &mmc_output_clk);
+}
+CLK_OF_DECLARE(sun4i_a10_mmc_output, "allwinner,sun4i-a10-mmc-output-clk", sun4i_a10_mmc_output_setup);
+
+static void __init sun4i_a10_mmc_sample_setup(struct device_node *node)
+{
+       sun4i_a10_mmc_phase_setup(node, &mmc_sample_clk);
+}
+CLK_OF_DECLARE(sun4i_a10_mmc_sample, "allwinner,sun4i-a10-mmc-sample-clk", sun4i_a10_mmc_sample_setup);
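
The phase helpers above boil down to a small piece of integer arithmetic: the PLL feeding the MMC module defines the delay granularity, so one step of the delay field corresponds to 360 degrees divided by the MMC divider, and a requested phase is rounded to the nearest number of PLL periods (180 degrees is the hardware default of zero delay). A minimal host-side sketch of that calculation, using hypothetical example rates and with DIV_ROUND_CLOSEST expanded by hand; this is not kernel code:

#include <stdio.h>

/* same rounding behaviour as the kernel's DIV_ROUND_CLOSEST for positive values */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        /* hypothetical example: PLL at 600 MHz, MMC clock at 50 MHz */
        unsigned int mmc_parent_rate = 600000000;
        unsigned int mmc_rate = 50000000;
        unsigned int degrees = 90;

        unsigned int mmc_div = mmc_parent_rate / mmc_rate;      /* 12 */
        unsigned int step = div_round_closest(360, mmc_div);    /* 30 degrees per PLL period */
        unsigned int delay = div_round_closest(degrees, step);  /* 3 periods of delay */

        printf("step=%u deg, delay field=%u\n", step, delay);
        return 0;
}

With a 600 MHz PLL and a 50 MHz MMC clock the step is 30 degrees, so a requested 90 degrees maps to a delay field of 3, which is what mmc_set_phase() would write into the register at the configured offset.
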
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
new file mode 100644 (file)
index 0000000..8e49b44
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+
+#include "clk-factors.h"
+
+/**
+ * sun8i_a23_get_mbus_factors() - calculates m factor for MBUS clocks
+ * MBUS rate is calculated as follows
+ * rate = parent_rate / (m + 1);
+ */
+
+static void sun8i_a23_get_mbus_factors(u32 *freq, u32 parent_rate,
+                                      u8 *n, u8 *k, u8 *m, u8 *p)
+{
+       u8 div;
+
+       /*
+        * These clocks can only divide, so we will never be able to
+        * achieve frequencies higher than the parent frequency
+        */
+       if (*freq > parent_rate)
+               *freq = parent_rate;
+
+       div = DIV_ROUND_UP(parent_rate, *freq);
+
+       if (div > 8)
+               div = 8;
+
+       *freq = parent_rate / div;
+
+       /* we were called to round the frequency, we can now return */
+       if (m == NULL)
+               return;
+
+       *m = div - 1;
+}
+
+static struct clk_factors_config sun8i_a23_mbus_config = {
+       .mshift = 0,
+       .mwidth = 3,
+};
+
+static const struct factors_data sun8i_a23_mbus_data __initconst = {
+       .enable = 31,
+       .mux = 24,
+       .table = &sun8i_a23_mbus_config,
+       .getter = sun8i_a23_get_mbus_factors,
+};
+
+static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
+
+static void __init sun8i_a23_mbus_setup(struct device_node *node)
+{
+       struct clk *mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
+                                                 &sun8i_a23_mbus_lock);
+
+       /* The MBUS clock needs to be always enabled */
+       __clk_get(mbus);
+       clk_prepare_enable(mbus);
+}
+CLK_OF_DECLARE(sun8i_a23_mbus, "allwinner,sun8i-a23-mbus-clk", sun8i_a23_mbus_setup);
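
The factor computation in sun8i_a23_get_mbus_factors() only ever divides, so a requested rate is first clamped to the parent rate and then rounded to parent_rate / div with div limited to 8, the register field being div - 1. A standalone sketch of that rounding, using hypothetical rates and a hand-expanded DIV_ROUND_UP; this is not driver code:

#include <stdio.h>

/* mirrors the rounding done by sun8i_a23_get_mbus_factors() above */
static unsigned int mbus_round_rate(unsigned int freq, unsigned int parent_rate,
                                    unsigned char *m)
{
        unsigned int div;

        if (freq > parent_rate)
                freq = parent_rate;

        div = (parent_rate + freq - 1) / freq;  /* DIV_ROUND_UP */
        if (div > 8)
                div = 8;

        if (m)
                *m = div - 1;                   /* register field is div - 1 */

        return parent_rate / div;
}

int main(void)
{
        unsigned char m;
        /* hypothetical: 400 MHz parent, 150 MHz requested */
        unsigned int rate = mbus_round_rate(150000000, 400000000, &m);

        printf("rate=%u Hz, m=%u\n", rate, m);
        return 0;
}

For a 400 MHz parent and a 150 MHz request, div rounds up to 3, giving roughly 133.33 MHz and m = 2.
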
index b654b7b1d137b22c81614bdfd274c9d860144582..d5dc951264cab957bcf2522bc61a9c62395c6432 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/reset-controller.h>
+#include <linux/spinlock.h>
 
 #include "clk-factors.h"
 
@@ -319,46 +320,6 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
 
 
 
-/**
- * sun4i_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
- * MOD0 rate is calculated as follows
- * rate = (parent_rate >> p) / (m + 1);
- */
-
-static void sun4i_get_mod0_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
-{
-       u8 div, calcm, calcp;
-
-       /* These clocks can only divide, so we will never be able to achieve
-        * frequencies higher than the parent frequency */
-       if (*freq > parent_rate)
-               *freq = parent_rate;
-
-       div = DIV_ROUND_UP(parent_rate, *freq);
-
-       if (div < 16)
-               calcp = 0;
-       else if (div / 2 < 16)
-               calcp = 1;
-       else if (div / 4 < 16)
-               calcp = 2;
-       else
-               calcp = 3;
-
-       calcm = DIV_ROUND_UP(div, 1 << calcp);
-
-       *freq = (parent_rate >> calcp) / calcm;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm - 1;
-       *p = calcp;
-}
-
-
 
 /**
  * sun7i_a20_get_out_factors() - calculates m, p factors for CLK_OUT_A/B
@@ -440,16 +401,6 @@ EXPORT_SYMBOL(clk_sunxi_mmc_phase_control);
  * sunxi_factors_clk_setup() - Setup function for factor clocks
  */
 
-#define SUNXI_FACTORS_MUX_MASK 0x3
-
-struct factors_data {
-       int enable;
-       int mux;
-       struct clk_factors_config *table;
-       void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
-       const char *name;
-};
-
 static struct clk_factors_config sun4i_pll1_config = {
        .nshift = 8,
        .nwidth = 5,
@@ -503,14 +454,6 @@ static struct clk_factors_config sun4i_apb1_config = {
        .pwidth = 2,
 };
 
-/* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun4i_mod0_config = {
-       .mshift = 0,
-       .mwidth = 4,
-       .pshift = 16,
-       .pwidth = 2,
-};
-
 /* user manual says "n" but it's really "p" */
 static struct clk_factors_config sun7i_a20_out_config = {
        .mshift = 8,
@@ -568,13 +511,6 @@ static const struct factors_data sun4i_apb1_data __initconst = {
        .getter = sun4i_get_apb1_factors,
 };
 
-static const struct factors_data sun4i_mod0_data __initconst = {
-       .enable = 31,
-       .mux = 24,
-       .table = &sun4i_mod0_config,
-       .getter = sun4i_get_mod0_factors,
-};
-
 static const struct factors_data sun7i_a20_out_data __initconst = {
        .enable = 31,
        .mux = 24,
@@ -583,89 +519,9 @@ static const struct factors_data sun7i_a20_out_data __initconst = {
 };
 
 static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
-                                               const struct factors_data *data)
+                                                  const struct factors_data *data)
 {
-       struct clk *clk;
-       struct clk_factors *factors;
-       struct clk_gate *gate = NULL;
-       struct clk_mux *mux = NULL;
-       struct clk_hw *gate_hw = NULL;
-       struct clk_hw *mux_hw = NULL;
-       const char *clk_name = node->name;
-       const char *parents[SUNXI_MAX_PARENTS];
-       void __iomem *reg;
-       int i = 0;
-
-       reg = of_iomap(node, 0);
-
-       /* if we have a mux, we will have >1 parents */
-       while (i < SUNXI_MAX_PARENTS &&
-              (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
-               i++;
-
-       /*
-        * some factor clocks, such as pll5 and pll6, may have multiple
-        * outputs, and have their name designated in factors_data
-        */
-       if (data->name)
-               clk_name = data->name;
-       else
-               of_property_read_string(node, "clock-output-names", &clk_name);
-
-       factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
-       if (!factors)
-               return NULL;
-
-       /* Add a gate if this factor clock can be gated */
-       if (data->enable) {
-               gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-               if (!gate) {
-                       kfree(factors);
-                       return NULL;
-               }
-
-               /* set up gate properties */
-               gate->reg = reg;
-               gate->bit_idx = data->enable;
-               gate->lock = &clk_lock;
-               gate_hw = &gate->hw;
-       }
-
-       /* Add a mux if this factor clock can be muxed */
-       if (data->mux) {
-               mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
-               if (!mux) {
-                       kfree(factors);
-                       kfree(gate);
-                       return NULL;
-               }
-
-               /* set up gate properties */
-               mux->reg = reg;
-               mux->shift = data->mux;
-               mux->mask = SUNXI_FACTORS_MUX_MASK;
-               mux->lock = &clk_lock;
-               mux_hw = &mux->hw;
-       }
-
-       /* set up factors properties */
-       factors->reg = reg;
-       factors->config = data->table;
-       factors->get_factors = data->getter;
-       factors->lock = &clk_lock;
-
-       clk = clk_register_composite(NULL, clk_name,
-                       parents, i,
-                       mux_hw, &clk_mux_ops,
-                       &factors->hw, &clk_factors_ops,
-                       gate_hw, &clk_gate_ops, 0);
-
-       if (!IS_ERR(clk)) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
-       }
-
-       return clk;
+       return sunxi_factors_register(node, data, &clk_lock);
 }
 
 
@@ -762,10 +618,19 @@ static const struct div_data sun4i_ahb_data __initconst = {
        .width  = 2,
 };
 
+static const struct clk_div_table sun4i_apb0_table[] __initconst = {
+       { .val = 0, .div = 2 },
+       { .val = 1, .div = 2 },
+       { .val = 2, .div = 4 },
+       { .val = 3, .div = 8 },
+       { } /* sentinel */
+};
+
 static const struct div_data sun4i_apb0_data __initconst = {
        .shift  = 8,
        .pow    = 1,
        .width  = 2,
+       .table  = sun4i_apb0_table,
 };
 
 static const struct div_data sun6i_a31_apb2_div_data __initconst = {
@@ -1199,7 +1064,6 @@ static const struct of_device_id clk_factors_match[] __initconst = {
        {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
        {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
        {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
-       {.compatible = "allwinner,sun4i-a10-mod0-clk", .data = &sun4i_mod0_data,},
        {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
        {}
 };
@@ -1311,7 +1175,6 @@ static void __init sun4i_a10_init_clocks(struct device_node *node)
 CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks);
 
 static const char *sun5i_critical_clocks[] __initdata = {
-       "mbus",
        "pll5_ddr",
        "ahb_sdram",
 };
index 9525c684d14930267fbf00ec634550e52469a373..e3a85842ce0c05890351f49f4d4a6c330812b9ab 100644 (file)
@@ -1166,6 +1166,12 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
        clk_register_clkdev(clk, "pll_c_out1", NULL);
        clks[TEGRA124_CLK_PLL_C_OUT1] = clk;
 
+       /* PLLC_UD */
+       clk = clk_register_fixed_factor(NULL, "pll_c_ud", "pll_c",
+                                       CLK_SET_RATE_PARENT, 1, 1);
+       clk_register_clkdev(clk, "pll_c_ud", NULL);
+       clks[TEGRA124_CLK_PLL_C_UD] = clk;
+
        /* PLLC2 */
        clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0,
                             &pll_c2_params, NULL);
@@ -1198,6 +1204,8 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
        /* PLLM_UD */
        clk = clk_register_fixed_factor(NULL, "pll_m_ud", "pll_m",
                                        CLK_SET_RATE_PARENT, 1, 1);
+       clk_register_clkdev(clk, "pll_m_ud", NULL);
+       clks[TEGRA124_CLK_PLL_M_UD] = clk;
 
        /* PLLU */
        val = readl(clk_base + pll_u_params.base_reg);
index f87c609e8f727149c5402e55508815801f9e186c..97dc8595c3cd5c1b59113deaa16bbd23a5298cfc 100644 (file)
@@ -207,8 +207,13 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
 
        for (; tbl->clk_id < clk_max; tbl++) {
                clk = clks[tbl->clk_id];
-               if (IS_ERR_OR_NULL(clk))
-                       return;
+               if (IS_ERR_OR_NULL(clk)) {
+                       pr_err("%s: invalid entry %ld in clks array for id %d\n",
+                              __func__, PTR_ERR(clk), tbl->clk_id);
+                       WARN_ON(1);
+
+                       continue;
+               }
 
                if (tbl->parent_id < clk_max) {
                        struct clk *parent = clks[tbl->parent_id];
index 990e1d9edc01a074d746ab46de149915942ff4e1..59bb4b39d12e799f6d32d8f5a3d898e4dc9d1c46 100644 (file)
@@ -203,6 +203,7 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
 
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               kfree(parent_names);
                return;
        }
 cleanup:
@@ -228,6 +229,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
        cinfo->iobase = of_iomap(node, 0);
        cinfo->dev = &pdev->dev;
        pm_runtime_enable(cinfo->dev);
+       pm_runtime_irq_safe(cinfo->dev);
 
        pm_runtime_get_sync(cinfo->dev);
        atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
index b1a6f7144f3fcb10e96608b5247b36f227f04600..337abe5909e1f272cdcdca85e5ae0fb3fe1f4729 100644 (file)
@@ -25,8 +25,8 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
-static int ti_dt_clk_memmap_index;
 struct ti_clk_ll_ops *ti_clk_ll_ops;
+static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
 
 /**
  * ti_dt_clocks_register - register DT alias clocks during boot
@@ -108,9 +108,21 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
        struct clk_omap_reg *reg;
        u32 val;
        u32 tmp;
+       int i;
 
        reg = (struct clk_omap_reg *)&tmp;
-       reg->index = ti_dt_clk_memmap_index;
+
+       for (i = 0; i < CLK_MAX_MEMMAPS; i++) {
+               if (clocks_node_ptr[i] == node->parent)
+                       break;
+       }
+
+       if (i == CLK_MAX_MEMMAPS) {
+               pr_err("clk-provider not found for %s!\n", node->name);
+               return NULL;
+       }
+
+       reg->index = i;
 
        if (of_property_read_u32_index(node, "reg", index, &val)) {
                pr_err("%s must have reg[%d]!\n", node->name, index);
@@ -127,20 +139,14 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
  * @parent: master node
  * @index: internal index for clk_reg_ops
  *
- * Initializes a master clock IP block and its child clock nodes.
- * Regmap is provided for accessing the register space for the
- * IP block and all the clocks under it.
+ * Initializes a master clock IP block. This basically sets up the
+ * mapping from clocks node to the memory map index. All the clocks
+ * are then initialized through the common of_clk_init call, and the
+ * clocks will access their memory maps based on the node layout.
  */
 void ti_dt_clk_init_provider(struct device_node *parent, int index)
 {
-       const struct of_device_id *match;
-       struct device_node *np;
        struct device_node *clocks;
-       of_clk_init_cb_t clk_init_cb;
-       struct clk_init_item *retry;
-       struct clk_init_item *tmp;
-
-       ti_dt_clk_memmap_index = index;
 
        /* get clocks for this parent */
        clocks = of_get_child_by_name(parent, "clocks");
@@ -149,19 +155,31 @@ void ti_dt_clk_init_provider(struct device_node *parent, int index)
                return;
        }
 
-       for_each_child_of_node(clocks, np) {
-               match = of_match_node(&__clk_of_table, np);
-               if (!match)
-                       continue;
-               clk_init_cb = (of_clk_init_cb_t)match->data;
-               pr_debug("%s: initializing: %s\n", __func__, np->name);
-               clk_init_cb(np);
-       }
+       /* add clocks node info */
+       clocks_node_ptr[index] = clocks;
+}
 
-       list_for_each_entry_safe(retry, tmp, &retry_list, link) {
-               pr_debug("retry-init: %s\n", retry->node->name);
-               retry->func(retry->hw, retry->node);
-               list_del(&retry->link);
-               kfree(retry);
+/**
+ * ti_dt_clk_init_retry_clks - init clocks from the retry list
+ *
+ * Initializes any clocks that previously failed to initialize,
+ * typically because a parent node was missing during the earlier
+ * init pass. This mostly affects DPLLs, which need both of their
+ * parent clocks to be ready during init.
+ */
+void ti_dt_clk_init_retry_clks(void)
+{
+       struct clk_init_item *retry;
+       struct clk_init_item *tmp;
+       int retries = 5;
+
+       while (!list_empty(&retry_list) && retries) {
+               list_for_each_entry_safe(retry, tmp, &retry_list, link) {
+                       pr_debug("retry-init: %s\n", retry->node->name);
+                       retry->func(retry->hw, retry->node);
+                       list_del(&retry->link);
+                       kfree(retry);
+               }
+               retries--;
        }
 }
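
The retry helper above is a bounded fixed-point loop: clocks whose parents were missing during the first init pass are kept on a list and swept again, up to five times, so a stray circular dependency cannot hang boot. A simplified userspace sketch of the pattern, with a hypothetical pending_clk type standing in for the driver's clk_init_item and the re-queueing handled explicitly inside the sweep:

#include <stdio.h>

/* hypothetical stand-in for a clock whose init may fail until its parent exists */
struct pending_clk {
        const char *name;
        int (*init)(struct pending_clk *clk);  /* 0 on success, nonzero to retry */
        struct pending_clk *next;
};

static void init_retry_clks(struct pending_clk **pending)
{
        int retries = 5;

        while (*pending && retries--) {
                struct pending_clk *sweep = *pending;

                *pending = NULL;                        /* take the current sweep */
                while (sweep) {
                        struct pending_clk *clk = sweep;

                        sweep = clk->next;
                        if (clk->init(clk)) {           /* still not ready: requeue */
                                clk->next = *pending;
                                *pending = clk;
                        } else {
                                printf("initialized %s\n", clk->name);
                        }
                }
        }
}

static int parent_ready;

static int init_parent(struct pending_clk *clk)
{
        (void)clk;
        parent_ready = 1;
        return 0;
}

static int init_dpll(struct pending_clk *clk)
{
        (void)clk;
        return parent_ready ? 0 : -1;
}

int main(void)
{
        struct pending_clk parent = { "parent", init_parent, NULL };
        struct pending_clk dpll = { "dpll", init_dpll, &parent };
        struct pending_clk *pending = &dpll;

        init_retry_clks(&pending);
        return 0;
}

Note that the kernel loop above frees each visited entry unconditionally and bounds itself at five sweeps; the sketch only illustrates the bounded-retry shape of the logic, not the driver's list handling.
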
index f1e0038d76acd34028764ab52fbfe8d511a4a1ae..b4c5faccaece634c4c9459c6be311f64e4daaf45 100644 (file)
@@ -36,6 +36,11 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
 
        for (i = 0; i < num_clks; i++) {
                clk = of_clk_get(node, i);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: Failed to get %s clock nr %d (%ld)\n",
+                              __func__, node->full_name, i, PTR_ERR(clk));
+                       continue;
+               }
                if (__clk_get_flags(clk) & CLK_IS_BASIC) {
                        pr_warn("can't setup clkdm for basic clk %s\n",
                                __clk_get_name(clk));
index a837f703be658304b7d2a3a0e2743bd17d7077a7..bff2b5b8ff598b2e150496eb3ebc984c651a0ebd 100644 (file)
@@ -300,8 +300,8 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        return clk;
 }
 
-static struct clk_div_table
-__init *ti_clk_get_div_table(struct device_node *node)
+static struct clk_div_table *
+__init ti_clk_get_div_table(struct device_node *node)
 {
        struct clk_div_table *table;
        const __be32 *divspec;
index f7a32d2326c6428f4f4e8b1000381b7dc4301e5f..773bcde893c0472b3ecf8f2e9e3a5356d532a402 100644 (file)
@@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
                goto out;
        }
 
-       freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC);
+       freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
        if (!freq_table) {
                ret = -ENOMEM;
                goto out;
index e396ad3f8f3fcd10c4ce9063e9257ad3ce25ab9f..0668b389c5165cae8d02e4bedb270b32b8372bde 100644 (file)
@@ -708,10 +708,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
-       struct cpudata *cpu;
-
-       cpu = all_cpu_data[policy->cpu];
-
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
index 6a9d89c93b1fdd0f7dfb4f8a68dee807efc5f29a..ae2ab14e64b30e5edf9119569d379766eba24b94 100644 (file)
@@ -362,8 +362,9 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
                        vchan_cyclic_callback(&chan->desc->vdesc);
                } else {
                        if (chan->next_sg == chan->desc->num_sgs) {
-                               chan->desc = NULL;
+                               list_del(&chan->desc->vdesc.node);
                                vchan_cookie_complete(&chan->desc->vdesc);
+                               chan->desc = NULL;
                        }
                }
        }
index 6557147d9331e252d38c566a07b61ada3b7a7f71..7e4c43c189600cc042248f21e8796c86acf79755 100644 (file)
@@ -241,9 +241,6 @@ static void bt8xxgpio_remove(struct pci_dev *pdev)
        bgwrite(~0x0, BT848_INT_STAT);
        bgwrite(0x0, BT848_GPIO_OUT_EN);
 
-       iounmap(bg->mmio);
-       release_mem_region(pci_resource_start(pdev, 0),
-                          pci_resource_len(pdev, 0));
        pci_disable_device(pdev);
 }
 
index a2cc6be97983fe327cd4f67a4c3204cd4985691c..b792194e0d9ceb6320457c99f270a67680a2d6d8 100644 (file)
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
        uint32_t data, jreg;
+       ast_open_key(ast);
 
        if (dev->pdev->device == PCI_CHIP_AST1180) {
                ast->chip = AST1100;
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev)
                        }
                        ast->vga2_clone = false;
                } else {
-                       ast->chip = 2000;
+                       ast->chip = AST2000;
                        DRM_INFO("AST 2000 detected\n");
                }
        }
index 2e7f03ad5ee2e9e84e9de0588ce78d8ad67f86f2..9933c26017edae26a2bab5879e8be0543735c027 100644 (file)
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_power_domains_init_hw(dev_priv);
 
+       /*
+        * We enable some interrupt sources in our postinstall hooks, so mark
+        * interrupts as enabled _before_ actually enabling them to avoid
+        * special cases in our ordering checks.
+        */
+       dev_priv->pm._irqs_disabled = false;
+
        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret)
                goto cleanup_gem_stolen;
 
-       dev_priv->pm._irqs_disabled = false;
-
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
index 7a830eac5ba30a7859eb65313d9ee567c5d324eb..3524306d8cfbf7b9d7e93b2bd076774eb69b36bc 100644 (file)
@@ -184,6 +184,7 @@ enum hpd_pin {
                if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
        struct i915_gtt gtt; /* VM representing the global address space */
 
        struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-       DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+       DECLARE_HASHTABLE(mm_structs, 7);
+       struct mutex mm_lock;
 
        /* Kernel Modesetting */
 
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
                        unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-                       struct mm_struct *mm;
-                       struct i915_mmu_object *mn;
+                       struct i915_mm_struct *mm;
+                       struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;
        };
index ba7f5c6bb50d1f7e5b886ea20e74ed58a9ae5247..ad55b06a3cb1c69754fe60947a183e0af6604e46 100644 (file)
@@ -1590,10 +1590,13 @@ unlock:
 out:
        switch (ret) {
        case -EIO:
-               /* If this -EIO is due to a gpu hang, give the reset code a
-                * chance to clean up the mess. Otherwise return the proper
-                * SIGBUS. */
-               if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+               /*
+                * We eat errors when the gpu is terminally wedged to avoid
+                * userspace unduly crashing (gl has no provisions for mmaps to
+                * fail). But any other -EIO isn't ours (e.g. swap in failure)
+                * and so needs to be reported.
+                */
+               if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
                        ret = VM_FAULT_SIGBUS;
                        break;
                }
index fe69fc837d9ee25f33b3dbd15664c2c33b9ca721..d384139973790e09d2da3583861c7224b1596f44 100644 (file)
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
 
+struct i915_mm_struct {
+       struct mm_struct *mm;
+       struct drm_device *dev;
+       struct i915_mmu_notifier *mn;
+       struct hlist_node node;
+       struct kref kref;
+       struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>
 
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
        struct mmu_notifier mn;
        struct rb_root objects;
        struct list_head linear;
-       struct drm_device *dev;
-       struct mm_struct *mm;
-       struct work_struct work;
-       unsigned long count;
        unsigned long serial;
        bool has_linear;
 };
 
 struct i915_mmu_object {
-       struct i915_mmu_notifier *mmu;
+       struct i915_mmu_notifier *mn;
        struct interval_tree_node it;
        struct list_head link;
        struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
                                      unsigned long start,
                                      unsigned long end)
 {
-       struct i915_mmu_object *mmu;
+       struct i915_mmu_object *mo;
        unsigned long serial;
 
 restart:
        serial = mn->serial;
-       list_for_each_entry(mmu, &mn->linear, link) {
+       list_for_each_entry(mo, &mn->linear, link) {
                struct drm_i915_gem_object *obj;
 
-               if (mmu->it.last < start || mmu->it.start > end)
+               if (mo->it.last < start || mo->it.start > end)
                        continue;
 
-               obj = mmu->obj;
+               obj = mo->obj;
                drm_gem_object_reference(&obj->base);
                spin_unlock(&mn->lock);
 
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_mmu_notifier *mmu;
-
-       /* Protected by dev->struct_mutex */
-       hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-               if (mmu->mm == mm)
-                       return mmu;
-
-       return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_mmu_notifier *mmu;
+       struct i915_mmu_notifier *mn;
        int ret;
 
-       lockdep_assert_held(&dev->struct_mutex);
-
-       mmu = __i915_mmu_notifier_lookup(dev, mm);
-       if (mmu)
-               return mmu;
-
-       mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
-       if (mmu == NULL)
+       mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+       if (mn == NULL)
                return ERR_PTR(-ENOMEM);
 
-       spin_lock_init(&mmu->lock);
-       mmu->dev = dev;
-       mmu->mn.ops = &i915_gem_userptr_notifier;
-       mmu->mm = mm;
-       mmu->objects = RB_ROOT;
-       mmu->count = 0;
-       mmu->serial = 1;
-       INIT_LIST_HEAD(&mmu->linear);
-       mmu->has_linear = false;
-
-       /* Protected by mmap_sem (write-lock) */
-       ret = __mmu_notifier_register(&mmu->mn, mm);
+       spin_lock_init(&mn->lock);
+       mn->mn.ops = &i915_gem_userptr_notifier;
+       mn->objects = RB_ROOT;
+       mn->serial = 1;
+       INIT_LIST_HEAD(&mn->linear);
+       mn->has_linear = false;
+
+        /* Protected by mmap_sem (write-lock) */
+       ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
-               kfree(mmu);
+               kfree(mn);
                return ERR_PTR(ret);
        }
 
-       /* Protected by dev->struct_mutex */
-       hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
-       return mmu;
+       return mn;
 }
 
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
 {
-       struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
-       mmu_notifier_unregister(&mmu->mn, mmu->mm);
-       kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
-       lockdep_assert_held(&mmu->dev->struct_mutex);
-
-       /* Protected by dev->struct_mutex */
-       hash_del(&mmu->node);
-
-       /* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
-        * We enter the function holding struct_mutex, therefore we need
-        * to drop our mutex prior to calling mmu_notifier_unregister in
-        * order to prevent lock inversion (and system-wide deadlock)
-        * between the mmap_sem and struct-mutex. Hence we defer the
-        * unregistration to a workqueue where we hold no locks.
-        */
-       INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
-       schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
-       if (++mmu->serial == 0)
-               mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
-       struct i915_mmu_object *mn;
-
-       list_for_each_entry(mn, &mmu->linear, link)
-               if (mn->is_linear)
-                       return true;
-
-       return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
-                     struct i915_mmu_object *mn)
-{
-       lockdep_assert_held(&mmu->dev->struct_mutex);
-
-       spin_lock(&mmu->lock);
-       list_del(&mn->link);
-       if (mn->is_linear)
-               mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
-       else
-               interval_tree_remove(&mn->it, &mmu->objects);
-       __i915_mmu_notifier_update_serial(mmu);
-       spin_unlock(&mmu->lock);
-
-       /* Protected against _add() by dev->struct_mutex */
-       if (--mmu->count == 0)
-               __i915_mmu_notifier_destroy(mmu);
+       if (++mn->serial == 0)
+               mn->serial = 1;
 }
 
 static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
-                     struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+                     struct i915_mmu_notifier *mn,
+                     struct i915_mmu_object *mo)
 {
        struct interval_tree_node *it;
        int ret;
 
-       ret = i915_mutex_lock_interruptible(mmu->dev);
+       ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
         * remove the objects from the interval tree) before we do
         * the check for overlapping objects.
         */
-       i915_gem_retire_requests(mmu->dev);
+       i915_gem_retire_requests(dev);
 
-       spin_lock(&mmu->lock);
-       it = interval_tree_iter_first(&mmu->objects,
-                                     mn->it.start, mn->it.last);
+       spin_lock(&mn->lock);
+       it = interval_tree_iter_first(&mn->objects,
+                                     mo->it.start, mo->it.last);
        if (it) {
                struct drm_i915_gem_object *obj;
 
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 
                obj = container_of(it, struct i915_mmu_object, it)->obj;
                if (!obj->userptr.workers)
-                       mmu->has_linear = mn->is_linear = true;
+                       mn->has_linear = mo->is_linear = true;
                else
                        ret = -EAGAIN;
        } else
-               interval_tree_insert(&mn->it, &mmu->objects);
+               interval_tree_insert(&mo->it, &mn->objects);
 
        if (ret == 0) {
-               list_add(&mn->link, &mmu->linear);
-               __i915_mmu_notifier_update_serial(mmu);
+               list_add(&mo->link, &mn->linear);
+               __i915_mmu_notifier_update_serial(mn);
        }
-       spin_unlock(&mmu->lock);
-       mutex_unlock(&mmu->dev->struct_mutex);
+       spin_unlock(&mn->lock);
+       mutex_unlock(&dev->struct_mutex);
 
        return ret;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+       struct i915_mmu_object *mo;
+
+       list_for_each_entry(mo, &mn->linear, link)
+               if (mo->is_linear)
+                       return true;
+
+       return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+                     struct i915_mmu_object *mo)
+{
+       spin_lock(&mn->lock);
+       list_del(&mo->link);
+       if (mo->is_linear)
+               mn->has_linear = i915_mmu_notifier_has_linear(mn);
+       else
+               interval_tree_remove(&mo->it, &mn->objects);
+       __i915_mmu_notifier_update_serial(mn);
+       spin_unlock(&mn->lock);
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
-       struct i915_mmu_object *mn;
+       struct i915_mmu_object *mo;
 
-       mn = obj->userptr.mn;
-       if (mn == NULL)
+       mo = obj->userptr.mmu_object;
+       if (mo == NULL)
                return;
 
-       i915_mmu_notifier_del(mn->mmu, mn);
-       obj->userptr.mn = NULL;
+       i915_mmu_notifier_del(mo->mn, mo);
+       kfree(mo);
+
+       obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+       if (mm->mn == NULL) {
+               down_write(&mm->mm->mmap_sem);
+               mutex_lock(&to_i915(mm->dev)->mm_lock);
+               if (mm->mn == NULL)
+                       mm->mn = i915_mmu_notifier_create(mm->mm);
+               mutex_unlock(&to_i915(mm->dev)->mm_lock);
+               up_write(&mm->mm->mmap_sem);
+       }
+       return mm->mn;
 }
 
 static int
 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
 {
-       struct i915_mmu_notifier *mmu;
-       struct i915_mmu_object *mn;
+       struct i915_mmu_notifier *mn;
+       struct i915_mmu_object *mo;
        int ret;
 
        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 
-       down_write(&obj->userptr.mm->mmap_sem);
-       ret = i915_mutex_lock_interruptible(obj->base.dev);
-       if (ret == 0) {
-               mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
-               if (!IS_ERR(mmu))
-                       mmu->count++; /* preemptive add to act as a refcount */
-               else
-                       ret = PTR_ERR(mmu);
-               mutex_unlock(&obj->base.dev->struct_mutex);
-       }
-       up_write(&obj->userptr.mm->mmap_sem);
-       if (ret)
-               return ret;
+       if (WARN_ON(obj->userptr.mm == NULL))
+               return -EINVAL;
 
-       mn = kzalloc(sizeof(*mn), GFP_KERNEL);
-       if (mn == NULL) {
-               ret = -ENOMEM;
-               goto destroy_mmu;
-       }
+       mn = i915_mmu_notifier_find(obj->userptr.mm);
+       if (IS_ERR(mn))
+               return PTR_ERR(mn);
 
-       mn->mmu = mmu;
-       mn->it.start = obj->userptr.ptr;
-       mn->it.last = mn->it.start + obj->base.size - 1;
-       mn->obj = obj;
+       mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+       if (mo == NULL)
+               return -ENOMEM;
 
-       ret = i915_mmu_notifier_add(mmu, mn);
-       if (ret)
-               goto free_mn;
+       mo->mn = mn;
+       mo->it.start = obj->userptr.ptr;
+       mo->it.last = mo->it.start + obj->base.size - 1;
+       mo->obj = obj;
 
-       obj->userptr.mn = mn;
+       ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+       if (ret) {
+               kfree(mo);
+               return ret;
+       }
+
+       obj->userptr.mmu_object = mo;
        return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+                      struct mm_struct *mm)
+{
+       if (mn == NULL)
+               return;
 
-free_mn:
+       mmu_notifier_unregister(&mn->mn, mm);
        kfree(mn);
-destroy_mmu:
-       mutex_lock(&obj->base.dev->struct_mutex);
-       if (--mmu->count == 0)
-               __i915_mmu_notifier_destroy(mmu);
-       mutex_unlock(&obj->base.dev->struct_mutex);
-       return ret;
 }
 
 #else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 
        return 0;
 }
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+                      struct mm_struct *mm)
+{
+}
+
 #endif
 
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+       struct i915_mm_struct *mm;
+
+       /* Protected by dev_priv->mm_lock */
+       hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+               if (mm->mm == real)
+                       return mm;
+
+       return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct i915_mm_struct *mm;
+       int ret = 0;
+
+       /* During release of the GEM object we hold the struct_mutex. This
+        * precludes us from calling mmput() at that time as that may be
+        * the last reference and so call exit_mmap(). exit_mmap() will
+        * attempt to reap the vma, and if we were holding a GTT mmap
+        * would then call drm_gem_vm_close() and attempt to reacquire
+        * the struct mutex. So in order to avoid that recursion, we have
+        * to defer releasing the mm reference until after we drop the
+        * struct_mutex, i.e. we need to schedule a worker to do the clean
+        * up.
+        */
+       mutex_lock(&dev_priv->mm_lock);
+       mm = __i915_mm_struct_find(dev_priv, current->mm);
+       if (mm == NULL) {
+               mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+               if (mm == NULL) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               kref_init(&mm->kref);
+               mm->dev = obj->base.dev;
+
+               mm->mm = current->mm;
+               atomic_inc(&current->mm->mm_count);
+
+               mm->mn = NULL;
+
+               /* Protected by dev_priv->mm_lock */
+               hash_add(dev_priv->mm_structs,
+                        &mm->node, (unsigned long)mm->mm);
+       } else
+               kref_get(&mm->kref);
+
+       obj->userptr.mm = mm;
+out:
+       mutex_unlock(&dev_priv->mm_lock);
+       return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+       struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+       i915_mmu_notifier_free(mm->mn, mm->mm);
+       mmdrop(mm->mm);
+       kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+       struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+       /* Protected by dev_priv->mm_lock */
+       hash_del(&mm->node);
+       mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+       INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+       schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+       if (obj->userptr.mm == NULL)
+               return;
+
+       kref_put_mutex(&obj->userptr.mm->kref,
+                      __i915_mm_struct_free,
+                      &to_i915(obj->base.dev)->mm_lock);
+       obj->userptr.mm = NULL;
+}
+
 struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
 };
 
-
 #if IS_ENABLED(CONFIG_SWIOTLB)
 #define swiotlb_active() swiotlb_nr_tbl()
 #else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (pvec == NULL)
                pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (pvec != NULL) {
-               struct mm_struct *mm = obj->userptr.mm;
+               struct mm_struct *mm = obj->userptr.mm->mm;
 
                down_read(&mm->mmap_sem);
                while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
        pvec = NULL;
        pinned = 0;
-       if (obj->userptr.mm == current->mm) {
+       if (obj->userptr.mm->mm == current->mm) {
                pvec = kmalloc(num_pages*sizeof(struct page *),
                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
        i915_gem_userptr_release__mmu_notifier(obj);
-
-       if (obj->userptr.mm) {
-               mmput(obj->userptr.mm);
-               obj->userptr.mm = NULL;
-       }
+       i915_gem_userptr_release__mm_struct(obj);
 }
 
 static int
 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 {
-       if (obj->userptr.mn)
+       if (obj->userptr.mmu_object)
                return 0;
 
        return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                return -ENODEV;
        }
 
-       /* Allocate the new object */
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
-       ret = -ENOMEM;
-       if ((obj->userptr.mm = get_task_mm(current)))
+       ret = i915_gem_userptr_init__mm_struct(obj);
+       if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 int
 i915_gem_init_userptr(struct drm_device *dev)
 {
-#if defined(CONFIG_MMU_NOTIFIER)
        struct drm_i915_private *dev_priv = to_i915(dev);
-       hash_init(dev_priv->mmu_notifiers);
-#endif
+       mutex_init(&dev_priv->mm_lock);
+       hash_init(dev_priv->mm_structs);
        return 0;
 }
index e4d7607da2c482ee6e31576e746de5ca71b6e28a..f29b44c86a2f7354c29dd0ce3ca3a5cfc7ae723c 100644 (file)
 #define GFX_OP_DESTBUFFER_INFO  ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD                  (2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD               ((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD            ((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT       ((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA    (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB      (1<<20)
+#define   BLT_WRITE_A                  (2<<20)
+#define   BLT_WRITE_RGB                        (1<<20)
+#define   BLT_WRITE_RGBA               (BLT_WRITE_RGB | BLT_WRITE_A)
 #define   BLT_DEPTH_8                  (0<<24)
 #define   BLT_DEPTH_16_565             (1<<24)
 #define   BLT_DEPTH_16_1555            (2<<24)
 #define   BLT_DEPTH_32                 (3<<24)
-#define   BLT_ROP_GXCOPY               (0xcc<<16)
+#define   BLT_ROP_SRC_COPY             (0xcc<<16)
+#define   BLT_ROP_COLOR_COPY           (0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED      (1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED      (1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
index a66955037e4e2f30bb9662fed0be12a02d5deaf2..eee79e1c322253165a4c58cff448fa401119abcd 100644 (file)
@@ -1123,7 +1123,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
        }
 }
 
-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
 {
        DRM_DEBUG_KMS("Falling back to manually reading VBT from "
                      "VBIOS ROM for %s\n",
index e8abfce40976c74a2f9cff93c4d93c897eda112e..9212e6504e0f1a2867ae7913d01cf019107d77b0 100644 (file)
@@ -804,7 +804,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
        .destroy = intel_encoder_destroy,
 };
 
-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
 {
        DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
        return 1;
index d074d704f458f3b8f81774391d2a6459e416f360..d8324c69fa868de2364cf1b272426f5a634c5c31 100644 (file)
@@ -2233,6 +2233,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
        if (need_vtd_wa(dev) && alignment < 256 * 1024)
                alignment = 256 * 1024;
 
+       /*
+        * Global gtt pte registers are special registers which actually forward
+        * writes to a chunk of system memory. Which means that there is no risk
+        * that the register values disappear as soon as we call
+        * intel_runtime_pm_put(), so it is correct to wrap only the
+        * pin/unpin/fence and not more.
+        */
+       intel_runtime_pm_get(dev_priv);
+
        dev_priv->mm.interruptible = false;
        ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
        if (ret)
@@ -2250,12 +2259,14 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
        i915_gem_object_pin_fence(obj);
 
        dev_priv->mm.interruptible = true;
+       intel_runtime_pm_put(dev_priv);
        return 0;
 
 err_unpin:
        i915_gem_object_unpin_from_display_plane(obj);
 err_interruptible:
        dev_priv->mm.interruptible = true;
+       intel_runtime_pm_put(dev_priv);
        return ret;
 }
 
@@ -4188,10 +4199,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
                intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
 
        intel_disable_pipe(dev_priv, pipe);
-
-       if (intel_crtc->config.dp_encoder_is_mst)
-               intel_ddi_set_vc_payload_alloc(crtc, false);
-
        ironlake_pfit_disable(intel_crtc);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4256,6 +4263,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
                intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
        intel_disable_pipe(dev_priv, pipe);
 
+       if (intel_crtc->config.dp_encoder_is_mst)
+               intel_ddi_set_vc_payload_alloc(crtc, false);
+
        intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
        ironlake_pfit_disable(intel_crtc);
@@ -8240,6 +8250,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
                        goto fail_locked;
                }
 
+               /*
+                * Global gtt pte registers are special registers which actually
+                * forward writes to a chunk of system memory. Which means that
+                * there is no risk that the register values disappear as soon
+                * as we call intel_runtime_pm_put(), so it is correct to wrap
+                * only the pin/unpin/fence and not more.
+                */
+               intel_runtime_pm_get(dev_priv);
+
                /* Note that the w/a also requires 2 PTE of padding following
                 * the bo. We currently fill all unused PTE with the shadow
                 * page and so we should always have valid PTE following the
@@ -8252,16 +8271,20 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
                ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
                if (ret) {
                        DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
+                       intel_runtime_pm_put(dev_priv);
                        goto fail_locked;
                }
 
                ret = i915_gem_object_put_fence(obj);
                if (ret) {
                        DRM_DEBUG_KMS("failed to release fence for cursor");
+                       intel_runtime_pm_put(dev_priv);
                        goto fail_unpin;
                }
 
                addr = i915_gem_obj_ggtt_offset(obj);
+
+               intel_runtime_pm_put(dev_priv);
        } else {
                int align = IS_I830(dev) ? 16 * 1024 : 256;
                ret = i915_gem_object_attach_phys(obj, align);
@@ -12481,6 +12504,9 @@ static struct intel_quirk intel_quirks[] = {
        /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
        { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
 
+       /* Acer C720 Chromebook (Core i3 4005U) */
+       { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
        /* Toshiba CB35 Chromebook (Celeron 2955U) */
        { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
index 67cfed6d911ae99eae257a67b8e05ea64ba145d1..81d7681faa638f1c7879b5cd0aafb3f56a948adf 100644 (file)
@@ -3661,24 +3661,12 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
        return intel_dp_detect_dpcd(intel_dp);
 }
 
-static enum drm_connector_status
-g4x_dp_detect(struct intel_dp *intel_dp)
+static int g4x_digital_port_connected(struct drm_device *dev,
+                                      struct intel_digital_port *intel_dig_port)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        uint32_t bit;
 
-       /* Can't disconnect eDP, but you can close the lid... */
-       if (is_edp(intel_dp)) {
-               enum drm_connector_status status;
-
-               status = intel_panel_detect(dev);
-               if (status == connector_status_unknown)
-                       status = connector_status_connected;
-               return status;
-       }
-
        if (IS_VALLEYVIEW(dev)) {
                switch (intel_dig_port->port) {
                case PORT_B:
@@ -3691,7 +3679,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
                        bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
                        break;
                default:
-                       return connector_status_unknown;
+                       return -EINVAL;
                }
        } else {
                switch (intel_dig_port->port) {
@@ -3705,11 +3693,36 @@ g4x_dp_detect(struct intel_dp *intel_dp)
                        bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
                        break;
                default:
-                       return connector_status_unknown;
+                       return -EINVAL;
                }
        }
 
        if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
+               return 0;
+       return 1;
+}
+
+static enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       int ret;
+
+       /* Can't disconnect eDP, but you can close the lid... */
+       if (is_edp(intel_dp)) {
+               enum drm_connector_status status;
+
+               status = intel_panel_detect(dev);
+               if (status == connector_status_unknown)
+                       status = connector_status_connected;
+               return status;
+       }
+
+       ret = g4x_digital_port_connected(dev, intel_dig_port);
+       if (ret == -EINVAL)
+               return connector_status_unknown;
+       else if (ret == 0)
                return connector_status_disconnected;
 
        return intel_dp_detect_dpcd(intel_dp);
@@ -4066,8 +4079,14 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        intel_display_power_get(dev_priv, power_domain);
 
        if (long_hpd) {
-               if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
-                       goto mst_fail;
+
+               if (HAS_PCH_SPLIT(dev)) {
+                       if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+                               goto mst_fail;
+               } else {
+                       if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
+                               goto mst_fail;
+               }
 
                if (!intel_dp_get_dpcd(intel_dp)) {
                        goto mst_fail;
index 881361c0f27e746097414a5ddc30448227175602..fdf40267249c1031a4a52a8f110b41fbf93ff61f 100644 (file)
@@ -538,7 +538,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
        .destroy = intel_encoder_destroy,
 };
 
-static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
 {
        DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
        return 1;
index 59b028f0b1e8c7a652cc174c17373b90a4782077..8e374449c6b562356f47ac2ba169f974f8e7226f 100644 (file)
@@ -801,7 +801,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
 
        cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
        if (cpu_ctl2 & BLM_PWM_ENABLE) {
-               WARN(1, "cpu backlight already enabled\n");
+               DRM_DEBUG_KMS("cpu backlight already enabled\n");
                cpu_ctl2 &= ~BLM_PWM_ENABLE;
                I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
        }
@@ -845,7 +845,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
 
        ctl = I915_READ(BLC_PWM_CTL);
        if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
-               WARN(1, "backlight already enabled\n");
+               DRM_DEBUG_KMS("backlight already enabled\n");
                I915_WRITE(BLC_PWM_CTL, 0);
        }
 
@@ -876,7 +876,7 @@ static void i965_enable_backlight(struct intel_connector *connector)
 
        ctl2 = I915_READ(BLC_PWM_CTL2);
        if (ctl2 & BLM_PWM_ENABLE) {
-               WARN(1, "backlight already enabled\n");
+               DRM_DEBUG_KMS("backlight already enabled\n");
                ctl2 &= ~BLM_PWM_ENABLE;
                I915_WRITE(BLC_PWM_CTL2, ctl2);
        }
@@ -910,7 +910,7 @@ static void vlv_enable_backlight(struct intel_connector *connector)
 
        ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
        if (ctl2 & BLM_PWM_ENABLE) {
-               WARN(1, "backlight already enabled\n");
+               DRM_DEBUG_KMS("backlight already enabled\n");
                ctl2 &= ~BLM_PWM_ENABLE;
                I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
        }
index 16371a444426d190d73b4baddd38cd8abeae66b2..2d068edd1adc41bc8d71cd919a55361de093b19d 100644 (file)
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
 #define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                                u64 offset, u32 len,
                                unsigned flags)
 {
+       u32 cs_offset = ring->scratch.gtt_offset;
        int ret;
 
-       if (flags & I915_DISPATCH_PINNED) {
-               ret = intel_ring_begin(ring, 4);
-               if (ret)
-                       return ret;
+       ret = intel_ring_begin(ring, 6);
+       if (ret)
+               return ret;
 
-               intel_ring_emit(ring, MI_BATCH_BUFFER);
-               intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-               intel_ring_emit(ring, offset + len - 8);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
-       } else {
-               u32 cs_offset = ring->scratch.gtt_offset;
+       /* Evict the invalid PTE TLBs */
+       intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+       intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+       intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+       intel_ring_emit(ring, cs_offset);
+       intel_ring_emit(ring, 0xdeadbeef);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
 
+       if ((flags & I915_DISPATCH_PINNED) == 0) {
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
 
-               ret = intel_ring_begin(ring, 9+3);
+               ret = intel_ring_begin(ring, 6 + 2);
                if (ret)
                        return ret;
-               /* Blit the batch (which has now all relocs applied) to the stable batch
-                * scratch bo area (so that the CS never stumbles over its tlb
-                * invalidation bug) ... */
-               intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
-                               XY_SRC_COPY_BLT_WRITE_ALPHA |
-                               XY_SRC_COPY_BLT_WRITE_RGB);
-               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
-               intel_ring_emit(ring, 0);
-               intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+
+               /* Blit the batch (which has now all relocs applied) to the
+                * stable batch scratch bo area (so that the CS never
+                * stumbles over its tlb invalidation bug) ...
+                */
+               intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+               intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
                intel_ring_emit(ring, cs_offset);
-               intel_ring_emit(ring, 0);
                intel_ring_emit(ring, 4096);
                intel_ring_emit(ring, offset);
+
                intel_ring_emit(ring, MI_FLUSH);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
 
                /* ... and execute it. */
-               intel_ring_emit(ring, MI_BATCH_BUFFER);
-               intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-               intel_ring_emit(ring, cs_offset + len - 8);
-               intel_ring_advance(ring);
+               offset = cs_offset;
        }
 
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring, MI_BATCH_BUFFER);
+       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+       intel_ring_emit(ring, offset + len - 8);
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+
        return 0;
 }
 
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
        /* Workaround batchbuffer to combat CS tlb bug. */
        if (HAS_BROKEN_CS_TLB(dev)) {
-               obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+               obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
                if (obj == NULL) {
                        DRM_ERROR("Failed to allocate batch bo\n");
                        return -ENOMEM;
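
For reference, the new macro does not change the size of the scratch allocation; it only states the requirement explicitly (a quick check, not part of the patch):

    I830_WA_SIZE = max(I830_TLB_ENTRIES * 4096, I830_BATCH_LIMIT)
                 = max(2 * 4096, 256 * 1024)
                 = 262144 bytes (256 KiB), the same size the old I830_BATCH_LIMIT allocation used.
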
index 32186a656816e1e4de4c533cbf699e9886dda2d8..c14341ca3ef9386a0be1b432206f00c15ddae09b 100644 (file)
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+       intel_wait_for_vblank(encoder->base.dev,
+                             to_intel_crtc(encoder->base.crtc)->pipe);
+
        I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
 
@@ -1311,6 +1315,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 {
        struct drm_display_mode mode;
        struct intel_tv *intel_tv = intel_attached_tv(connector);
+       enum drm_connector_status status;
        int type;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
@@ -1328,16 +1333,19 @@ intel_tv_detect(struct drm_connector *connector, bool force)
                if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
                        type = intel_tv_detect_type(intel_tv, connector);
                        intel_release_load_detect_pipe(connector, &tmp);
+                       status = type < 0 ?
+                               connector_status_disconnected :
+                               connector_status_connected;
                } else
-                       return connector_status_unknown;
+                       status = connector_status_unknown;
 
                drm_modeset_drop_locks(&ctx);
                drm_modeset_acquire_fini(&ctx);
        } else
                return connector->status;
 
-       if (type < 0)
-               return connector_status_disconnected;
+       if (status != connector_status_connected)
+               return status;
 
        intel_tv->type = type;
        intel_tv_find_better_format(connector);
index a125a7e32742c3707bb3e0446a4fdd8f64fffd3e..c6c9b02e0ada9f052249a556a9e2169aa967b91d 100644 (file)
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev,
        priv->hdmi_pdev = pdev;
 }
 
+#ifdef CONFIG_OF
+static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+{
+       int gpio = of_get_named_gpio(of_node, name, 0);
+       if (gpio < 0) {
+               char name2[32];
+               snprintf(name2, sizeof(name2), "%s-gpio", name);
+               gpio = of_get_named_gpio(of_node, name2, 0);
+               if (gpio < 0) {
+                       dev_err(dev, "failed to get gpio: %s (%d)\n",
+                                       name, gpio);
+                       gpio = -1;
+               }
+       }
+       return gpio;
+}
+#endif
+
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        static struct hdmi_platform_config config = {};
 #ifdef CONFIG_OF
        struct device_node *of_node = dev->of_node;
 
-       int get_gpio(const char *name)
-       {
-               int gpio = of_get_named_gpio(of_node, name, 0);
-               if (gpio < 0) {
-                       char name2[32];
-                       snprintf(name2, sizeof(name2), "%s-gpio", name);
-                       gpio = of_get_named_gpio(of_node, name2, 0);
-                       if (gpio < 0) {
-                               dev_err(dev, "failed to get gpio: %s (%d)\n",
-                                               name, gpio);
-                               gpio = -1;
-                       }
-               }
-               return gpio;
-       }
-
        if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
                static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
                static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        }
 
        config.mmio_name     = "core_physical";
-       config.ddc_clk_gpio  = get_gpio("qcom,hdmi-tx-ddc-clk");
-       config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
-       config.hpd_gpio      = get_gpio("qcom,hdmi-tx-hpd");
-       config.mux_en_gpio   = get_gpio("qcom,hdmi-tx-mux-en");
-       config.mux_sel_gpio  = get_gpio("qcom,hdmi-tx-mux-sel");
-       config.mux_lpm_gpio  = get_gpio("qcom,hdmi-tx-mux-lpm");
+       config.ddc_clk_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+       config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+       config.hpd_gpio      = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+       config.mux_en_gpio   = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+       config.mux_sel_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+       config.mux_lpm_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
 
 #else
        static const char *hpd_clk_names[] = {
index 902d7685d441f7dca81096626fc6f047f29ca218..f408b69486a8eb83d299abc636356187d2d467bb 100644 (file)
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#ifdef CONFIG_COMMON_CLK
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#endif
 
 #include "hdmi.h"
 
 struct hdmi_phy_8960 {
        struct hdmi_phy base;
        struct hdmi *hdmi;
+#ifdef CONFIG_COMMON_CLK
        struct clk_hw pll_hw;
        struct clk *pll;
        unsigned long pixclk;
+#endif
 };
 #define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+#ifdef CONFIG_COMMON_CLK
 #define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
 
 /*
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = {
        .parent_names = hdmi_pll_parents,
        .num_parents = ARRAY_SIZE(hdmi_pll_parents),
 };
-
+#endif
 
 /*
  * HDMI Phy:
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 {
        struct hdmi_phy_8960 *phy_8960;
        struct hdmi_phy *phy = NULL;
-       int ret, i;
+       int ret;
+#ifdef CONFIG_COMMON_CLK
+       int i;
 
        /* sanity check: */
        for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
                if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
                        return ERR_PTR(-EINVAL);
+#endif
 
        phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
        if (!phy_8960) {
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 
        phy_8960->hdmi = hdmi;
 
+#ifdef CONFIG_COMMON_CLK
        phy_8960->pll_hw.init = &pll_init;
        phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
        if (IS_ERR(phy_8960->pll)) {
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
                phy_8960->pll = NULL;
                goto fail;
        }
+#endif
 
        return phy;
 
index 26ee80db17af9fec7308f2e4defbac06f6dd6d3d..fcf95680413d5da99c86fed5a44c9c6386ba3350 100644 (file)
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600);
 #define reglog 0
 #endif
 
-static char *vram;
+static char *vram = "16m";
 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
 module_param(vram, charp, 0);
 
index 8701968a9743aa7540cdfc0759919c058a45a3fa..30a2911878f893fce40885f2aeb6d8b6bf4d60a3 100644 (file)
@@ -86,7 +86,7 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
        sclass = nv_parent(parent)->sclass;
        while (sclass) {
                if (++nr < size)
-                       lclass[nr] = sclass->oclass->handle;
+                       lclass[nr] = sclass->oclass->handle & 0xffff;
                sclass = sclass->sclass;
        }
 
@@ -96,7 +96,7 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
                if (engine && (oclass = engine->sclass)) {
                        while (oclass->ofuncs) {
                                if (++nr < size)
-                                       lclass[nr] = oclass->handle;
+                                       lclass[nr] = oclass->handle & 0xffff;
                                oclass++;
                        }
                }
index b1e11f8434e28badd30572219b1d095a6a06adc4..ac14b67621d36f7a068de85103e06ba644915ea3 100644 (file)
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
        u8 msg[DP_DPCD_SIZE];
        int ret;
 
-       char dpcd_hex_dump[DP_DPCD_SIZE * 3];
-
        ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
                               DP_DPCD_SIZE);
        if (ret > 0) {
                memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-               hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
-                                  32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
-               DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+               DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+                             dig_connector->dpcd);
 
                radeon_dp_probe_oui(radeon_connector);
 
index e616eb5f6e7a4076979650147acad69b9b40d567..3cfb50056f7ac75ec27f1ef3f8bfc09218c2f152 100644 (file)
@@ -2769,8 +2769,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
-       /* PFP_SYNC_ME packet only exists on 7xx+ */
-       if (emit_wait && (rdev->family >= CHIP_RV770)) {
+       /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+       if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
                /* Prevent the PFP from running ahead of the semaphore wait */
                radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
                radeon_ring_write(ring, 0x0);
index 92b2d8dd47355f7f7b601025082868d3e29e60be..e74c7e387ddebfbb74d32866b39d2046d2632540 100644 (file)
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                }
        }
 
+       /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+       if ((dev->pdev->device == 0x9805) &&
+           (dev->pdev->subsystem_vendor == 0x1734) &&
+           (dev->pdev->subsystem_device == 0x11bd)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+                       return false;
+       }
 
        return true;
 }
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
-               } else if ((controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
-                          (controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
-                          (controller->ucType ==
-                           ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
-                       DRM_INFO("Special thermal controller config\n");
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+                       DRM_INFO("External GPIO thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+                       DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+                       DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
                } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
                        DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
                                 pp_lib_thermal_controller_names[controller->ucType],
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
                        i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
                        rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
                        if (rdev->pm.i2c_bus) {
index 56d9fd66d8aed379c417d1ba4a47de7aa65ba472..abd6753a570afa119ac1e68255231c99f7b27995 100644 (file)
@@ -34,7 +34,7 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
                            struct radeon_semaphore **semaphore)
 {
-       uint32_t *cpu_addr;
+       uint64_t *cpu_addr;
        int i, r;
 
        *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
index 7bfdaa163a33a19fb7ca8d775e654277aea497ea..36b871686d3c6147a7b033b9db45a07ed39e9530 100644 (file)
@@ -450,11 +450,11 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                          res,
                                          id_loc - sw_context->buf_start);
        if (unlikely(ret != 0))
-               goto out_err;
+               return ret;
 
        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
-               goto out_err;
+               return ret;
 
        if (res_type == vmw_res_context && dev_priv->has_mob &&
            node->first_usage) {
@@ -468,13 +468,13 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 
                ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
                if (unlikely(ret != 0))
-                       goto out_err;
+                       return ret;
                node->staged_bindings =
                        kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
                if (node->staged_bindings == NULL) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
-                       goto out_err;
+                       return -ENOMEM;
                }
                INIT_LIST_HEAD(&node->staged_bindings->list);
        }
@@ -482,8 +482,7 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
        if (p_val)
                *p_val = node;
 
-out_err:
-       return ret;
+       return 0;
 }
 
 
index 6ccd993e26bf4ead66d0e8d1f1b4b592856d7599..6eae14d2a3f73511143a42da95757418edf01207 100644 (file)
@@ -180,8 +180,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
        mutex_lock(&dev_priv->hw_mutex);
 
+       vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
-               vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+               ;
 
        dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
index fc6f5d54e7f755282025045f843ba62dcff68fa9..8890870309e4db7f5f875c202756006b3323538b 100644 (file)
@@ -309,6 +309,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
        data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT);
        i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf);
        data->update_interval = ds1721_convrates[resol];
+       data->zbits = 7 - resol;
        mutex_unlock(&data->update_lock);
 
        return count;
index 79a68999a6962914f93106b4fc9154129b9071f0..917d54588d95c14f966abd326e12467bea244342 100644 (file)
@@ -101,6 +101,7 @@ struct at91_twi_dev {
        unsigned twi_cwgr_reg;
        struct at91_twi_pdata *pdata;
        bool use_dma;
+       bool recv_len_abort;
        struct at91_twi_dma dma;
 };
 
@@ -267,12 +268,24 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
        *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
        --dev->buf_len;
 
+       /* return if aborting, we only needed to read RHR to clear RXRDY */
+       if (dev->recv_len_abort)
+               return;
+
        /* handle I2C_SMBUS_BLOCK_DATA */
        if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
-               dev->msg->flags &= ~I2C_M_RECV_LEN;
-               dev->buf_len += *dev->buf;
-               dev->msg->len = dev->buf_len + 1;
-               dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
+               /* ensure length byte is a valid value */
+               if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
+                       dev->msg->flags &= ~I2C_M_RECV_LEN;
+                       dev->buf_len += *dev->buf;
+                       dev->msg->len = dev->buf_len + 1;
+                       dev_dbg(dev->dev, "received block length %d\n",
+                                        dev->buf_len);
+               } else {
+                       /* abort and send the stop by reading one more byte */
+                       dev->recv_len_abort = true;
+                       dev->buf_len = 1;
+               }
        }
 
        /* send stop if second but last byte has been read */
@@ -421,8 +434,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                }
        }
 
-       ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
-                                                       dev->adapter.timeout);
+       ret = wait_for_completion_io_timeout(&dev->cmd_complete,
+                                            dev->adapter.timeout);
        if (ret == 0) {
                dev_err(dev->dev, "controller timed out\n");
                at91_init_twi_bus(dev);
@@ -444,6 +457,12 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                ret = -EIO;
                goto error;
        }
+       if (dev->recv_len_abort) {
+               dev_err(dev->dev, "invalid smbus block length recvd\n");
+               ret = -EPROTO;
+               goto error;
+       }
+
        dev_dbg(dev->dev, "transfer complete\n");
 
        return 0;
@@ -500,6 +519,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
        dev->buf_len = m_start->len;
        dev->buf = m_start->buf;
        dev->msg = m_start;
+       dev->recv_len_abort = false;
 
        ret = at91_do_twi_transfer(dev);
 
index 6dc5ded86f6262e8b9eb4bceeae83e4d8eafd4e9..2f64273d3f2bba63c8f930e66fad83fdf5750551 100644 (file)
@@ -746,8 +746,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
        }
        tclk = clk_get_rate(drv_data->clk);
 
-       rc = of_property_read_u32(np, "clock-frequency", &bus_freq);
-       if (rc)
+       if (of_property_read_u32(np, "clock-frequency", &bus_freq))
                bus_freq = 100000; /* 100kHz by default */
 
        if (!mv64xxx_find_baud_factors(bus_freq, tclk,
index f3c7139dfa251f51b6c67da1b9f2de869fb6860d..1cc146cfc1f3dce02d4d83c7e79b698f07b161c9 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 /* register offsets */
 #define ICSCR  0x00    /* slave ctrl */
@@ -95,6 +96,7 @@ struct rcar_i2c_priv {
        struct i2c_msg  *msg;
        struct clk *clk;
 
+       spinlock_t lock;
        wait_queue_head_t wait;
 
        int pos;
@@ -365,20 +367,20 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
        struct rcar_i2c_priv *priv = ptr;
        u32 msr;
 
+       /*-------------- spin lock -----------------*/
+       spin_lock(&priv->lock);
+
        msr = rcar_i2c_read(priv, ICMSR);
 
+       /* Only handle interrupts that are currently enabled */
+       msr &= rcar_i2c_read(priv, ICMIER);
+
        /* Arbitration lost */
        if (msr & MAL) {
                rcar_i2c_flags_set(priv, (ID_DONE | ID_ARBLOST));
                goto out;
        }
 
-       /* Stop */
-       if (msr & MST) {
-               rcar_i2c_flags_set(priv, ID_DONE);
-               goto out;
-       }
-
        /* Nack */
        if (msr & MNR) {
                /* go to stop phase */
@@ -388,6 +390,12 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
                goto out;
        }
 
+       /* Stop */
+       if (msr & MST) {
+               rcar_i2c_flags_set(priv, ID_DONE);
+               goto out;
+       }
+
        if (rcar_i2c_is_recv(priv))
                rcar_i2c_flags_set(priv, rcar_i2c_irq_recv(priv, msr));
        else
@@ -400,6 +408,9 @@ out:
                wake_up(&priv->wait);
        }
 
+       spin_unlock(&priv->lock);
+       /*-------------- spin unlock -----------------*/
+
        return IRQ_HANDLED;
 }
 
@@ -409,14 +420,21 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 {
        struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
        struct device *dev = rcar_i2c_priv_to_dev(priv);
+       unsigned long flags;
        int i, ret, timeout;
 
        pm_runtime_get_sync(dev);
 
+       /*-------------- spin lock -----------------*/
+       spin_lock_irqsave(&priv->lock, flags);
+
        rcar_i2c_init(priv);
        /* start clock */
        rcar_i2c_write(priv, ICCCR, priv->icccr);
 
+       spin_unlock_irqrestore(&priv->lock, flags);
+       /*-------------- spin unlock -----------------*/
+
        ret = rcar_i2c_bus_barrier(priv);
        if (ret < 0)
                goto out;
@@ -428,6 +446,9 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
                        break;
                }
 
+               /*-------------- spin lock -----------------*/
+               spin_lock_irqsave(&priv->lock, flags);
+
                /* init each data */
                priv->msg       = &msgs[i];
                priv->pos       = 0;
@@ -437,6 +458,9 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
                ret = rcar_i2c_prepare_msg(priv);
 
+               spin_unlock_irqrestore(&priv->lock, flags);
+               /*-------------- spin unlock -----------------*/
+
                if (ret < 0)
                        break;
 
@@ -540,6 +564,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        init_waitqueue_head(&priv->wait);
+       spin_lock_init(&priv->lock);
 
        adap = &priv->adap;
        adap->nr = pdev->id;
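
The lock added here follows the usual split between contexts: the hard-IRQ handler takes the plain spin_lock() (interrupts are already disabled there), while process context uses spin_lock_irqsave() so the handler cannot interleave with register setup on the same CPU. A generic sketch of the idiom, with made-up foo_ names:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct foo_priv {
            spinlock_t lock;
            u32 shadow;             /* state shared with the IRQ handler */
    };

    static void foo_update(struct foo_priv *priv, u32 val)
    {
            unsigned long flags;

            spin_lock_irqsave(&priv->lock, flags);  /* process context */
            priv->shadow = val;
            spin_unlock_irqrestore(&priv->lock, flags);
    }

    static irqreturn_t foo_irq(int irq, void *ptr)
    {
            struct foo_priv *priv = ptr;

            spin_lock(&priv->lock);                 /* hard-IRQ context */
            /* consume priv->shadow, ack the hardware, ... */
            spin_unlock(&priv->lock);
            return IRQ_HANDLED;
    }
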
index 69e11853e8bff187e09459167506ec24b8f7a122..e637c32ae5172bcaa77bdd2c5eda8dee34457176 100644 (file)
@@ -323,6 +323,10 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
        /* ack interrupt */
        i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
 
+       /* Can only handle a maximum of 32 bytes at a time */
+       if (len > 32)
+               len = 32;
+
        /* read the data from receive buffer */
        for (i = 0; i < len; ++i) {
                if (i % 4 == 0)
index e1e558a3d692bbd8b907a84efe222481b0ae1a8e..af8256353c7de6a9449f3da753b4b07e8df025fc 100644 (file)
@@ -1089,6 +1089,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
        return err;
 }
 
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+                                   u64 *reg_id)
+{
+       void *ib_flow;
+       union ib_flow_spec *ib_spec;
+       struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+       int err = 0;
+
+       if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+               return 0; /* do nothing */
+
+       ib_flow = flow_attr + 1;
+       ib_spec = (union ib_flow_spec *)ib_flow;
+
+       if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+               return 0; /* do nothing */
+
+       err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+                                   flow_attr->port, qp->qp_num,
+                                   MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+                                   reg_id);
+       return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                    struct ib_flow_attr *flow_attr,
                                    int domain)
@@ -1136,6 +1160,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                i++;
        }
 
+       if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+               err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+               if (err)
+                       goto err_free;
+       }
+
        return &mflow->ibflow;
 
 err_free:
index 67780452f0cfb85d72fc98bfd0faefbe005fb32a..efb9eff8906c34845053ba04ced5dea9bfe8cbe4 100644 (file)
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                }
        }
 
-       if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+       if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
                context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
                                        MLX4_IB_LINK_TYPE_ETH;
+               if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+                       /* set QP to receive both tunneled & non-tunneled packets */
+                       if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+                               context->srqn = cpu_to_be32(7 << 28);
+               }
+       }
 
        if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
                int is_eth = rdma_port_get_link_layer(
index c30204f2fa3097a376eec7a32072f0c29b9c1a90..fbe29fcb15c5b85d27c5e001df59fe20d144b577 100644 (file)
@@ -236,6 +236,18 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
 }
 EXPORT_SYMBOL(input_mt_report_pointer_emulation);
 
+static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt)
+{
+       int i;
+
+       for (i = 0; i < mt->num_slots; i++) {
+               if (!input_mt_is_used(mt, &mt->slots[i])) {
+                       input_mt_slot(dev, i);
+                       input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
+               }
+       }
+}
+
 /**
  * input_mt_drop_unused() - Inactivate slots not seen in this frame
  * @dev: input device with allocated MT slots
@@ -245,19 +257,11 @@ EXPORT_SYMBOL(input_mt_report_pointer_emulation);
 void input_mt_drop_unused(struct input_dev *dev)
 {
        struct input_mt *mt = dev->mt;
-       int i;
 
-       if (!mt)
-               return;
-
-       for (i = 0; i < mt->num_slots; i++) {
-               if (!input_mt_is_used(mt, &mt->slots[i])) {
-                       input_mt_slot(dev, i);
-                       input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
-               }
+       if (mt) {
+               __input_mt_drop_unused(dev, mt);
+               mt->frame++;
        }
-
-       mt->frame++;
 }
 EXPORT_SYMBOL(input_mt_drop_unused);
 
@@ -278,12 +282,14 @@ void input_mt_sync_frame(struct input_dev *dev)
                return;
 
        if (mt->flags & INPUT_MT_DROP_UNUSED)
-               input_mt_drop_unused(dev);
+               __input_mt_drop_unused(dev, mt);
 
        if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
                use_count = true;
 
        input_mt_report_pointer_emulation(dev, use_count);
+
+       mt->frame++;
 }
 EXPORT_SYMBOL(input_mt_sync_frame);
 
index 180b184ab90f31935e1fa9c71bd7eb8fa4e68593..d70b65a14ced24ffe09e298562de54ab9b9a52e4 100644 (file)
@@ -33,8 +33,8 @@
 #define CAP1106_REG_SENSOR_CONFIG      0x22
 #define CAP1106_REG_SENSOR_CONFIG2     0x23
 #define CAP1106_REG_SAMPLING_CONFIG    0x24
-#define CAP1106_REG_CALIBRATION                0x25
-#define CAP1106_REG_INT_ENABLE         0x26
+#define CAP1106_REG_CALIBRATION                0x26
+#define CAP1106_REG_INT_ENABLE         0x27
 #define CAP1106_REG_REPEAT_RATE                0x28
 #define CAP1106_REG_MT_CONFIG          0x2a
 #define CAP1106_REG_MT_PATTERN_CONFIG  0x2b
index 8d2e19e81e1e59b63c31eccd989b441f503e8706..e651fa692afef3959f0d48ee844a40c3ccb2aba0 100644 (file)
@@ -332,23 +332,24 @@ static int matrix_keypad_init_gpio(struct platform_device *pdev,
        }
 
        if (pdata->clustered_irq > 0) {
-               err = request_irq(pdata->clustered_irq,
+               err = request_any_context_irq(pdata->clustered_irq,
                                matrix_keypad_interrupt,
                                pdata->clustered_irq_flags,
                                "matrix-keypad", keypad);
-               if (err) {
+               if (err < 0) {
                        dev_err(&pdev->dev,
                                "Unable to acquire clustered interrupt\n");
                        goto err_free_rows;
                }
        } else {
                for (i = 0; i < pdata->num_row_gpios; i++) {
-                       err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
+                       err = request_any_context_irq(
+                                       gpio_to_irq(pdata->row_gpios[i]),
                                        matrix_keypad_interrupt,
                                        IRQF_TRIGGER_RISING |
                                        IRQF_TRIGGER_FALLING,
                                        "matrix-keypad", keypad);
-                       if (err) {
+                       if (err < 0) {
                                dev_err(&pdev->dev,
                                        "Unable to acquire interrupt for GPIO line %i\n",
                                        pdata->row_gpios[i]);
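
request_any_context_irq() differs from request_irq() in its return value: failures are negative errnos, but success returns a positive context indicator (IRQC_IS_HARDIRQ or IRQC_IS_NESTED), which is why the error checks above change from if (err) to if (err < 0). A short usage sketch mirroring the clustered-IRQ case:

    err = request_any_context_irq(pdata->clustered_irq,
                                  matrix_keypad_interrupt,
                                  pdata->clustered_irq_flags,
                                  "matrix-keypad", keypad);
    if (err < 0)
            return err;     /* a non-negative value only says hardirq vs. threaded */
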
index a59a1a64b6746745522e1da15e541008d738fcca..35a49bf572273e464399944c05e8072a7b06e138 100644 (file)
@@ -2234,8 +2234,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
                return 0;
        }
 
-       psmouse_info(psmouse,
-                    "Unknown ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+       psmouse_dbg(psmouse,
+                   "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
 
        return -EINVAL;
 }
@@ -2373,6 +2373,10 @@ int alps_init(struct psmouse *psmouse)
        dev2->keybit[BIT_WORD(BTN_LEFT)] =
                BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
 
+       __set_bit(INPUT_PROP_POINTER, dev2->propbit);
+       if (priv->flags & ALPS_DUALPOINT)
+               __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
+
        if (input_register_device(priv->dev2))
                goto init_fail;
 
index ee2a04d90d20c1bfe1fd785350119a44eb835109..06fc6e76ffbe0cf725053b8a1bdb3faaefa1c629 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
+#include <asm/unaligned.h>
 #include "psmouse.h"
 #include "elantech.h"
 
@@ -403,6 +404,68 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
        input_sync(dev);
 }
 
+static void elantech_report_trackpoint(struct psmouse *psmouse,
+                                      int packet_type)
+{
+       /*
+        * byte 0:  0   0  sx  sy   0   M   R   L
+        * byte 1:~sx   0   0   0   0   0   0   0
+        * byte 2:~sy   0   0   0   0   0   0   0
+        * byte 3:  0   0 ~sy ~sx   0   1   1   0
+        * byte 4: x7  x6  x5  x4  x3  x2  x1  x0
+        * byte 5: y7  y6  y5  y4  y3  y2  y1  y0
+        *
+        * x and y are written in two's complement spread
+        * over 9 bits with sx/sy the relative top bit and
+        * x7..x0 and y7..y0 the lower bits.
+        * The sign of y is opposite to what the input driver
+        * expects for a relative movement
+        */
+
+       struct elantech_data *etd = psmouse->private;
+       struct input_dev *tp_dev = etd->tp_dev;
+       unsigned char *packet = psmouse->packet;
+       int x, y;
+       u32 t;
+
+       if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev,
+                         !tp_dev,
+                         psmouse_fmt("Unexpected trackpoint message\n"))) {
+               if (etd->debug == 1)
+                       elantech_packet_dump(psmouse);
+               return;
+       }
+
+       t = get_unaligned_le32(&packet[0]);
+
+       switch (t & ~7U) {
+       case 0x06000030U:
+       case 0x16008020U:
+       case 0x26800010U:
+       case 0x36808000U:
+               x = packet[4] - (int)((packet[1]^0x80) << 1);
+               y = (int)((packet[2]^0x80) << 1) - packet[5];
+
+               input_report_key(tp_dev, BTN_LEFT, packet[0] & 0x01);
+               input_report_key(tp_dev, BTN_RIGHT, packet[0] & 0x02);
+               input_report_key(tp_dev, BTN_MIDDLE, packet[0] & 0x04);
+
+               input_report_rel(tp_dev, REL_X, x);
+               input_report_rel(tp_dev, REL_Y, y);
+
+               input_sync(tp_dev);
+
+               break;
+
+       default:
+               /* Dump unexpected packet sequences if debug=1 (default) */
+               if (etd->debug == 1)
+                       elantech_packet_dump(psmouse);
+
+               break;
+       }
+}
+
 /*
  * Interpret complete data packets and report absolute mode input events for
  * hardware version 3. (12 byte packets for two fingers)
@@ -715,6 +778,8 @@ static int elantech_packet_check_v3(struct psmouse *psmouse)
 
                if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
                        return PACKET_V3_TAIL;
+               if ((packet[3] & 0x0f) == 0x06)
+                       return PACKET_TRACKPOINT;
        }
 
        return PACKET_UNKNOWN;
@@ -791,14 +856,23 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
 
        case 3:
                packet_type = elantech_packet_check_v3(psmouse);
-               /* ignore debounce */
-               if (packet_type == PACKET_DEBOUNCE)
-                       return PSMOUSE_FULL_PACKET;
-
-               if (packet_type == PACKET_UNKNOWN)
+               switch (packet_type) {
+               case PACKET_UNKNOWN:
                        return PSMOUSE_BAD_DATA;
 
-               elantech_report_absolute_v3(psmouse, packet_type);
+               case PACKET_DEBOUNCE:
+                       /* ignore debounce */
+                       break;
+
+               case PACKET_TRACKPOINT:
+                       elantech_report_trackpoint(psmouse, packet_type);
+                       break;
+
+               default:
+                       elantech_report_absolute_v3(psmouse, packet_type);
+                       break;
+               }
+
                break;
 
        case 4:
@@ -1018,8 +1092,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX31               0x361f00        20, 15, 0e      clickpad
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+ * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
+ * Lenovo L530             0x350f02        b9, 15, 0c      2 hw buttons (*)
  * Samsung NF210           0x150b00        78, 14, 0a      2 hw buttons
  * Samsung NP770Z5E        0x575f01        10, 15, 0f      clickpad
  * Samsung NP700Z5B        0x361f06        21, 15, 0f      clickpad
@@ -1029,6 +1105,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Samsung RF710           0x450f00        ?               2 hw buttons
  * System76 Pangolin       0x250f01        ?               2 hw buttons
  * (*) + 3 trackpoint buttons
+ * (**) + 0 trackpoint buttons
+ * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps
  */
 static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
 {
@@ -1253,6 +1331,13 @@ static bool elantech_is_signature_valid(const unsigned char *param)
        if (param[1] == 0)
                return true;
 
+       /*
+        * Some models have a revision higher than 20, meaning param[2] may
+        * be 10 or 20; skip the rates check for these.
+        */
+       if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+               return true;
+
        for (i = 0; i < ARRAY_SIZE(rates); i++)
                if (param[2] == rates[i])
                        return false;
@@ -1324,6 +1409,10 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
  */
 static void elantech_disconnect(struct psmouse *psmouse)
 {
+       struct elantech_data *etd = psmouse->private;
+
+       if (etd->tp_dev)
+               input_unregister_device(etd->tp_dev);
        sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj,
                           &elantech_attr_group);
        kfree(psmouse->private);
@@ -1438,8 +1527,10 @@ static int elantech_set_properties(struct elantech_data *etd)
 int elantech_init(struct psmouse *psmouse)
 {
        struct elantech_data *etd;
-       int i, error;
+       int i;
+       int error = -EINVAL;
        unsigned char param[3];
+       struct input_dev *tp_dev;
 
        psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);
        if (!etd)
@@ -1498,14 +1589,53 @@ int elantech_init(struct psmouse *psmouse)
                goto init_fail;
        }
 
+       /* The MSB indicates the presence of the trackpoint */
+       if ((etd->capabilities[0] & 0x80) == 0x80) {
+               tp_dev = input_allocate_device();
+
+               if (!tp_dev) {
+                       error = -ENOMEM;
+                       goto init_fail_tp_alloc;
+               }
+
+               etd->tp_dev = tp_dev;
+               snprintf(etd->tp_phys, sizeof(etd->tp_phys), "%s/input1",
+                       psmouse->ps2dev.serio->phys);
+               tp_dev->phys = etd->tp_phys;
+               tp_dev->name = "Elantech PS/2 TrackPoint";
+               tp_dev->id.bustype = BUS_I8042;
+               tp_dev->id.vendor  = 0x0002;
+               tp_dev->id.product = PSMOUSE_ELANTECH;
+               tp_dev->id.version = 0x0000;
+               tp_dev->dev.parent = &psmouse->ps2dev.serio->dev;
+               tp_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+               tp_dev->relbit[BIT_WORD(REL_X)] =
+                       BIT_MASK(REL_X) | BIT_MASK(REL_Y);
+               tp_dev->keybit[BIT_WORD(BTN_LEFT)] =
+                       BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) |
+                       BIT_MASK(BTN_RIGHT);
+
+               __set_bit(INPUT_PROP_POINTER, tp_dev->propbit);
+               __set_bit(INPUT_PROP_POINTING_STICK, tp_dev->propbit);
+
+               error = input_register_device(etd->tp_dev);
+               if (error < 0)
+                       goto init_fail_tp_reg;
+       }
+
        psmouse->protocol_handler = elantech_process_byte;
        psmouse->disconnect = elantech_disconnect;
        psmouse->reconnect = elantech_reconnect;
        psmouse->pktsize = etd->hw_version > 1 ? 6 : 4;
 
        return 0;
-
+ init_fail_tp_reg:
+       input_free_device(tp_dev);
+ init_fail_tp_alloc:
+       sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj,
+                          &elantech_attr_group);
  init_fail:
+       psmouse_reset(psmouse);
        kfree(etd);
-       return -1;
+       return error;
 }
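
The reworked error path follows the input core's ownership rule: a device obtained from input_allocate_device() is released with input_free_device() until input_register_device() succeeds, after which only input_unregister_device() may be used (it also drops the final reference). A compact sketch of the rule, with an illustrative function name:

    #include <linux/input.h>

    static int example_register(void)
    {
            struct input_dev *dev = input_allocate_device();
            int error;

            if (!dev)
                    return -ENOMEM;

            error = input_register_device(dev);
            if (error) {
                    input_free_device(dev); /* not yet registered: free directly */
                    return error;
            }

            /* once registered, later teardown must call
             * input_unregister_device(dev) and must not free it again */
            return 0;
    }
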
index 9e0e2a1f340d52817fc18bf79d849bed6d340932..6f3afec02f03ee0cec8b74cbe13bae6a5cba83c5 100644 (file)
@@ -94,6 +94,7 @@
 #define PACKET_V4_HEAD                 0x05
 #define PACKET_V4_MOTION               0x06
 #define PACKET_V4_STATUS               0x07
+#define PACKET_TRACKPOINT              0x08
 
 /*
  * track up to 5 fingers for v4 hardware
@@ -114,6 +115,8 @@ struct finger_pos {
 };
 
 struct elantech_data {
+       struct input_dev *tp_dev;       /* Relative device for trackpoint */
+       char tp_phys[32];
        unsigned char reg_07;
        unsigned char reg_10;
        unsigned char reg_11;
index cff065f6261cf31a3188226f14fe28e9291f6770..b4e1f014ddc2297affeda8abc73c15954fdfabf0 100644 (file)
@@ -670,6 +670,8 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
        __set_bit(REL_X, input_dev->relbit);
        __set_bit(REL_Y, input_dev->relbit);
 
+       __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
        psmouse->set_rate = psmouse_set_rate;
        psmouse->set_resolution = psmouse_set_resolution;
        psmouse->poll = psmouse_poll;
index e8573c68f77e900e83e054c720443328adfa2f7f..fd23181c1fb741112f1cc33fa0fc6111284823c3 100644 (file)
@@ -629,10 +629,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                         ((buf[0] & 0x04) >> 1) |
                         ((buf[3] & 0x04) >> 2));
 
+               if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+                   hw->w == 2) {
+                       synaptics_parse_agm(buf, priv, hw);
+                       return 1;
+               }
+
+               hw->x = (((buf[3] & 0x10) << 8) |
+                        ((buf[1] & 0x0f) << 8) |
+                        buf[4]);
+               hw->y = (((buf[3] & 0x20) << 7) |
+                        ((buf[1] & 0xf0) << 4) |
+                        buf[5]);
+               hw->z = buf[2];
+
                hw->left  = (buf[0] & 0x01) ? 1 : 0;
                hw->right = (buf[0] & 0x02) ? 1 : 0;
 
-               if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+               if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
+                       /*
+                        * ForcePads, like Clickpads, use middle button
+                        * bits to report primary button clicks.
+                        * Unfortunately they report the primary button not
+                        * only when the user presses on the pad above a
+                        * certain threshold, but also when there is more
+                        * than one finger on the touchpad, which interferes
+                        * with our multi-finger gestures.
+                        */
+                       if (hw->z == 0) {
+                               /* No contacts */
+                               priv->press = priv->report_press = false;
+                       } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
+                               /*
+                                * Single-finger touch with pressure above
+                                * the threshold. If pressure stays long
+                                * enough, we'll start reporting primary
+                                * button. We rely on the device continuing
+                                * sending data even if finger does not
+                                * move.
+                                */
+                               if  (!priv->press) {
+                                       priv->press_start = jiffies;
+                                       priv->press = true;
+                               } else if (time_after(jiffies,
+                                               priv->press_start +
+                                                       msecs_to_jiffies(50))) {
+                                       priv->report_press = true;
+                               }
+                       } else {
+                               priv->press = false;
+                       }
+
+                       hw->left = priv->report_press;
+
+               } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                        /*
                         * Clickpad's button is transmitted as middle button,
                         * however, since it is primary button, we will report
@@ -651,21 +702,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                        hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
                }
 
-               if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
-                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
-                   hw->w == 2) {
-                       synaptics_parse_agm(buf, priv, hw);
-                       return 1;
-               }
-
-               hw->x = (((buf[3] & 0x10) << 8) |
-                        ((buf[1] & 0x0f) << 8) |
-                        buf[4]);
-               hw->y = (((buf[3] & 0x20) << 7) |
-                        ((buf[1] & 0xf0) << 4) |
-                        buf[5]);
-               hw->z = buf[2];
-
                if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
                    ((buf[0] ^ buf[3]) & 0x02)) {
                        switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
index e594af0b264b7f147569d77fe5a1d84825ed1a9e..fb2e076738ae3bee7fdd844de7e93dd3fa5146c6 100644 (file)
  * 2   0x08    image sensor            image sensor tracks 5 fingers, but only
  *                                     reports 2.
  * 2   0x20    report min              query 0x0f gives min coord reported
+ * 2   0x80    forcepad                forcepad is a variant of clickpad that
+ *                                     does not have physical buttons but rather
+ *                                     uses pressure above a certain threshold to
+ *                                     report primary clicks. Forcepads also have
+ *                                     the clickpad bit set.
  */
 #define SYN_CAP_CLICKPAD(ex0c)         ((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)     ((ex0c) & 0x000100) /* 2-button ClickPad */
@@ -86,6 +91,7 @@
 #define SYN_CAP_ADV_GESTURE(ex0c)      ((ex0c) & 0x080000)
 #define SYN_CAP_REDUCED_FILTERING(ex0c)        ((ex0c) & 0x000400)
 #define SYN_CAP_IMAGE_SENSOR(ex0c)     ((ex0c) & 0x000800)
+#define SYN_CAP_FORCEPAD(ex0c)         ((ex0c) & 0x008000)
 
 /* synaptics modes query bits */
 #define SYN_MODE_ABSOLUTE(m)           ((m) & (1 << 7))
@@ -177,6 +183,11 @@ struct synaptics_data {
         */
        struct synaptics_hw_state agm;
        bool agm_pending;                       /* new AGM packet received */
+
+       /* ForcePad handling */
+       unsigned long                           press_start;
+       bool                                    press;
+       bool                                    report_press;
 };
 
 void synaptics_module_init(void);
index e122bda16aabddcf555f84dd06eed4dfbebca031..6bcc0189c1c99898dc39bd5ced920f60c13bd553 100644 (file)
@@ -387,6 +387,7 @@ static int synusb_probe(struct usb_interface *intf,
                __set_bit(EV_REL, input_dev->evbit);
                __set_bit(REL_X, input_dev->relbit);
                __set_bit(REL_Y, input_dev->relbit);
+               __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit);
                input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0);
        } else {
                input_set_abs_params(input_dev, ABS_X,
@@ -401,6 +402,11 @@ static int synusb_probe(struct usb_interface *intf,
                __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
        }
 
+       if (synusb->flags & SYNUSB_TOUCHSCREEN)
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+       else
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
        __set_bit(BTN_LEFT, input_dev->keybit);
        __set_bit(BTN_RIGHT, input_dev->keybit);
        __set_bit(BTN_MIDDLE, input_dev->keybit);
index ca843b6cf6bd9f536c984cf455c83952ff8e2a18..30c8b6998808fa452a19e437c33bd7e9db8a0888 100644 (file)
@@ -393,6 +393,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
        if ((button_info & 0x0f) >= 3)
                __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
 
+       __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
+       __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
+
        trackpoint_defaults(psmouse->private);
 
        error = trackpoint_power_on_reset(&psmouse->ps2dev);
index d6aa4c67dbb6208e0584c098864a52e580950cff..93cb7912703cee4a9dbd2f75594a16da2937bcef 100644 (file)
@@ -17,7 +17,6 @@ static int i8042_aux_irq = -1;
 #define I8042_MUX_PHYS_DESC "sparcps2/serio%d"
 
 static void __iomem *kbd_iobase;
-static struct resource *kbd_res;
 
 #define I8042_COMMAND_REG      (kbd_iobase + 0x64UL)
 #define I8042_DATA_REG         (kbd_iobase + 0x60UL)
@@ -44,6 +43,8 @@ static inline void i8042_write_command(int val)
 
 #ifdef CONFIG_PCI
 
+static struct resource *kbd_res;
+
 #define OBP_PS2KBD_NAME1       "kb_ps2"
 #define OBP_PS2KBD_NAME2       "keyboard"
 #define OBP_PS2MS_NAME1                "kdmouse"
index 0cb7ef59071b792a350a914e88f4e9be7da34efc..69175b8253468cf5d2319659b1496e3ac32d5b10 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/serio.h>
 #include <linux/tty.h>
+#include <linux/compat.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Input device TTY line discipline");
@@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
        return 0;
 }
 
+static void serport_set_type(struct tty_struct *tty, unsigned long type)
+{
+       struct serport *serport = tty->disc_data;
+
+       serport->id.proto = type & 0x000000ff;
+       serport->id.id    = (type & 0x0000ff00) >> 8;
+       serport->id.extra = (type & 0x00ff0000) >> 16;
+}
+
 /*
  * serport_ldisc_ioctl() allows to set the port protocol, and device ID
  */
 
-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
+static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
+                              unsigned int cmd, unsigned long arg)
 {
-       struct serport *serport = (struct serport*) tty->disc_data;
-       unsigned long type;
-
        if (cmd == SPIOCSTYPE) {
+               unsigned long type;
+
                if (get_user(type, (unsigned long __user *) arg))
                        return -EFAULT;
 
-               serport->id.proto = type & 0x000000ff;
-               serport->id.id    = (type & 0x0000ff00) >> 8;
-               serport->id.extra = (type & 0x00ff0000) >> 16;
+               serport_set_type(tty, type);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_SPIOCSTYPE      _IOW('q', 0x01, compat_ulong_t)
+static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
+                                      struct file *file,
+                                      unsigned int cmd, unsigned long arg)
+{
+       if (cmd == COMPAT_SPIOCSTYPE) {
+               void __user *uarg = compat_ptr(arg);
+               compat_ulong_t compat_type;
+
+               if (get_user(compat_type, (compat_ulong_t __user *)uarg))
+                       return -EFAULT;
 
+               serport_set_type(tty, compat_type);
                return 0;
        }
 
        return -EINVAL;
 }
+#endif
 
 static void serport_ldisc_write_wakeup(struct tty_struct * tty)
 {
@@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldisc = {
        .close =        serport_ldisc_close,
        .read =         serport_ldisc_read,
        .ioctl =        serport_ldisc_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = serport_ldisc_compat_ioctl,
+#endif
        .receive_buf =  serport_ldisc_receive,
        .write_wakeup = serport_ldisc_write_wakeup
 };
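
The separate COMPAT_SPIOCSTYPE is needed because _IOW() encodes the size of the argument type into the ioctl number, so a 32-bit process computes a different value than the native 64-bit definition and would otherwise get -EINVAL. A sketch of the two encodings (the native one lives in the serio UAPI header and is shown here only for comparison):

    #define SPIOCSTYPE        _IOW('q', 0x01, unsigned long)   /* 8-byte argument on a 64-bit kernel */
    #define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)  /* 4-byte argument from 32-bit userspace */
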
index db178ed2b47ee282c27a5f424f45df168b99a091..aaacf8bfa61fded723f48bcb18dc59ebbd10ab86 100644 (file)
@@ -837,7 +837,12 @@ static irqreturn_t mxt_process_messages_t44(struct mxt_data *data)
        count = data->msg_buf[0];
 
        if (count == 0) {
-               dev_warn(dev, "Interrupt triggered but zero messages\n");
+               /*
+                * This condition is caused by the CHG line being configured
+                * in Mode 0. It results in unnecessary I2C operations but it
+                * is benign.
+                */
+               dev_dbg(dev, "Interrupt triggered but zero messages\n");
                return IRQ_NONE;
        } else if (count > data->max_reportid) {
                dev_err(dev, "T44 count %d exceeded max report id\n", count);
@@ -1374,11 +1379,16 @@ static int mxt_get_info(struct mxt_data *data)
        return 0;
 }
 
-static void mxt_free_object_table(struct mxt_data *data)
+static void mxt_free_input_device(struct mxt_data *data)
 {
-       input_unregister_device(data->input_dev);
-       data->input_dev = NULL;
+       if (data->input_dev) {
+               input_unregister_device(data->input_dev);
+               data->input_dev = NULL;
+       }
+}
 
+static void mxt_free_object_table(struct mxt_data *data)
+{
        kfree(data->object_table);
        data->object_table = NULL;
        kfree(data->msg_buf);
@@ -1957,11 +1967,13 @@ static int mxt_load_fw(struct device *dev, const char *fn)
                ret = mxt_lookup_bootloader_address(data, 0);
                if (ret)
                        goto release_firmware;
+
+               mxt_free_input_device(data);
+               mxt_free_object_table(data);
        } else {
                enable_irq(data->irq);
        }
 
-       mxt_free_object_table(data);
        reinit_completion(&data->bl_completion);
 
        ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false);
@@ -2210,6 +2222,7 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
        return 0;
 
 err_free_object:
+       mxt_free_input_device(data);
        mxt_free_object_table(data);
 err_free_irq:
        free_irq(client->irq, data);
@@ -2224,7 +2237,7 @@ static int mxt_remove(struct i2c_client *client)
 
        sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
        free_irq(data->irq, data);
-       input_unregister_device(data->input_dev);
+       mxt_free_input_device(data);
        mxt_free_object_table(data);
        kfree(data);
 
index 16b52115c27fd49fc9cce79735df7fcad18347c5..705ffa1e064a9ceb391abf51b28343bc3b2ed7ce 100644 (file)
@@ -41,7 +41,7 @@
  */
 static int rpu = 8;
 module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
 
 /*
  * Set current used for pressure measurement.
index 7405353199d7353e31e1bfac4b2f39db3b222fc3..572a5a64face0bff34c0099fa35213442b8a0860 100644 (file)
@@ -41,7 +41,7 @@
  */
 static int rpu = 8;
 module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
 
 /*
  * Set current used for pressure measurement.
index ca18d6d42a9be2a90389882f737a811d2c63dc54..a83cc2a2a2cab5464bd7b4f85d514b00afe580f2 100644 (file)
 #define ID0_CTTW                       (1 << 14)
 #define ID0_NUMIRPT_SHIFT              16
 #define ID0_NUMIRPT_MASK               0xff
+#define ID0_NUMSIDB_SHIFT              9
+#define ID0_NUMSIDB_MASK               0xf
 #define ID0_NUMSMRG_SHIFT              0
 #define ID0_NUMSMRG_MASK               0xff
 
@@ -524,9 +526,18 @@ static int register_smmu_master(struct arm_smmu_device *smmu,
        master->of_node                 = masterspec->np;
        master->cfg.num_streamids       = masterspec->args_count;
 
-       for (i = 0; i < master->cfg.num_streamids; ++i)
-               master->cfg.streamids[i] = masterspec->args[i];
+       for (i = 0; i < master->cfg.num_streamids; ++i) {
+               u16 streamid = masterspec->args[i];
 
+               if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+                    (streamid >= smmu->num_mapping_groups)) {
+                       dev_err(dev,
+                               "stream ID for master device %s greater than maximum allowed (%d)\n",
+                               masterspec->np->name, smmu->num_mapping_groups);
+                       return -ERANGE;
+               }
+               master->cfg.streamids[i] = streamid;
+       }
        return insert_smmu_master(smmu, master);
 }
 
@@ -623,7 +634,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 
        if (fsr & FSR_IGN)
                dev_err_ratelimited(smmu->dev,
-                                   "Unexpected context fault (fsr 0x%u)\n",
+                                   "Unexpected context fault (fsr 0x%x)\n",
                                    fsr);
 
        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
@@ -752,6 +763,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                        reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
                        break;
                case 39:
+               case 40:
                        reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
                        break;
                case 42:
@@ -773,6 +785,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                        reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
                        break;
                case 39:
+               case 40:
                        reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
                        break;
                case 42:
@@ -843,8 +856,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
        reg |= TTBCR_EAE |
              (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
              (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
-             (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
-             (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+             (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
+
+       if (!stage1)
+               reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 
        /* MAIR0 (stage-1 only) */
@@ -868,10 +884,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                                        struct arm_smmu_device *smmu)
 {
-       int irq, ret, start;
+       int irq, start, ret = 0;
+       unsigned long flags;
        struct arm_smmu_domain *smmu_domain = domain->priv;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
+       spin_lock_irqsave(&smmu_domain->lock, flags);
+       if (smmu_domain->smmu)
+               goto out_unlock;
+
        if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
                /*
                 * We will likely want to change this if/when KVM gets
@@ -890,7 +911,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                      smmu->num_context_banks);
        if (IS_ERR_VALUE(ret))
-               return ret;
+               goto out_unlock;
 
        cfg->cbndx = ret;
        if (smmu->version == 1) {
@@ -900,6 +921,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                cfg->irptndx = cfg->cbndx;
        }
 
+       ACCESS_ONCE(smmu_domain->smmu) = smmu;
+       arm_smmu_init_context_bank(smmu_domain);
+       spin_unlock_irqrestore(&smmu_domain->lock, flags);
+
        irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
        ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
                          "arm-smmu-context-fault", domain);
@@ -907,15 +932,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        cfg->irptndx, irq);
                cfg->irptndx = INVALID_IRPTNDX;
-               goto out_free_context;
        }
 
-       smmu_domain->smmu = smmu;
-       arm_smmu_init_context_bank(smmu_domain);
        return 0;
 
-out_free_context:
-       __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+out_unlock:
+       spin_unlock_irqrestore(&smmu_domain->lock, flags);
        return ret;
 }
 
@@ -975,7 +997,6 @@ static void arm_smmu_free_ptes(pmd_t *pmd)
 {
        pgtable_t table = pmd_pgtable(*pmd);
 
-       pgtable_page_dtor(table);
        __free_page(table);
 }
 
@@ -1108,6 +1129,9 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        struct arm_smmu_smr *smrs = cfg->smrs;
 
+       if (!smrs)
+               return;
+
        /* Invalidate the SMRs before freeing back to the allocator */
        for (i = 0; i < cfg->num_streamids; ++i) {
                u8 idx = smrs[i].idx;
@@ -1120,20 +1144,6 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
        kfree(smrs);
 }
 
-static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
-                                          struct arm_smmu_master_cfg *cfg)
-{
-       int i;
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
-       for (i = 0; i < cfg->num_streamids; ++i) {
-               u16 sid = cfg->streamids[i];
-
-               writel_relaxed(S2CR_TYPE_BYPASS,
-                              gr0_base + ARM_SMMU_GR0_S2CR(sid));
-       }
-}
-
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
                                      struct arm_smmu_master_cfg *cfg)
 {
@@ -1160,23 +1170,30 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
                                          struct arm_smmu_master_cfg *cfg)
 {
+       int i;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
        /*
         * We *must* clear the S2CR first, because freeing the SMR means
         * that it can be re-allocated immediately.
         */
-       arm_smmu_bypass_stream_mapping(smmu, cfg);
+       for (i = 0; i < cfg->num_streamids; ++i) {
+               u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+
+               writel_relaxed(S2CR_TYPE_BYPASS,
+                              gr0_base + ARM_SMMU_GR0_S2CR(idx));
+       }
+
        arm_smmu_master_free_smrs(smmu, cfg);
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-       int ret = -EINVAL;
+       int ret;
        struct arm_smmu_domain *smmu_domain = domain->priv;
-       struct arm_smmu_device *smmu;
+       struct arm_smmu_device *smmu, *dom_smmu;
        struct arm_smmu_master_cfg *cfg;
-       unsigned long flags;
 
        smmu = dev_get_master_dev(dev)->archdata.iommu;
        if (!smmu) {
@@ -1188,20 +1205,22 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
         * Sanity check the domain. We don't support domains across
         * different SMMUs.
         */
-       spin_lock_irqsave(&smmu_domain->lock, flags);
-       if (!smmu_domain->smmu) {
+       dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
+       if (!dom_smmu) {
                /* Now that we have a master, we can finalise the domain */
                ret = arm_smmu_init_domain_context(domain, smmu);
                if (IS_ERR_VALUE(ret))
-                       goto err_unlock;
-       } else if (smmu_domain->smmu != smmu) {
+                       return ret;
+
+               dom_smmu = smmu_domain->smmu;
+       }
+
+       if (dom_smmu != smmu) {
                dev_err(dev,
                        "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
-                       dev_name(smmu_domain->smmu->dev),
-                       dev_name(smmu->dev));
-               goto err_unlock;
+                       dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
+               return -EINVAL;
        }
-       spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
        /* Looks ok, so add the device to the domain */
        cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
@@ -1209,10 +1228,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                return -ENODEV;
 
        return arm_smmu_domain_add_master(smmu_domain, cfg);
-
-err_unlock:
-       spin_unlock_irqrestore(&smmu_domain->lock, flags);
-       return ret;
 }
 
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1247,10 +1262,6 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
                        return -ENOMEM;
 
                arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
-               if (!pgtable_page_ctor(table)) {
-                       __free_page(table);
-                       return -ENOMEM;
-               }
                pmd_populate(NULL, pmd, table);
                arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
        }
@@ -1626,7 +1637,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 
        /* Mark all SMRn as invalid and all S2CRn as bypass */
        for (i = 0; i < smmu->num_mapping_groups; ++i) {
-               writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
+               writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
                writel_relaxed(S2CR_TYPE_BYPASS,
                        gr0_base + ARM_SMMU_GR0_S2CR(i));
        }
@@ -1761,6 +1772,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                dev_notice(smmu->dev,
                           "\tstream matching with %u register groups, mask 0x%x",
                           smmu->num_mapping_groups, mask);
+       } else {
+               smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
+                                          ID0_NUMSIDB_MASK;
        }
 
        /* ID1 */
@@ -1794,11 +1808,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * Stage-1 output limited by stage-2 input size due to pgd
         * allocation (PTRS_PER_PGD).
         */
+       if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
 #ifdef CONFIG_64BIT
-       smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
+               smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
 #else
-       smmu->s1_output_size = min(32UL, size);
+               smmu->s1_output_size = min(32UL, size);
 #endif
+       } else {
+               smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT,
+                                            size);
+       }
 
        /* The stage-2 output mask is also applied for bypass */
        size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
@@ -1889,6 +1908,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
                smmu->irqs[i] = irq;
        }
 
+       err = arm_smmu_device_cfg_probe(smmu);
+       if (err)
+               return err;
+
        i = 0;
        smmu->masters = RB_ROOT;
        while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
@@ -1905,10 +1928,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        }
        dev_notice(dev, "registered %d master devices\n", i);
 
-       err = arm_smmu_device_cfg_probe(smmu);
-       if (err)
-               goto out_put_masters;
-
        parse_driver_options(smmu);
 
        if (smmu->version > 1 &&
index 60ab474bfff38e58ba1e246d7da6ca3a201677cb..06d268abe951bebd946deac82cf092f3f7a67ee3 100644 (file)
@@ -678,8 +678,7 @@ static int __init dmar_acpi_dev_scope_init(void)
                                       andd->device_name);
                                continue;
                        }
-                       acpi_bus_get_device(h, &adev);
-                       if (!adev) {
+                       if (acpi_bus_get_device(h, &adev)) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->device_name);
                                continue;
index 61d1dafa242d7d01bfff167f610774aff547b1eb..56feed7cec15ca77ef23df1e214e79f58d74935c 100644 (file)
@@ -984,7 +984,7 @@ static int fsl_pamu_add_device(struct device *dev)
        struct iommu_group *group = ERR_PTR(-ENODEV);
        struct pci_dev *pdev;
        const u32 *prop;
-       int ret, len;
+       int ret = 0, len;
 
        /*
         * For platform devices we allocate a separate group for
@@ -1007,7 +1007,13 @@ static int fsl_pamu_add_device(struct device *dev)
        if (IS_ERR(group))
                return PTR_ERR(group);
 
-       ret = iommu_group_add_device(group, dev);
+       /*
+        * Check if device has already been added to an iommu group.
+        * Group could have already been created for a PCI device in
+        * the iommu_group_get_for_dev path.
+        */
+       if (!dev->iommu_group)
+               ret = iommu_group_add_device(group, dev);
 
        iommu_group_put(group);
        return ret;
index ac4adb337038bc990077a2c0557d1f07d8b1de8e..0639b9274b114ac24de51446a636d43f8497557e 100644 (file)
@@ -678,15 +678,17 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
  */
 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 {
-       struct iommu_group *group = ERR_PTR(-EIO);
+       struct iommu_group *group;
        int ret;
 
        group = iommu_group_get(dev);
        if (group)
                return group;
 
-       if (dev_is_pci(dev))
-               group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
+       if (!dev_is_pci(dev))
+               return ERR_PTR(-EINVAL);
+
+       group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
 
        if (IS_ERR(group))
                return group;
index f8636a650cf6489a16d21b09df4e6c49127c5299..5945223b73fa2ceba647649e6c16c84fc5447d08 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
index 85c2985d8bcb5040aeae9c343cacc1352fafe125..bbbaf5de65d2cda705949998e4e3a62ce813496e 100644 (file)
@@ -220,7 +220,7 @@ static int __init crossbar_of_init(struct device_node *node)
                        of_property_read_u32_index(node,
                                                   "ti,irqs-reserved",
                                                   i, &entry);
-                       if (entry > max) {
+                       if (entry >= max) {
                                pr_err("Invalid reserved entry\n");
                                ret = -EINVAL;
                                goto err_irq_map;
@@ -238,7 +238,7 @@ static int __init crossbar_of_init(struct device_node *node)
                        of_property_read_u32_index(node,
                                                   "ti,irqs-skip",
                                                   i, &entry);
-                       if (entry > max) {
+                       if (entry >= max) {
                                pr_err("Invalid skip entry\n");
                                ret = -EINVAL;
                                goto err_irq_map;
index 57eaa5a0b1e39fb4154946700bf83dcfddc4f9b1..a0698b4f03037f8c8717e34b896b6f454fe28e59 100644 (file)
@@ -36,7 +36,7 @@
 struct gic_chip_data {
        void __iomem            *dist_base;
        void __iomem            **redist_base;
-       void __percpu __iomem   **rdist;
+       void __iomem * __percpu *rdist;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     redist_regions;
@@ -104,7 +104,7 @@ static void gic_redist_wait_for_rwp(void)
 }
 
 /* Low level accessors */
-static u64 gic_read_iar(void)
+static u64 __maybe_unused gic_read_iar(void)
 {
        u64 irqstat;
 
@@ -112,24 +112,24 @@ static u64 gic_read_iar(void)
        return irqstat;
 }
 
-static void gic_write_pmr(u64 val)
+static void __maybe_unused gic_write_pmr(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
 }
 
-static void gic_write_ctlr(u64 val)
+static void __maybe_unused gic_write_ctlr(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
        isb();
 }
 
-static void gic_write_grpen1(u64 val)
+static void __maybe_unused gic_write_grpen1(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
        isb();
 }
 
-static void gic_write_sgi1r(u64 val)
+static void __maybe_unused gic_write_sgi1r(u64 val)
 {
        asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
 }
@@ -200,19 +200,6 @@ static void gic_poke_irq(struct irq_data *d, u32 offset)
        rwp_wait();
 }
 
-static int gic_peek_irq(struct irq_data *d, u32 offset)
-{
-       u32 mask = 1 << (gic_irq(d) % 32);
-       void __iomem *base;
-
-       if (gic_irq_in_rdist(d))
-               base = gic_data_rdist_sgi_base();
-       else
-               base = gic_data.dist_base;
-
-       return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
-}
-
 static void gic_mask_irq(struct irq_data *d)
 {
        gic_poke_irq(d, GICD_ICENABLER);
@@ -401,6 +388,19 @@ static void gic_cpu_init(void)
 }
 
 #ifdef CONFIG_SMP
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+       u32 mask = 1 << (gic_irq(d) % 32);
+       void __iomem *base;
+
+       if (gic_irq_in_rdist(d))
+               base = gic_data_rdist_sgi_base();
+       else
+               base = gic_data.dist_base;
+
+       return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
 static int gic_secondary_init(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
 {
index 4b959e606fe8d64494af44f9f8f66f6f069c22d3..dda6dbc23565aa48593d5e3112da13a58acdc3f7 100644 (file)
@@ -867,7 +867,7 @@ static int gic_routable_irq_domain_xlate(struct irq_domain *d,
        return 0;
 }
 
-const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
+static const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
        .map = gic_routable_irq_domain_map,
        .unmap = gic_routable_irq_domain_unmap,
        .xlate = gic_routable_irq_domain_xlate,
index 129729d35478bf5934cb69c90a688a9c43399759..aa29198fca3e2b2c892268eb2549444b1beb499c 100644 (file)
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/timer.h>
 #include <linux/err.h>
 #include <linux/ctype.h>
 #include <linux/leds.h>
-#include <linux/workqueue.h>
 #include "leds.h"
 
 static struct class *leds_class;
@@ -97,10 +97,9 @@ static const struct attribute_group *led_groups[] = {
        NULL,
 };
 
-static void led_work_function(struct work_struct *ws)
+static void led_timer_function(unsigned long data)
 {
-       struct led_classdev *led_cdev =
-               container_of(ws, struct led_classdev, blink_work.work);
+       struct led_classdev *led_cdev = (void *)data;
        unsigned long brightness;
        unsigned long delay;
 
@@ -144,8 +143,7 @@ static void led_work_function(struct work_struct *ws)
                }
        }
 
-       queue_delayed_work(system_wq, &led_cdev->blink_work,
-                          msecs_to_jiffies(delay));
+       mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
 }
 
 static void set_brightness_delayed(struct work_struct *ws)
@@ -233,7 +231,9 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 
        INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
 
-       INIT_DELAYED_WORK(&led_cdev->blink_work, led_work_function);
+       init_timer(&led_cdev->blink_timer);
+       led_cdev->blink_timer.function = led_timer_function;
+       led_cdev->blink_timer.data = (unsigned long)led_cdev;
 
 #ifdef CONFIG_LEDS_TRIGGERS
        led_trigger_set_default(led_cdev);
index 4bb116867b88af7249c1576e614e8ad0685f7945..71b40d3bf77604e32829f391b6e804bbeefb0cf1 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/module.h>
 #include <linux/rwsem.h>
 #include <linux/leds.h>
-#include <linux/workqueue.h>
 #include "leds.h"
 
 DECLARE_RWSEM(leds_list_lock);
@@ -52,7 +51,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
                return;
        }
 
-       queue_delayed_work(system_wq, &led_cdev->blink_work, 1);
+       mod_timer(&led_cdev->blink_timer, jiffies + 1);
 }
 
 
@@ -76,7 +75,7 @@ void led_blink_set(struct led_classdev *led_cdev,
                   unsigned long *delay_on,
                   unsigned long *delay_off)
 {
-       cancel_delayed_work_sync(&led_cdev->blink_work);
+       del_timer_sync(&led_cdev->blink_timer);
 
        led_cdev->flags &= ~LED_BLINK_ONESHOT;
        led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
@@ -91,7 +90,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
                           int invert)
 {
        if ((led_cdev->flags & LED_BLINK_ONESHOT) &&
-            delayed_work_pending(&led_cdev->blink_work))
+            timer_pending(&led_cdev->blink_timer))
                return;
 
        led_cdev->flags |= LED_BLINK_ONESHOT;
@@ -108,7 +107,7 @@ EXPORT_SYMBOL(led_blink_set_oneshot);
 
 void led_stop_software_blink(struct led_classdev *led_cdev)
 {
-       cancel_delayed_work_sync(&led_cdev->blink_work);
+       del_timer_sync(&led_cdev->blink_timer);
        led_cdev->blink_delay_on = 0;
        led_cdev->blink_delay_off = 0;
 }
@@ -117,7 +116,7 @@ EXPORT_SYMBOL_GPL(led_stop_software_blink);
 void led_set_brightness(struct led_classdev *led_cdev,
                        enum led_brightness brightness)
 {
-       /* delay brightness setting if need to stop soft-blink work */
+       /* delay brightness setting if need to stop soft-blink timer */
        if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
                led_cdev->delayed_set_value = brightness;
                schedule_work(&led_cdev->set_brightness_work);
index 1af40ee209e2b9c0d46b9873f3bc32fd78234cd7..7130505c242550f3321448094e5b110987ca5969 100644 (file)
@@ -895,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
        struct cache *cache = mg->cache;
 
        if (mg->writeback) {
-               cell_defer(cache, mg->old_ocell, false);
                clear_dirty(cache, mg->old_oblock, mg->cblock);
+               cell_defer(cache, mg->old_ocell, false);
                cleanup_migration(mg);
                return;
 
@@ -951,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
                }
 
        } else {
+               clear_dirty(cache, mg->new_oblock, mg->cblock);
                if (mg->requeue_holder)
                        cell_defer(cache, mg->new_ocell, true);
                else {
                        bio_endio(mg->new_ocell->holder, 0);
                        cell_defer(cache, mg->new_ocell, false);
                }
-               clear_dirty(cache, mg->new_oblock, mg->cblock);
                cleanup_migration(mg);
        }
 }
index 7ffdb589841ed2fe2e25ec2580218e4dfd07a2a2..7e1efd5f58f0b04ed8b303d142628755f56a72d7 100644 (file)
@@ -79,6 +79,11 @@ static void firmware_load(const struct firmware *fw, void *context)
        u32 jedec_id;
        u32 status;
 
+       if (fw == NULL) {
+               dev_err(&spi->dev, "Cannot load firmware, aborting\n");
+               return;
+       }
+
        if (fw->size == 0) {
                dev_err(&spi->dev, "Error: Firmware size is 0!\n");
                return;
index 5a4bfe33112aba9307bdeac6877a4404b24bc7e7..46c4643b7a0776986890bef600674ec3823d01f0 100644 (file)
@@ -1434,6 +1434,10 @@ static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
 
                                mutex_lock(&chip->mutex);
                                ret = get_chip(map, chip, base, FL_LOCKING);
+                               if (ret) {
+                                       mutex_unlock(&chip->mutex);
+                                       return ret;
+                               }
 
                                /* Enter lock register command */
                                cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
index 059c7414e30318b3ccaab436a248d63319823369..3fe45c705933f08a0aaa5b269362e1afe4300813 100644 (file)
@@ -2177,10 +2177,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                        vp->tx_ring[entry].frag[i+1].addr =
-                                       cpu_to_le32(pci_map_single(
-                                               VORTEX_PCI(vp),
-                                               (void *)skb_frag_address(frag),
-                                               skb_frag_size(frag), PCI_DMA_TODEVICE));
+                                       cpu_to_le32(skb_frag_dma_map(
+                                               &VORTEX_PCI(vp)->dev,
+                                               frag,
+                                               frag->page_offset, frag->size, DMA_TO_DEVICE));
 
                        if (i == skb_shinfo(skb)->nr_frags-1)
                                        vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
index 23578dfee249879064c1b86526ee80aead68b79f..3005155e412b2e0e1ab1d2c969e4895a2723644c 100644 (file)
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth)
        GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
 }
 
+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+       wmb(); /* BDs must been written to memory before enabling TX */
+       GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
+
 static inline void greth_disable_tx(struct greth_private *greth)
 {
        GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
@@ -447,29 +453,30 @@ out:
        return err;
 }
 
+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+       if (tx_next < tx_last)
+               return (tx_last - tx_next) - 1;
+       else
+               return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
 
 static netdev_tx_t
 greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
        struct greth_private *greth = netdev_priv(dev);
        struct greth_bd *bdp;
-       u32 status = 0, dma_addr, ctrl;
+       u32 status, dma_addr;
        int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
        unsigned long flags;
+       u16 tx_last;
 
        nr_frags = skb_shinfo(skb)->nr_frags;
+       tx_last = greth->tx_last;
+       rmb(); /* tx_last is updated by the poll task */
 
-       /* Clean TX Ring */
-       greth_clean_tx_gbit(dev);
-
-       if (greth->tx_free < nr_frags + 1) {
-               spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
-               ctrl = GRETH_REGLOAD(greth->regs->control);
-               /* Enable TX IRQ only if not already in poll() routine */
-               if (ctrl & GRETH_RXI)
-                       GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+       if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
                netif_stop_queue(dev);
-               spin_unlock_irqrestore(&greth->devlock, flags);
                err = NETDEV_TX_BUSY;
                goto out;
        }
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
        /* Linear buf */
        if (nr_frags != 0)
                status = GRETH_TXBD_MORE;
+       else
+               status = GRETH_BD_IE;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                status |= GRETH_TXBD_CSALL;
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
        /* Enable the descriptor chain by enabling the first descriptor */
        bdp = greth->tx_bd_base + greth->tx_next;
-       greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-       greth->tx_next = curr_tx;
-       greth->tx_free -= nr_frags + 1;
-
-       wmb();
+       greth_write_bd(&bdp->stat,
+                      greth_read_bd(&bdp->stat) | GRETH_BD_EN);
 
        spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
-       greth_enable_tx(greth);
+       greth->tx_next = curr_tx;
+       greth_enable_tx_and_irq(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);
 
        return NETDEV_TX_OK;
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev)
        if (greth->tx_free > 0) {
                netif_wake_queue(dev);
        }
-
 }
 
 static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 {
        struct greth_private *greth;
        struct greth_bd *bdp, *bdp_last_frag;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        u32 stat;
        int nr_frags, i;
+       u16 tx_last;
 
        greth = netdev_priv(dev);
+       tx_last = greth->tx_last;
 
-       while (greth->tx_free < GRETH_TXBD_NUM) {
+       while (tx_last != greth->tx_next) {
 
-               skb = greth->tx_skbuff[greth->tx_last];
+               skb = greth->tx_skbuff[tx_last];
 
                nr_frags = skb_shinfo(skb)->nr_frags;
 
                /* We only clean fully completed SKBs */
-               bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+               bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
 
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                mb();
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev)
                if (stat & GRETH_BD_EN)
                        break;
 
-               greth->tx_skbuff[greth->tx_last] = NULL;
+               greth->tx_skbuff[tx_last] = NULL;
 
                greth_update_tx_stats(dev, stat);
                dev->stats.tx_bytes += skb->len;
 
-               bdp = greth->tx_bd_base + greth->tx_last;
+               bdp = greth->tx_bd_base + tx_last;
 
-               greth->tx_last = NEXT_TX(greth->tx_last);
+               tx_last = NEXT_TX(tx_last);
 
                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 
                for (i = 0; i < nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-                       bdp = greth->tx_bd_base + greth->tx_last;
+                       bdp = greth->tx_bd_base + tx_last;
 
                        dma_unmap_page(greth->dev,
                                       greth_read_bd(&bdp->addr),
                                       skb_frag_size(frag),
                                       DMA_TO_DEVICE);
 
-                       greth->tx_last = NEXT_TX(greth->tx_last);
+                       tx_last = NEXT_TX(tx_last);
                }
-               greth->tx_free += nr_frags+1;
                dev_kfree_skb(skb);
        }
+       if (skb) { /* skb is set only if the above while loop was entered */
+               wmb();
+               greth->tx_last = tx_last;
 
-       if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
-               netif_wake_queue(dev);
+               if (netif_queue_stopped(dev) &&
+                   (greth_num_free_bds(tx_last, greth->tx_next) >
+                   (MAX_SKB_FRAGS+1)))
+                       netif_wake_queue(dev);
+       }
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget)
        greth = container_of(napi, struct greth_private, napi);
 
 restart_txrx_poll:
-       if (netif_queue_stopped(greth->netdev)) {
-               if (greth->gbit_mac)
-                       greth_clean_tx_gbit(greth->netdev);
-               else
-                       greth_clean_tx(greth->netdev);
-       }
-
        if (greth->gbit_mac) {
+               greth_clean_tx_gbit(greth->netdev);
                work_done += greth_rx_gbit(greth->netdev, budget - work_done);
        } else {
+               if (netif_queue_stopped(greth->netdev))
+                       greth_clean_tx(greth->netdev);
                work_done += greth_rx(greth->netdev, budget - work_done);
        }
 
@@ -983,7 +992,8 @@ restart_txrx_poll:
                spin_lock_irqsave(&greth->devlock, flags);
 
                ctrl = GRETH_REGLOAD(greth->regs->control);
-               if (netif_queue_stopped(greth->netdev)) {
+               if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+                   (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
                        GRETH_REGSAVE(greth->regs->control,
                                        ctrl | GRETH_TXI | GRETH_RXI);
                        mask = GRETH_INT_RX | GRETH_INT_RE |
index 232a622a85b7006ce8cb235f1d9790ea7b2c4929..ae16ac94daf825a5b800356603cb1c1213636d34 100644 (file)
@@ -107,7 +107,7 @@ struct greth_private {
 
        u16 tx_next;
        u16 tx_last;
-       u16 tx_free;
+       u16 tx_free; /* only used on 10/100Mbit */
        u16 rx_cur;
 
        struct greth_regs *regs;        /* Address of controller registers. */
index 346592dca33ce98dd94e24f33f99eabb373f60cb..a3c11355a34dd199a40252dab901ab23cb908ffb 100644 (file)
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
        struct xgbe_prv_data *pdata = filp->private_data;
        unsigned int value;
 
-       value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-                                          pdata->debugfs_xpcs_reg);
+       value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+                          pdata->debugfs_xpcs_reg);
 
        return xgbe_common_read(buffer, count, ppos, value);
 }
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
        if (len < 0)
                return len;
 
-       pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-                                   pdata->debugfs_xpcs_reg, value);
+       XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+                   value);
 
        return len;
 }
index edaca4496264862063835a444594471df29658a2..ea273836d999c854d89d45789e33d1ff81afc92b 100644 (file)
@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 
        /* Clear MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 
        /* Set MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
 
        /* Enable all counter interrupts */
-       XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
-       XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+       XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+       XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
 }
 
 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
 {
        unsigned int i, count;
 
+       if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+               return 0;
+
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
 
@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
 }
 
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
-                                                 unsigned char queue_count)
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+                                                 unsigned int queue_count)
 {
        unsigned int q_fifo_size = 0;
        enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
                q_fifo_size = XGBE_FIFO_SIZE_KB(256);
                break;
        }
+
+       /* The configured value is not the actual amount of fifo RAM */
+       q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
        q_fifo_size = q_fifo_size / queue_count;
 
        /* Set the queue fifo size programmable value */
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
                xgbe_disable_rx_vlan_stripping(pdata);
 }
 
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+       bool read_hi;
+       u64 val;
+
+       switch (reg_lo) {
+       /* These registers are always 64 bit */
+       case MMC_TXOCTETCOUNT_GB_LO:
+       case MMC_TXOCTETCOUNT_G_LO:
+       case MMC_RXOCTETCOUNT_GB_LO:
+       case MMC_RXOCTETCOUNT_G_LO:
+               read_hi = true;
+               break;
+
+       default:
+               read_hi = false;
+       };
+
+       val = XGMAC_IOREAD(pdata, reg_lo);
+
+       if (read_hi)
+               val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+       return val;
+}
+
 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
 {
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
                stats->txoctetcount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
                stats->txframecount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
                stats->txbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
                stats->txmulticastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
                stats->tx64octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
                stats->tx65to127octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
                stats->tx128to255octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
                stats->tx256to511octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
                stats->tx512to1023octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
                stats->tx1024tomaxoctets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
                stats->txunicastframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
                stats->txmulticastframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
                stats->txbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
                stats->txunderflowerror +=
-                       XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
                stats->txoctetcount_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
                stats->txframecount_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
                stats->txpauseframes +=
-                       XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+                       xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
                stats->txvlanframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
 }
 
 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
                stats->rxframecount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
                stats->rxoctetcount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
                stats->rxoctetcount_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
                stats->rxbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
                stats->rxmulticastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
                stats->rxcrcerror +=
-                       XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
                stats->rxrunterror +=
-                       XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+                       xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
                stats->rxjabbererror +=
-                       XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+                       xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
                stats->rxundersize_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+                       xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
                stats->rxoversize_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+                       xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
                stats->rx64octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
                stats->rx65to127octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
                stats->rx128to255octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
                stats->rx256to511octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
                stats->rx512to1023octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
                stats->rx1024tomaxoctets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
                stats->rxunicastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
                stats->rxlengtherror +=
-                       XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
                stats->rxoutofrangetype +=
-                       XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
                stats->rxpauseframes +=
-                       XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+                       xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
                stats->rxfifooverflow +=
-                       XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+                       xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
                stats->rxvlanframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
 
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
                stats->rxwatchdogerror +=
-                       XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+                       xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
 }
 
 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
 
        stats->txoctetcount_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
 
        stats->txframecount_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
 
        stats->txbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
 
        stats->txmulticastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
 
        stats->tx64octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
 
        stats->tx65to127octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
 
        stats->tx128to255octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
 
        stats->tx256to511octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
 
        stats->tx512to1023octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
 
        stats->tx1024tomaxoctets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
 
        stats->txunicastframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
 
        stats->txmulticastframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
 
        stats->txbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
 
        stats->txunderflowerror +=
-               XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+               xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
 
        stats->txoctetcount_g +=
-               XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
 
        stats->txframecount_g +=
-               XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
 
        stats->txpauseframes +=
-               XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+               xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
 
        stats->txvlanframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
 
        stats->rxframecount_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
 
        stats->rxoctetcount_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
 
        stats->rxoctetcount_g +=
-               XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
 
        stats->rxbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
 
        stats->rxmulticastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
 
        stats->rxcrcerror +=
-               XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+               xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
 
        stats->rxrunterror +=
-               XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+               xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
 
        stats->rxjabbererror +=
-               XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+               xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
 
        stats->rxundersize_g +=
-               XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+               xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
 
        stats->rxoversize_g +=
-               XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+               xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
 
        stats->rx64octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
 
        stats->rx65to127octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
 
        stats->rx128to255octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
 
        stats->rx256to511octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
 
        stats->rx512to1023octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
 
        stats->rx1024tomaxoctets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
 
        stats->rxunicastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
 
        stats->rxlengtherror +=
-               XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+               xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
 
        stats->rxoutofrangetype +=
-               XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+               xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
 
        stats->rxpauseframes +=
-               XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+               xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
 
        stats->rxfifooverflow +=
-               XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+               xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
 
        stats->rxvlanframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
 
        stats->rxwatchdogerror +=
-               XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+               xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
 
        /* Un-freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
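The xgbe_mmc_read() helper that replaces the raw XGMAC_IOREAD() calls above is not visible in this excerpt. As a rough sketch of what such a wrapper looks like, assuming each wide MMC counter exposes a LO/HI register pair at consecutive offsets (the +4 HI offset and the 64-bit return are illustrative assumptions, not taken from this diff):

    /* Illustrative only: widen an MMC counter read so the software
     * accumulators keep the high half of 64-bit hardware counters.
     * The real helper may switch between 32- and 64-bit reads per
     * register; the reg_lo + 4 HI offset here is an assumption.
     */
    static u64 xgbe_mmc_read_sketch(struct xgbe_prv_data *pdata,
                                    unsigned int reg_lo)
    {
            u64 val;

            val = XGMAC_IOREAD(pdata, reg_lo);
            val |= (u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32;

            return val;
    }
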
index dc84f7193c2db62aaf5d922f9beb409b13a975c3..b26d75856553bf62c975e533dc815051f7968de5 100644 (file)
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 
        memset(hw_feat, 0, sizeof(*hw_feat));
 
+       hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
        /* Hardware feature register 0 */
        hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
index a076aca138a12ce4cdf69fb872d1d61c4202fbfa..46f613028e9c00ba9b659a4b8ccc7a8f5f47c644 100644 (file)
@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
                             struct ethtool_drvinfo *drvinfo)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
 
        strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
                sizeof(drvinfo->bus_info));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
        drvinfo->n_stats = XGBE_STATS_COUNT;
 }
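With MAC_VR cached once in hw_feat->version at feature-discovery time, get_drvinfo() above can decode the user, device and Synopsys version fields from the cached word with XGMAC_GET_BITS() instead of issuing three more MMIO reads. The extraction itself is plain shift-and-mask; a generic sketch of the idea, where the _INDEX/_WIDTH pairing is an assumed naming convention rather than something shown in this diff:

    /* Purely illustrative field extraction from a cached register value. */
    #define GET_FIELD(_var, _index, _width)                                 \
            (((_var) >> (_index)) & ((0x1U << (_width)) - 1))

    /* e.g. GET_FIELD(hw_feat->version, SNPSVER_INDEX, SNPSVER_WIDTH)
     * would return the Synopsys version sub-field (names hypothetical).
     */
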
 
index 8aa6a9353f7bc5b5b3cf7592bc42186a93ca0bce..bdf9cfa70e88c4dac3c7f3c439fa2b3eef29aaa6 100644 (file)
@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
                }
 
                if (i < pdata->rx_ring_count) {
-                       spin_lock_init(&tx_ring->lock);
+                       spin_lock_init(&rx_ring->lock);
                        channel->rx_ring = rx_ring++;
                }
 
index 07bf70a82908ead8e75b6e4704946ae10c961b37..e9fe6e6ddcc34acd7469aef6b523855e535f5a99 100644 (file)
 #define XGMAC_DRIVER_CONTEXT   1
 #define XGMAC_IOCTL_CONTEXT    2
 
+#define XGBE_FIFO_MAX          81920
 #define XGBE_FIFO_SIZE_B(x)    (x)
 #define XGBE_FIFO_SIZE_KB(x)   (x * 1024)
 
@@ -526,6 +527,9 @@ struct xgbe_desc_if {
  * or configurations are present in the device.
  */
 struct xgbe_hw_features {
+       /* HW Version */
+       unsigned int version;
+
        /* HW Feature Register0 */
        unsigned int gmii;              /* 1000 Mbps support */
        unsigned int vlhash;            /* VLAN Hash Filter */
index 616dff6d3f5f3cc2837cdd9de776e943283a24fc..f4054d242f3c7ac6e52437c5cb795cac8e3da2dd 100644 (file)
@@ -1,5 +1,6 @@
 config NET_XGENE
        tristate "APM X-Gene SoC Ethernet Driver"
+       depends on HAS_DMA
        select PHYLIB
        help
          This is the Ethernet driver for the on-chip ethernet interface on the
index 7dcfb19a31c888894121aa03c83bc8fe439bc4cf..d8d07a818b89bc694a77b2bdc3172f5bbb6d7010 100644 (file)
@@ -84,7 +84,7 @@ config BNX2
 
 config CNIC
        tristate "QLogic CNIC support"
-       depends on PCI
+       depends on PCI && (IPV6 || IPV6=n)
        select BNX2
        select UIO
        ---help---
index 5ba8af50c84f2bb3ebf9300c9ad8765d25a06a8d..c4daa068f1db5e37bdea6f160dde26b7c13d1659 100644 (file)
@@ -2233,7 +2233,12 @@ struct shmem2_region {
        u32 reserved3;                          /* Offset 0x14C */
        u32 reserved4;                          /* Offset 0x150 */
        u32 link_attr_sync[PORT_MAX];           /* Offset 0x154 */
-       #define LINK_ATTR_SYNC_KR2_ENABLE       (1<<0)
+       #define LINK_ATTR_SYNC_KR2_ENABLE       0x00000001
+       #define LINK_SFP_EEPROM_COMP_CODE_MASK  0x0000ff00
+       #define LINK_SFP_EEPROM_COMP_CODE_SHIFT          8
+       #define LINK_SFP_EEPROM_COMP_CODE_SR    0x00001000
+       #define LINK_SFP_EEPROM_COMP_CODE_LR    0x00002000
+       #define LINK_SFP_EEPROM_COMP_CODE_LRM   0x00004000
 
        u32 reserved5[2];
        u32 reserved6[PORT_MAX];
index 53fb4fa61b405aa540239492fef2d5a4f94fa9bd..549549eaf580f79518295bd4a13234cef8520968 100644 (file)
@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
                         LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
 
 #define SFP_EEPROM_CON_TYPE_ADDR               0x2
+       #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
        #define SFP_EEPROM_CON_TYPE_VAL_LC      0x7
        #define SFP_EEPROM_CON_TYPE_VAL_COPPER  0x21
        #define SFP_EEPROM_CON_TYPE_VAL_RJ45    0x22
 
 
-#define SFP_EEPROM_COMP_CODE_ADDR              0x3
-       #define SFP_EEPROM_COMP_CODE_SR_MASK    (1<<4)
-       #define SFP_EEPROM_COMP_CODE_LR_MASK    (1<<5)
-       #define SFP_EEPROM_COMP_CODE_LRM_MASK   (1<<6)
+#define SFP_EEPROM_10G_COMP_CODE_ADDR          0x3
+       #define SFP_EEPROM_10G_COMP_CODE_SR_MASK        (1<<4)
+       #define SFP_EEPROM_10G_COMP_CODE_LR_MASK        (1<<5)
+       #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK       (1<<6)
+
+#define SFP_EEPROM_1G_COMP_CODE_ADDR           0x6
+       #define SFP_EEPROM_1G_COMP_CODE_SX      (1<<0)
+       #define SFP_EEPROM_1G_COMP_CODE_LX      (1<<1)
+       #define SFP_EEPROM_1G_COMP_CODE_CX      (1<<2)
+       #define SFP_EEPROM_1G_COMP_CODE_BASE_T  (1<<3)
 
 #define SFP_EEPROM_FC_TX_TECH_ADDR             0x8
        #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
                                 reg_set[i].val);
 
        /* Start KR2 work-around timer which handles BCM8073 link-partner */
-       vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
-       bnx2x_update_link_attr(params, vars->link_attr_sync);
+       params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+       bnx2x_update_link_attr(params, params->link_attr_sync);
 }
 
 static void bnx2x_disable_kr2(struct link_params *params,
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params,
        for (i = 0; i < ARRAY_SIZE(reg_set); i++)
                bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
                                 reg_set[i].val);
-       vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
-       bnx2x_update_link_attr(params, vars->link_attr_sync);
+       params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+       bnx2x_update_link_attr(params, params->link_attr_sync);
 
        vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
 }
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params,
                                        ~FEATURE_CONFIG_PFC_ENABLED;
 
        if (SHMEM2_HAS(bp, link_attr_sync))
-               vars->link_attr_sync = SHMEM2_RD(bp,
+               params->link_attr_sync = SHMEM2_RD(bp,
                                                 link_attr_sync[params->port]);
 
        DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u32 sync_offset = 0, phy_idx, media_types;
-       u8 gport, val[2], check_limiting_mode = 0;
+       u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
        *edc_mode = EDC_MODE_LIMITING;
        phy->media_type = ETH_PHY_UNSPECIFIED;
        /* First check for copper cable */
        if (bnx2x_read_sfp_module_eeprom(phy,
                                         params,
                                         I2C_DEV_ADDR_A0,
-                                        SFP_EEPROM_CON_TYPE_ADDR,
-                                        2,
+                                        0,
+                                        SFP_EEPROM_FC_TX_TECH_ADDR + 1,
                                         (u8 *)val) != 0) {
                DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
                return -EINVAL;
        }
-
-       switch (val[0]) {
+       params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+       params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+               LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+       bnx2x_update_link_attr(params, params->link_attr_sync);
+       switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
        case SFP_EEPROM_CON_TYPE_VAL_COPPER:
        {
                u8 copper_module_type;
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                /* Check if it's an active cable (includes SFP+ module)
                 * or a passive cable
                 */
-               if (bnx2x_read_sfp_module_eeprom(phy,
-                                              params,
-                                              I2C_DEV_ADDR_A0,
-                                              SFP_EEPROM_FC_TX_TECH_ADDR,
-                                              1,
-                                              &copper_module_type) != 0) {
-                       DP(NETIF_MSG_LINK,
-                               "Failed to read copper-cable-type"
-                               " from SFP+ EEPROM\n");
-                       return -EINVAL;
-               }
+               copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
 
                if (copper_module_type &
                    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                }
                break;
        }
+       case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
        case SFP_EEPROM_CON_TYPE_VAL_LC:
        case SFP_EEPROM_CON_TYPE_VAL_RJ45:
                check_limiting_mode = 1;
-               if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
-                              SFP_EEPROM_COMP_CODE_LR_MASK |
-                              SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+               if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+                    (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+                     SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+                     SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
                        DP(NETIF_MSG_LINK, "1G SFP module detected\n");
-                       gport = params->port;
                        phy->media_type = ETH_PHY_SFP_1G_FIBER;
                        if (phy->req_line_speed != SPEED_1000) {
+                               u8 gport = params->port;
                                phy->req_line_speed = SPEED_1000;
                                if (!CHIP_IS_E1x(bp)) {
                                        gport = BP_PATH(bp) +
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                                           "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
                                           gport);
                        }
+                       if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
+                           SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+                               bnx2x_sfp_set_transmitter(params, phy, 0);
+                               msleep(40);
+                               bnx2x_sfp_set_transmitter(params, phy, 1);
+                       }
                } else {
                        int idx, cfg_idx = 0;
                        DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                break;
        default:
                DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
-                        val[0]);
+                        val[SFP_EEPROM_CON_TYPE_ADDR]);
                return -EINVAL;
        }
        sync_offset = params->shmem_base +
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 
        sigdet = bnx2x_warpcore_get_sigdet(phy, params);
        if (!sigdet) {
-               if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+               if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
                        bnx2x_kr2_recovery(params, vars, phy);
                        DP(NETIF_MSG_LINK, "No sigdet\n");
                }
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 
        /* CL73 has not begun yet */
        if (base_page == 0) {
-               if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+               if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
                        bnx2x_kr2_recovery(params, vars, phy);
                        DP(NETIF_MSG_LINK, "No BP\n");
                }
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
                            ((next_page & 0xe0) == 0x20))));
 
        /* In case KR2 is already disabled, check if we need to re-enable it */
-       if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+       if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
                if (!not_kr2_device) {
                        DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
                           next_page);
index 389f5f8cb0a3c3108f2305b62c0843037ea4f72e..d9cce4c3899b7b9d388cf28a6f4bf0feece3c4b2 100644 (file)
@@ -323,6 +323,9 @@ struct link_params {
 #define LINK_FLAGS_INT_DISABLED                (1<<0)
 #define PHY_INITIALIZED                (1<<1)
        u32 lfa_base;
+
+       /* The same definitions as the shmem2 parameter */
+       u32 link_attr_sync;
 };
 
 /* Output parameters */
@@ -364,8 +367,6 @@ struct link_vars {
        u8 rx_tx_asic_rst;
        u8 turn_to_run_wc_rt;
        u16 rsrv2;
-       /* The same definitions as the shmem2 parameter */
-       u32 link_attr_sync;
 };
 
 /***********************************************************/
index 900cab42081068e699e4bc8e2e0bec71659c604d..d1c093dcb054aebb71b1ee701cf175e8ab804515 100644 (file)
@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp)
        bnx2x_release_phy_lock(bp);
 }
 
+static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+{
+       REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+
+       /* make sure this value is 0 */
+       REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+       REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+}
+
+static void bnx2x_set_endianity(struct bnx2x *bp)
+{
+#ifdef __BIG_ENDIAN
+       bnx2x_config_endianity(bp, 1);
+#else
+       bnx2x_config_endianity(bp, 0);
+#endif
+}
+
+static void bnx2x_reset_endianity(struct bnx2x *bp)
+{
+       bnx2x_config_endianity(bp, 0);
+}
+
 /**
  * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
  *
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 
        bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
        bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
-       REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
-       /* make sure this value is 0 */
-       REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/*     REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
-       REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
+       bnx2x_set_endianity(bp);
        bnx2x_ilt_init_page_size(bp, INITOP_SET);
 
        if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
@@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev,
        bnx2x_iov_remove_one(bp);
 
        /* Power on: we can't let PCI layer write to us while we are in D3 */
-       if (IS_PF(bp))
+       if (IS_PF(bp)) {
                bnx2x_set_power_state(bp, PCI_D0);
 
+               /* Set endianity registers to reset values in case the next
+                * driver boots in a different endianity environment.
+                */
+               bnx2x_reset_endianity(bp);
+       }
+
        /* Disable MSI/MSI-X */
        bnx2x_disable_msi(bp);
 
index 27861a6c7ca55966048b4cb94b0b92a0dde1d269..a6a9f284c8dd762579e062a9d9d62f03e1ce6f96 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/if_vlan.h>
 #include <linux/prefetch.h>
 #include <linux/random.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define BCM_VLAN 1
 #endif
 #include <net/ip.h>
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
                             struct dst_entry **dst)
 {
-#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_IPV6)
        struct flowi6 fl6;
 
        memset(&fl6, 0, sizeof(fl6));
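Both hunks in this file swap open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) tests for IS_ENABLED(), which is true whether the option is built in (=y) or modular (=m); the accompanying CNIC Kconfig change above ("depends on PCI && (IPV6 || IPV6=n)") ensures the driver cannot be built in while IPv6 itself is a module. A minimal sketch of the pattern (the helper name and fallback value are placeholders):

    #include <linux/errno.h>
    #include <linux/kconfig.h>      /* normally pulled in by kbuild */

    #if IS_ENABLED(CONFIG_IPV6)     /* covers both CONFIG_IPV6=y and =m */
    static int sketch_v6_route(void)
    {
            return 0;               /* real IPv6 route lookup would go here */
    }
    #else
    static int sketch_v6_route(void)
    {
            return -ENETUNREACH;    /* no IPv6 support compiled in */
    }
    #endif
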
index 3ac5d23454a8dae9b2e618205d14dc91d822f1fe..cb77ae93d89a120591afc700ae4650d4f4002291 100644 (file)
@@ -11617,6 +11617,12 @@ static int tg3_open(struct net_device *dev)
        struct tg3 *tp = netdev_priv(dev);
        int err;
 
+       if (tp->pcierr_recovery) {
+               netdev_err(dev, "Failed to open device. PCI error recovery "
+                          "in progress\n");
+               return -EAGAIN;
+       }
+
        if (tp->fw_needed) {
                err = tg3_request_firmware(tp);
                if (tg3_asic_rev(tp) == ASIC_REV_57766) {
@@ -11674,6 +11680,12 @@ static int tg3_close(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
 
+       if (tp->pcierr_recovery) {
+               netdev_err(dev, "Failed to close device. PCI error recovery "
+                          "in progress\n");
+               return -EAGAIN;
+       }
+
        tg3_ptp_fini(tp);
 
        tg3_stop(tp);
@@ -17561,6 +17573,7 @@ static int tg3_init_one(struct pci_dev *pdev,
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->irq_sync = 1;
+       tp->pcierr_recovery = false;
 
        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
@@ -18071,6 +18084,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
+       tp->pcierr_recovery = true;
+
        /* We probably don't have netdev yet */
        if (!netdev || !netif_running(netdev))
                goto done;
@@ -18195,6 +18210,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
        tg3_phy_start(tp);
 
 done:
+       tp->pcierr_recovery = false;
        rtnl_unlock();
 }
 
index 461accaf0aa40242c3756880dd6659371cdfe5f0..31c9f829595384cb843e299c6b51054101d8f6c6 100644 (file)
@@ -3407,6 +3407,7 @@ struct tg3 {
 
        struct device                   *hwmon_dev;
        bool                            link_up;
+       bool                            pcierr_recovery;
 };
 
 /* Accessor macros for chip and asic attributes
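The tg3 hunks above introduce a pcierr_recovery flag: tg3_open() and tg3_close() return -EAGAIN while it is set, tg3_io_error_detected() raises it before quiescing the device, and tg3_io_resume() clears it once traffic can restart. A condensed sketch of that guard pattern with placeholder names (struct foo_priv and the foo_* callbacks are generic stand-ins, not the driver's symbols):

    #include <linux/netdevice.h>
    #include <linux/pci.h>

    struct foo_priv {                       /* placeholder private state */
            bool pcierr_recovery;
    };

    static int foo_open(struct net_device *dev)
    {
            struct foo_priv *fp = netdev_priv(dev);

            if (fp->pcierr_recovery)        /* recovery in flight: refuse */
                    return -EAGAIN;
            /* ... normal bring-up ... */
            return 0;
    }

    static pci_ers_result_t foo_io_error_detected(struct pci_dev *pdev,
                                                  pci_channel_state_t state)
    {
            struct net_device *dev = pci_get_drvdata(pdev);
            struct foo_priv *fp = netdev_priv(dev);

            fp->pcierr_recovery = true;     /* block open/close until resumed */
            /* ... detach netdev, quiesce hardware ... */
            return PCI_ERS_RESULT_NEED_RESET;
    }

    static void foo_io_resume(struct pci_dev *pdev)
    {
            struct net_device *dev = pci_get_drvdata(pdev);
            struct foo_priv *fp = netdev_priv(dev);

            /* ... re-init hardware, restart traffic ... */
            fp->pcierr_recovery = false;
    }
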
index ff8cae5e2535b6e068159180105f84b2ff922b1d..ffc92a41d75be550d27698af6ca3e600d9a146fe 100644 (file)
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
         * For TSO, the TCP checksum field is seeded with pseudo-header sum
         * excluding the length field.
         */
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
 
                /* Do we really need these? */
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
                }
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                       __be16 net_proto = vlan_get_protocol(skb);
                        u8 proto = 0;
 
-                       if (skb->protocol == htons(ETH_P_IP))
+                       if (net_proto == htons(ETH_P_IP))
                                proto = ip_hdr(skb)->protocol;
 #ifdef NETIF_F_IPV6_CSUM
-                       else if (skb->protocol == htons(ETH_P_IPV6)) {
+                       else if (net_proto == htons(ETH_P_IPV6)) {
                                /* nexthdr may not be TCP immediately. */
                                proto = ipv6_hdr(skb)->nexthdr;
                        }
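The bnad change above is the first of several identical fixes in this series (e1000, e1000e, i40e/i40evf, mvneta and qlge follow below): when the VLAN tag has not been stripped by hardware, skb->protocol is the 802.1Q EtherType rather than the encapsulated protocol, so TSO/checksum setup has to look at the inner EtherType, which is what vlan_get_protocol() from <linux/if_vlan.h> returns. A small sketch of the pattern (sketch_l4_proto is a made-up helper for illustration):

    #include <linux/if_vlan.h>
    #include <linux/ip.h>
    #include <linux/ipv6.h>

    /* Pick the L4 protocol for checksum-offload setup, looking through an
     * optional VLAN header rather than trusting skb->protocol directly.
     */
    static u8 sketch_l4_proto(struct sk_buff *skb)
    {
            __be16 l3_proto = vlan_get_protocol(skb);

            if (l3_proto == htons(ETH_P_IP))
                    return ip_hdr(skb)->protocol;
            if (l3_proto == htons(ETH_P_IPV6))
                    return ipv6_hdr(skb)->nexthdr;  /* may be an ext header */

            return 0;       /* unknown L3: caller should skip hw csum */
    }
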
index 184a063bed5fa59bbdc705e29355134a2d31d731..07d2201530d26c85e26cf0987553451acad936a6 100644 (file)
@@ -1,6 +1,7 @@
 config NET_CALXEDA_XGMAC
        tristate "Calxeda 1G/10G XGMAC Ethernet driver"
        depends on HAS_IOMEM && HAS_DMA
+       depends on ARCH_HIGHBANK || COMPILE_TEST
        select CRC32
        help
          This is the driver for the XGMAC Ethernet IP block found on Calxeda
index 18fb9c61d7bacfd19319a664d2287786938ecec7..8c34811a112843239cb50d496b50fe73c5647472 100644 (file)
@@ -1253,7 +1253,9 @@ freeout:  t4_free_sge_resources(adap);
                        goto freeout;
        }
 
-       t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+       t4_write_reg(adap, is_t4(adap->params.chip) ?
+                               MPS_TRC_RSS_CONTROL :
+                               MPS_T5_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
@@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
-               0xf000, 0x11190,
+               0xf000, 0x11110,
+               0x11118, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
@@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
-               0x1109c, 0x1117c,
+               0x1109c, 0x11110,
+               0x11118, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
@@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(CQ_END);
                params[4] = FW_PARAM_PFVF(OCQ_START);
                params[5] = FW_PARAM_PFVF(OCQ_END);
-               ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+                                     val);
                if (ret < 0)
                        goto bye;
                adap->vres.qp.start = val[0];
@@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap)
 
                params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
                params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
-               ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+                                     val);
                if (ret < 0) {
                        adap->params.max_ordird_qp = 8;
                        adap->params.max_ird_adapter = 32 * adap->tids.ntids;
index a853133d8db826029ac042006e89c6d99348f2d7..41d04462b72eb158f7699c38000e6331291d2447 100644 (file)
@@ -167,6 +167,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
        t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
 }
 
+/*
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+       static const char *const reason[] = {
+               "Crash",                        /* PCIE_FW_EVAL_CRASH */
+               "During Device Preparation",    /* PCIE_FW_EVAL_PREP */
+               "During Device Configuration",  /* PCIE_FW_EVAL_CONF */
+               "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+               "Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+               "Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
+               "Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+               "Reserved",                     /* reserved */
+       };
+       u32 pcie_fw;
+
+       pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
+       if (pcie_fw & FW_PCIE_FW_ERR)
+               dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
+                       reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
+}
+
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  */
@@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        dump_mbox(adap, mbox, data_reg);
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
+       t4_report_fw_error(adap);
        return -ETIMEDOUT;
 }
 
@@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024
+#define CHELSIO_VPD_UNIQUE_ID 0x82
 
 /**
  *     t4_seeprom_wp - enable/disable EEPROM write protection
@@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
        if (ret < 0)
                goto out;
-       addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+       /* The VPD shall have a unique identifier specified by the PCI SIG.
+        * For Chelsio adapters the first byte of the VPD shall be
+        * CHELSIO_VPD_UNIQUE_ID (0x82); the VPD programming software is
+        * expected to put this entry at the beginning of the VPD.
+        */
+       addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
 
        ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
        if (ret < 0)
@@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        strim(p->sn);
+       i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->pn, vpd + pn, min(i, PN_LEN));
        strim(p->pn);
 
@@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter)
 
        int fat;
 
-       fat = t4_handle_intr_status(adapter,
-                                   PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-                                   sysbus_intr_info) +
-             t4_handle_intr_status(adapter,
-                                   PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
-                                   pcie_port_intr_info) +
-             t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
-                                   is_t4(adapter->params.chip) ?
-                                   pcie_intr_info : t5_pcie_intr_info);
+       if (is_t4(adapter->params.chip))
+               fat = t4_handle_intr_status(adapter,
+                                           PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+                                           sysbus_intr_info) +
+                       t4_handle_intr_status(adapter,
+                                             PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+                                             pcie_port_intr_info) +
+                       t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+                                             pcie_intr_info);
+       else
+               fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+                                           t5_pcie_intr_info);
 
        if (fat)
                t4_fatal_err(adapter);
@@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter)
 
        int fat;
 
+       if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+               t4_report_fw_error(adapter);
+
        fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
                                    cim_intr_info) +
              t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
@@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap)
 {
        u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
 
-       if (status & MEM_PERR_INT_CAUSE)
+       if (status & MEM_PERR_INT_CAUSE) {
                dev_alert(adap->pdev_dev,
                          "MA parity error, parity status %#x\n",
                          t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+               if (is_t5(adap->params.chip))
+                       dev_alert(adap->pdev_dev,
+                                 "MA parity error, parity status %#x\n",
+                                 t4_read_reg(adap,
+                                             MA_PARITY_ERROR_STATUS2));
+       }
        if (status & MEM_WRAP_INT_CAUSE) {
                v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
                dev_alert(adap->pdev_dev, "MA address wrap-around error by "
@@ -2733,12 +2783,16 @@ retry:
        /*
         * Issue the HELLO command to the firmware.  If it's not successful
         * but indicates that we got a "busy" or "timeout" condition, retry
-        * the HELLO until we exhaust our retry limit.
+        * the HELLO until we exhaust our retry limit.  If we do exceed our
+        * retry limit, check to see if the firmware left us any error
+        * information and report that if so.
         */
        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret < 0) {
                if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
                        goto retry;
+               if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+                       t4_report_fw_error(adap);
                return ret;
        }
 
@@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
                        lc->link_ok = link_ok;
                        lc->speed = speed;
                        lc->fc = fc;
+                       lc->supported = be16_to_cpu(p->u.info.pcap);
                        t4_os_link_changed(adap, port, link_ok);
                }
                if (mod != pi->mod_type) {
index e3146e83df2043ae59436e7eff6b8c276a3fb3e4..39fb325474f7e7499534d142d47cdcd0b7fbc1f4 100644 (file)
 #define  MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
 #define MA_PCIE_FW 0x30b8
 #define MA_PARITY_ERROR_STATUS 0x77f4
+#define MA_PARITY_ERROR_STATUS2 0x7804
 
 #define MA_EXT_MEMORY1_BAR 0x7808
 #define EDC_0_BASE_ADDR 0x7900
 #define  TRCMULTIFILTER     0x00000001U
 
 #define MPS_TRC_RSS_CONTROL 0x9808
+#define MPS_T5_TRC_RSS_CONTROL 0xa00c
 #define  RSSCONTROL_MASK    0x00ff0000U
 #define  RSSCONTROL_SHIFT   16
 #define  RSSCONTROL(x)      ((x) << RSSCONTROL_SHIFT)
index 5f2729ebadbe14c4d6c1e56fcc29d078380db437..3409756a85b95586f7640033dc06bb274c4db139 100644 (file)
@@ -2228,6 +2228,10 @@ struct fw_debug_cmd {
 #define FW_PCIE_FW_MASTER(x)     ((x) << FW_PCIE_FW_MASTER_SHIFT)
 #define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
                                 FW_PCIE_FW_MASTER_MASK)
+#define FW_PCIE_FW_EVAL_MASK   0x7
+#define FW_PCIE_FW_EVAL_SHIFT  24
+#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
+                                FW_PCIE_FW_EVAL_MASK)
 
 struct fw_hdr {
        u8 ver;
index a0b418e007a0d09303a32838d8acf1a06ca9d061..566b17db135a306a2bb8eea0ce167a70e6e165f4 100644 (file)
@@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
 {
        swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
 
-       if (skb->protocol != htons(ETH_P_IP))
+       if (vlan_get_protocol(skb) != htons(ETH_P_IP))
                return;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
index cbc330b301cdcfb8dd7e62695700a4e4ee2920e6..ad3d5d12173faea9992f9e46f74f57e21043b05d 100644 (file)
@@ -2674,7 +2674,8 @@ set_itr_now:
 #define E1000_TX_FLAGS_VLAN_SHIFT      16
 
 static int e1000_tso(struct e1000_adapter *adapter,
-                    struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+                    struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+                    __be16 protocol)
 {
        struct e1000_context_desc *context_desc;
        struct e1000_buffer *buffer_info;
@@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
 
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
                        iph->tot_len = 0;
                        iph->check = 0;
@@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
                                                                 0);
                        cmd_length = E1000_TXD_CMD_IP;
                        ipcse = skb_transport_offset(skb) - 1;
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (skb_is_gso_v6(skb)) {
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter,
 }
 
 static bool e1000_tx_csum(struct e1000_adapter *adapter,
-                         struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+                         struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+                         __be16 protocol)
 {
        struct e1000_context_desc *context_desc;
        struct e1000_buffer *buffer_info;
@@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return false;
 
-       switch (skb->protocol) {
+       switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        cmd_len |= E1000_TXD_CMD_TCP;
@@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        int count = 0;
        int tso;
        unsigned int f;
+       __be16 protocol = vlan_get_protocol(skb);
 
        /* This goes back to the question of how to logically map a Tx queue
         * to a flow.  Right now, performance is impacted slightly negatively
@@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
        first = tx_ring->next_to_use;
 
-       tso = e1000_tso(adapter, tx_ring, skb);
+       tso = e1000_tso(adapter, tx_ring, skb, protocol);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                if (likely(hw->mac_type != e1000_82544))
                        tx_ring->last_tx_tso = true;
                tx_flags |= E1000_TX_FLAGS_TSO;
-       } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+       } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
                tx_flags |= E1000_TX_FLAGS_CSUM;
 
-       if (likely(skb->protocol == htons(ETH_P_IP)))
+       if (protocol == htons(ETH_P_IP))
                tx_flags |= E1000_TX_FLAGS_IPV4;
 
        if (unlikely(skb->no_fcs))
index 65c3aef2bd36a2ee01bab5a11649caa589b83c95..247335d2c7ec26cb9c50bb93c6a675b4ff35879a 100644 (file)
@@ -5164,7 +5164,8 @@ link_up:
 #define E1000_TX_FLAGS_VLAN_MASK       0xffff0000
 #define E1000_TX_FLAGS_VLAN_SHIFT      16
 
-static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
+                    __be16 protocol)
 {
        struct e1000_context_desc *context_desc;
        struct e1000_buffer *buffer_info;
@@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 
        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        mss = skb_shinfo(skb)->gso_size;
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        return 1;
 }
 
-static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
+                         __be16 protocol)
 {
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct e1000_context_desc *context_desc;
@@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
        unsigned int i;
        u8 css;
        u32 cmd_len = E1000_TXD_CMD_DEXT;
-       __be16 protocol;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return false;
 
-       if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
-       else
-               protocol = skb->protocol;
-
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        int count = 0;
        int tso;
        unsigned int f;
+       __be16 protocol = vlan_get_protocol(skb);
 
        if (test_bit(__E1000_DOWN, &adapter->state)) {
                dev_kfree_skb_any(skb);
@@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
        first = tx_ring->next_to_use;
 
-       tso = e1000_tso(tx_ring, skb);
+       tso = e1000_tso(tx_ring, skb, protocol);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
        if (tso)
                tx_flags |= E1000_TX_FLAGS_TSO;
-       else if (e1000_tx_csum(tx_ring, skb))
+       else if (e1000_tx_csum(tx_ring, skb, protocol))
                tx_flags |= E1000_TX_FLAGS_CSUM;
 
        /* Old method was to assume IPv4 packet by default if TSO was enabled.
         * 82571 hardware supports TSO capabilities for IPv6 as well...
         * no longer assume, we must.
         */
-       if (skb->protocol == htons(ETH_P_IP))
+       if (protocol == htons(ETH_P_IP))
                tx_flags |= E1000_TX_FLAGS_IPV4;
 
        if (unlikely(skb->no_fcs))
index a51aa37b7b5af10a5204c1ef8329ce9229b1ce57..369848e107f8ed37952b4849ed632655f598b6ba 100644 (file)
@@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                goto out_drop;
 
        /* obtain protocol of skb */
-       protocol = skb->protocol;
+       protocol = vlan_get_protocol(skb);
 
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
index 79bf96ca648954b390dbf60728ae687181292097..95a3ec236b4951ab7166637159393df569da97fb 100644 (file)
@@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                goto out_drop;
 
        /* obtain protocol of skb */
-       protocol = skb->protocol;
+       protocol = vlan_get_protocol(skb);
 
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
index c9f1d1b7ef378bef042b12c9e00f5bf2786594a8..ade067de168959b30c9e7eafc07ed40df0967ee0 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mbus.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <linux/io.h>
@@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
 {
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
+               __be16 l3_proto = vlan_get_protocol(skb);
                u8 l4_proto;
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);
 
                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
                        /* Read l4_protocol from one of IPv6 extra headers */
@@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
                        return MVNETA_TX_L4_CSUM_NOT;
 
                return mvneta_txq_desc_csum(skb_network_offset(skb),
-                               skb->protocol, ip_hdr_len, l4_proto);
+                                           l3_proto, ip_hdr_len, l4_proto);
        }
 
        return MVNETA_TX_L4_CSUM_NOT;
index bb536aa613f483434f35c4bde31ae36ab9ced290..abddcf8c40aa120c4d4ade2822ccef68d00ea434 100644 (file)
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
                                    int qpn, u64 *reg_id)
 {
        int err;
-       struct mlx4_spec_list spec_eth_outer = { {NULL} };
-       struct mlx4_spec_list spec_vxlan     = { {NULL} };
-       struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
-       struct mlx4_net_trans_rule rule = {
-               .queue_mode = MLX4_NET_TRANS_Q_FIFO,
-               .exclusive = 0,
-               .allow_loopback = 1,
-               .promisc_mode = MLX4_FS_REGULAR,
-               .priority = MLX4_DOMAIN_NIC,
-       };
-
-       __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
        if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                return 0; /* do nothing */
 
-       rule.port = priv->port;
-       rule.qpn = qpn;
-       INIT_LIST_HEAD(&rule.list);
-
-       spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
-       memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
-       memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
-       spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
-       spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;  /* any inner eth header */
-
-       list_add_tail(&spec_eth_outer.list, &rule.list);
-       list_add_tail(&spec_vxlan.list,     &rule.list);
-       list_add_tail(&spec_eth_inner.list, &rule.list);
-
-       err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+       err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+                                   MLX4_DOMAIN_NIC, reg_id);
        if (err) {
                en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
                return err;
index d80e7a6fac74c4381cea11f946c889216a5ce05d..ca0f98c951054945cd29fc3be2284fc7eeb7d729 100644 (file)
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
 }
 EXPORT_SYMBOL_GPL(mlx4_flow_detach);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+                         int port, int qpn, u16 prio, u64 *reg_id)
+{
+       int err;
+       struct mlx4_spec_list spec_eth_outer = { {NULL} };
+       struct mlx4_spec_list spec_vxlan     = { {NULL} };
+       struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+       struct mlx4_net_trans_rule rule = {
+               .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+               .exclusive = 0,
+               .allow_loopback = 1,
+               .promisc_mode = MLX4_FS_REGULAR,
+       };
+
+       __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+       rule.port = port;
+       rule.qpn = qpn;
+       rule.priority = prio;
+       INIT_LIST_HEAD(&rule.list);
+
+       spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+       memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+       memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+       spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
+       spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;  /* any inner eth header */
+
+       list_add_tail(&spec_eth_outer.list, &rule.list);
+       list_add_tail(&spec_vxlan.list,     &rule.list);
+       list_add_tail(&spec_eth_inner.list, &rule.list);
+
+       err = mlx4_flow_attach(dev, &rule, reg_id);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
                                      u32 max_range_qpn)
 {
index 5020fd47825d65c359f010a9bcbf0f2e9a0ceaf7..2f12c88c66abfa1cd0bb0e099e4a33a761327fd6 100644 (file)
@@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
        int rx_head = priv->rx_head;
        int rx = 0;
 
-       while (1) {
+       while (rx < budget) {
                desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
                desc0 = readl(desc + RX_REG_OFFSET_DESC0);
 
@@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                        net_dbg_ratelimited("packet error\n");
                        priv->stats.rx_dropped++;
                        priv->stats.rx_errors++;
-                       continue;
+                       goto rx_next;
                }
 
                len = desc0 & RX_DESC0_FRAME_LEN_MASK;
@@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                if (len > RX_BUF_SIZE)
                        len = RX_BUF_SIZE;
 
-               skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+               dma_sync_single_for_cpu(&ndev->dev,
+                                       priv->rx_mapping[rx_head],
+                                       priv->rx_buf_size, DMA_FROM_DEVICE);
+               skb = netdev_alloc_skb_ip_align(ndev, len);
+
                if (unlikely(!skb)) {
-                       net_dbg_ratelimited("build_skb failed\n");
+                       net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
                        priv->stats.rx_dropped++;
                        priv->stats.rx_errors++;
+                       goto rx_next;
                }
 
+               memcpy(skb->data, priv->rx_buf[rx_head], len);
                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, ndev);
                napi_gro_receive(&priv->napi, skb);
@@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                if (desc0 & RX_DESC0_MULTICAST)
                        priv->stats.multicast++;
 
+rx_next:
                writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
 
                rx_head = RX_NEXT(rx_head);
                priv->rx_head = rx_head;
-
-               if (rx >= budget)
-                       break;
        }
 
        if (rx < budget) {
-               napi_gro_flush(napi, false);
-               __napi_complete(napi);
+               napi_complete(napi);
        }
 
        priv->reg_imr |= RPKT_FINISH_M;
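The rewritten poll routine above now bounds the loop by the NAPI budget, funnels error paths through a common rx_next label so descriptor ownership is always handed back to the hardware, and calls napi_complete() only when it finished under budget. Stripped of the driver specifics, the canonical shape of such a poll callback is roughly as follows (foo_rx_one() and foo_enable_rx_irq() are assumed helpers, not functions from this driver):

    #include <linux/netdevice.h>

    struct foo_priv {                               /* placeholder driver state */
            struct napi_struct napi;
    };

    static bool foo_rx_one(struct foo_priv *priv);          /* assumed: 1 frame */
    static void foo_enable_rx_irq(struct foo_priv *priv);   /* assumed helper */

    static int foo_rx_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
            int rx = 0;

            while (rx < budget) {
                    if (!foo_rx_one(priv))  /* ring drained before budget */
                            break;
                    rx++;
            }

            if (rx < budget) {
                    napi_complete(napi);    /* leave polling mode */
                    foo_enable_rx_irq(priv);
            }

            return rx;                      /* frames consumed this poll */
    }
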
@@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                len = ETH_ZLEN;
        }
 
-       txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
-       txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
-       txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
-       txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+       dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+                                  priv->tx_buf_size, DMA_TO_DEVICE);
+
+       txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
+       if (tx_head == TX_DESC_NUM_MASK)
+               txdes1 |= TX_DESC1_END;
        writel(txdes1, desc + TX_REG_OFFSET_DESC1);
        writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
 
@@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
        spin_lock_init(&priv->txlock);
 
        priv->tx_buf_size = TX_BUF_SIZE;
-       priv->rx_buf_size = RX_BUF_SIZE +
-                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       priv->rx_buf_size = RX_BUF_SIZE;
 
        priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
                                                TX_DESC_NUM, &priv->tx_base,
index 8706c0dbd0c36a2c3b7af167eaa4c29cda3905a5..a44a03c45014903a6410ebe2d665f3651ee30db2 100644 (file)
@@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev)
 
        __lpc_eth_clock_enable(pldat, true);
 
+       /* A suspended PHY makes the LPC Ethernet core block, so resume it now */
+       phy_resume(pldat->phy_dev);
+
        /* Reset and initialize */
        __lpc_eth_reset(pldat);
        __lpc_eth_init(pldat);
index 188626e2a861d317510617dce742595b88bd3abd..3e96f269150d197253fdcdf3066c1a2da5b62985 100644 (file)
@@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 
        if (skb_is_gso(skb)) {
                int err;
+               __be16 l3_proto = vlan_get_protocol(skb);
 
                err = skb_cow_head(skb, 0);
                if (err < 0)
@@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
                                << OB_MAC_TRANSPORT_HDR_SHIFT);
                mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
-               if (likely(skb->protocol == htons(ETH_P_IP))) {
+               if (likely(l3_proto == htons(ETH_P_IP))) {
                        struct iphdr *iph = ip_hdr(skb);
                        iph->check = 0;
                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
@@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
                        tcp_hdr(skb)->check =
                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
index 9e757c792d846f300288c9848225e1db9a9584fa..196e98a2d93bbfa905a0b13d8a372da00add0f7b 100644 (file)
@@ -5,6 +5,7 @@
 config SH_ETH
        tristate "Renesas SuperH Ethernet support"
        depends on HAS_DMA
+       depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
        select CRC32
        select MII
        select MDIO_BITBANG
index c553f6b5a9131f0af16230f59ccd0557fe1116a5..cf28daba4346f0c288d1718513554b38e4de2176 100644 (file)
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
        struct stmmac_priv *priv = (struct stmmac_priv *)p;
        unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
        desc->des2 = dma_map_single(priv->device, skb->data,
                                    bmax, DMA_TO_DEVICE);
-       priv->tx_skbuff_dma[entry] = desc->des2;
+       if (dma_mapping_error(priv->device, desc->des2))
+               return -1;
+       priv->tx_skbuff_dma[entry].buf = desc->des2;
        priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
 
        while (len != 0) {
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                        desc->des2 = dma_map_single(priv->device,
                                                    (skb->data + bmax * i),
                                                    bmax, DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[entry] = desc->des2;
+                       if (dma_mapping_error(priv->device, desc->des2))
+                               return -1;
+                       priv->tx_skbuff_dma[entry].buf = desc->des2;
                        priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
                                                        STMMAC_CHAIN_MODE);
                        priv->hw->desc->set_tx_owner(desc);
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                        desc->des2 = dma_map_single(priv->device,
                                                    (skb->data + bmax * i), len,
                                                    DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[entry] = desc->des2;
+                       if (dma_mapping_error(priv->device, desc->des2))
+                               return -1;
+                       priv->tx_skbuff_dma[entry].buf = desc->des2;
                        priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
                                                        STMMAC_CHAIN_MODE);
                        priv->hw->desc->set_tx_owner(desc);
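Every dma_map_single() in this hunk now gets a dma_mapping_error() check before the address is written into a descriptor, so a failed mapping is reported instead of handing a bad bus address to the DMA engine. The bare pattern, with the descriptor bookkeeping stripped away and a made-up helper name:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map a buffer for memory-to-device DMA and fail cleanly if the IOMMU
     * or swiotlb could not provide a mapping.
     */
    static int sketch_map_tx_buf(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *dma)
    {
            *dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *dma))
                    return -ENOMEM;         /* caller must not use *dma */

            return 0;
    }
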
index de507c32036c75331e393ad5b013890ffffb9d1b..593e6c4144a7c197140f875f73e86c5583c215d1 100644 (file)
@@ -220,10 +220,10 @@ enum dma_irq_status {
        handle_tx = 0x8,
 };
 
-#define        CORE_IRQ_TX_PATH_IN_LPI_MODE    (1 << 1)
-#define        CORE_IRQ_TX_PATH_EXIT_LPI_MODE  (1 << 2)
-#define        CORE_IRQ_RX_PATH_IN_LPI_MODE    (1 << 3)
-#define        CORE_IRQ_RX_PATH_EXIT_LPI_MODE  (1 << 4)
+#define        CORE_IRQ_TX_PATH_IN_LPI_MODE    (1 << 0)
+#define        CORE_IRQ_TX_PATH_EXIT_LPI_MODE  (1 << 1)
+#define        CORE_IRQ_RX_PATH_IN_LPI_MODE    (1 << 2)
+#define        CORE_IRQ_RX_PATH_EXIT_LPI_MODE  (1 << 3)
 
 #define        CORE_PCS_ANE_COMPLETE           (1 << 5)
 #define        CORE_PCS_LINK_STATUS            (1 << 6)
@@ -287,7 +287,7 @@ struct dma_features {
 
 /* Default LPI timers */
 #define STMMAC_DEFAULT_LIT_LS  0x3E8
-#define STMMAC_DEFAULT_TWT_LS  0x0
+#define STMMAC_DEFAULT_TWT_LS  0x1E
 
 #define STMMAC_CHAIN_MODE      0x1
 #define STMMAC_RING_MODE       0x2
@@ -425,7 +425,7 @@ struct stmmac_mode_ops {
        void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
                      unsigned int extend_desc);
        unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
-       unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+       int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
        int (*set_16kib_bfsize)(int mtu);
        void (*init_desc3)(struct dma_desc *p);
        void (*refill_desc3) (void *priv, struct dma_desc *p);
@@ -445,6 +445,7 @@ struct mac_device_info {
        int multicast_filter_bins;
        int unicast_filter_entries;
        int mcast_bits_log2;
+       unsigned int rx_csum;
 };
 
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
index 71b5419256c138e335c77aab87cf37784ff79037..64d8f56a9c1732f9199cc3a748e25ae8c1107ed7 100644 (file)
@@ -153,7 +153,7 @@ enum inter_frame_gap {
 #define GMAC_CONTROL_RE                0x00000004      /* Receiver Enable */
 
 #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
-                       GMAC_CONTROL_BE)
+                       GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
 
 /* GMAC Frame Filter defines */
 #define GMAC_FRAME_FILTER_PR   0x00000001      /* Promiscuous Mode */
index d8ef18786a1cadae60f5d550269a0036a3f24661..5efe60ea6526b3d06305530b75240b9de719f1f3 100644 (file)
@@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONTROL);
 
-       value |= GMAC_CONTROL_IPC;
+       if (hw->rx_csum)
+               value |= GMAC_CONTROL_IPC;
+       else
+               value &= ~GMAC_CONTROL_IPC;
+
        writel(value, ioaddr + GMAC_CONTROL);
 
        value = readl(ioaddr + GMAC_CONTROL);
index 8607488cbcfcfaea7bc70510f0a3f601c5dade7d..192c2491330b1070348f5ccf5c5eb51d492e7b24 100644 (file)
@@ -68,7 +68,7 @@ struct stmmac_counters {
        unsigned int mmc_rx_octetcount_g;
        unsigned int mmc_rx_broadcastframe_g;
        unsigned int mmc_rx_multicastframe_g;
-       unsigned int mmc_rx_crc_errror;
+       unsigned int mmc_rx_crc_error;
        unsigned int mmc_rx_align_error;
        unsigned int mmc_rx_run_error;
        unsigned int mmc_rx_jabber_error;
index 50617c5a0bdb5e63fc930f7af277a1a9fb139af4..08c483bd2ec7bd94d5434f9567c75f609ce27d35 100644 (file)
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
        mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
        mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
        mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
-       mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+       mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
        mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
        mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
        mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
index 650a4be6bce5243e046fcd226719e669f66444e8..5dd50c6cda5be0280e235152057a329a89fb187c 100644 (file)
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
        struct stmmac_priv *priv = (struct stmmac_priv *)p;
        unsigned int txsize = priv->dma_tx_size;
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des2 = dma_map_single(priv->device, skb->data,
                                            bmax, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       return -1;
+
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
                                                STMMAC_RING_MODE);
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des2 = dma_map_single(priv->device, skb->data + bmax,
                                            len, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       return -1;
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
                                                STMMAC_RING_MODE);
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
        } else {
                desc->des2 = dma_map_single(priv->device, skb->data,
                                            nopaged_len, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       return -1;
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
                                                STMMAC_RING_MODE);
index ca01035634a76fbc88414f6550849cfa1c772403..58097c0e2ad502699ace45392bb3f12855101625 100644 (file)
 #include <linux/ptp_clock_kernel.h>
 #include <linux/reset.h>
 
+struct stmmac_tx_info {
+       dma_addr_t buf;
+       bool map_as_page;
+};
+
 struct stmmac_priv {
        /* Frequently used values are kept adjacent for cache effect */
        struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -45,7 +50,7 @@ struct stmmac_priv {
        u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timer;
-       dma_addr_t *tx_skbuff_dma;
+       struct stmmac_tx_info *tx_skbuff_dma;
        dma_addr_t dma_tx_phy;
        int tx_coalesce;
        int hwts_tx_en;
@@ -105,6 +110,8 @@ struct stmmac_priv {
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_clock_ops;
        unsigned int default_addend;
+       struct clk *clk_ptp_ref;
+       unsigned int clk_ptp_rate;
        u32 adv_ts;
        int use_riwt;
        int irq_wake;
index 9af50bae4dde67c543c15e0344f90ac9d53fd2b8..cf4f38db1c0a60338c1d7479b0cfe28f50b6b558 100644 (file)
@@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
        STMMAC_MMC_STAT(mmc_rx_octetcount_g),
        STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
        STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
-       STMMAC_MMC_STAT(mmc_rx_crc_errror),
+       STMMAC_MMC_STAT(mmc_rx_crc_error),
        STMMAC_MMC_STAT(mmc_rx_align_error),
        STMMAC_MMC_STAT(mmc_rx_run_error),
        STMMAC_MMC_STAT(mmc_rx_jabber_error),
index 08addd65372818f48075b585eeb1d36ab682025b..6e6ee226de04f60dc8511334db8ca48310fd5cff 100644 (file)
@@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
  */
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
+       char *phy_bus_name = priv->plat->phy_bus_name;
        bool ret = false;
 
        /* Using PCS we cannot dial with the phy registers at this stage
@@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
            (priv->pcs == STMMAC_PCS_RTBI))
                goto out;
 
+       /* Never init EEE when a switch is attached */
+       if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
+               goto out;
+
        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;
@@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
-               } else
-                       /* Set HW EEE according to the speed */
-                       priv->hw->mac->set_eee_pls(priv->hw,
-                                                  priv->phydev->link);
+               }
+               /* Set HW EEE according to the speed */
+               priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
 
                pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
 
@@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                /* calculate default added value:
                 * formula is :
                 * addend = (2^32)/freq_div_ratio;
-                * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
-                * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
-                * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+                * where, freq_div_ratio = clk_ptp_ref_i/50MHz
+                * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
+                * NOTE: clk_ptp_ref_i should be >= 50MHz to
                 *       achive 20ns accuracy.
                 *
                 * 2^x * y == (y << x), hence
                 * 2^32 * 50000000 ==> (50000000 << 32)
                 */
                temp = (u64) (50000000ULL << 32);
-               priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+               priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ioaddr,
                                             priv->default_addend);
 
@@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;
 
+       /* Fall-back to main clock in case of no PTP ref is passed */
+       priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
+       if (IS_ERR(priv->clk_ptp_ref)) {
+               priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
+               priv->clk_ptp_ref = NULL;
+       } else {
+               clk_prepare_enable(priv->clk_ptp_ref);
+               priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+       }
+
        priv->adv_ts = 0;
        if (priv->dma_cap.atime_stamp && priv->extend_desc)
                priv->adv_ts = 1;
@@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
 
 static void stmmac_release_ptp(struct stmmac_priv *priv)
 {
+       if (priv->clk_ptp_ref)
+               clk_disable_unprepare(priv->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
 }
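
A minimal sketch of the optional-clock handling added above, assuming the PTP reference clock really is optional and trimming error handling; devm_clk_get(), clk_prepare_enable() and clk_get_rate() are the standard clk API calls used in the hunk, and the helper name is illustrative:

    #include <linux/clk.h>
    #include <linux/device.h>

    static unsigned long stmmac_ptp_clk_rate(struct device *dev,
                                             struct clk *main_clk,
                                             struct clk **ptp_clk)
    {
            struct clk *clk = devm_clk_get(dev, "clk_ptp_ref");

            if (IS_ERR(clk)) {
                    *ptp_clk = NULL;               /* fall back to main clock */
                    return clk_get_rate(main_clk);
            }

            clk_prepare_enable(clk);               /* return value ignored here */
            *ptp_clk = clk;
            return clk_get_rate(clk);
    }
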
 
@@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev)
                else
                        p = priv->dma_tx + i;
                p->des2 = 0;
-               priv->tx_skbuff_dma[i] = 0;
+               priv->tx_skbuff_dma[i].buf = 0;
+               priv->tx_skbuff_dma[i].map_as_page = false;
                priv->tx_skbuff[i] = NULL;
        }
 
@@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
                else
                        p = priv->dma_tx + i;
 
-               if (priv->tx_skbuff_dma[i]) {
-                       dma_unmap_single(priv->device,
-                                        priv->tx_skbuff_dma[i],
-                                        priv->hw->desc->get_tx_len(p),
-                                        DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[i] = 0;
+               if (priv->tx_skbuff_dma[i].buf) {
+                       if (priv->tx_skbuff_dma[i].map_as_page)
+                               dma_unmap_page(priv->device,
+                                              priv->tx_skbuff_dma[i].buf,
+                                              priv->hw->desc->get_tx_len(p),
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(priv->device,
+                                                priv->tx_skbuff_dma[i].buf,
+                                                priv->hw->desc->get_tx_len(p),
+                                                DMA_TO_DEVICE);
                }
 
                if (priv->tx_skbuff[i] != NULL) {
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
+                       priv->tx_skbuff_dma[i].buf = 0;
+                       priv->tx_skbuff_dma[i].map_as_page = false;
                }
        }
 }
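
The new map_as_page flag records how each buffer was mapped so teardown and tx-clean can call the matching unmap routine; pairing dma_map_page() with dma_unmap_single(), or the reverse, is a DMA-API misuse that DMA API debugging can flag. An illustrative helper capturing the choice:

    #include <linux/dma-mapping.h>

    static void stmmac_unmap_tx_buf(struct device *dev, dma_addr_t buf,
                                    size_t len, bool map_as_page)
    {
            if (map_as_page)
                    dma_unmap_page(dev, buf, len, DMA_TO_DEVICE);
            else
                    dma_unmap_single(dev, buf, len, DMA_TO_DEVICE);
    }
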
@@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
        if (!priv->rx_skbuff)
                goto err_rx_skbuff;
 
-       priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+       priv->tx_skbuff_dma = kmalloc_array(txsize,
+                                           sizeof(*priv->tx_skbuff_dma),
                                            GFP_KERNEL);
        if (!priv->tx_skbuff_dma)
                goto err_tx_skbuff_dma;
@@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
                        pr_debug("%s: curr %d, dirty %d\n", __func__,
                                 priv->cur_tx, priv->dirty_tx);
 
-               if (likely(priv->tx_skbuff_dma[entry])) {
-                       dma_unmap_single(priv->device,
-                                        priv->tx_skbuff_dma[entry],
-                                        priv->hw->desc->get_tx_len(p),
-                                        DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[entry] = 0;
+               if (likely(priv->tx_skbuff_dma[entry].buf)) {
+                       if (priv->tx_skbuff_dma[entry].map_as_page)
+                               dma_unmap_page(priv->device,
+                                              priv->tx_skbuff_dma[entry].buf,
+                                              priv->hw->desc->get_tx_len(p),
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(priv->device,
+                                                priv->tx_skbuff_dma[entry].buf,
+                                                priv->hw->desc->get_tx_len(p),
+                                                DMA_TO_DEVICE);
+                       priv->tx_skbuff_dma[entry].buf = 0;
+                       priv->tx_skbuff_dma[entry].map_as_page = false;
                }
                priv->hw->mode->clean_desc3(priv, p);
 
@@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev)
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->hw, dev->mtu);
 
+       ret = priv->hw->mac->rx_ipc(priv->hw);
+       if (!ret) {
+               pr_warn(" RX IPC Checksum Offload disabled\n");
+               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+               priv->hw->rx_csum = 0;
+       }
+
        /* Enable the MAC Rx/Tx */
        stmmac_set_mac(priv->ioaddr, true);
 
@@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        if (likely(!is_jumbo)) {
                desc->des2 = dma_map_single(priv->device, skb->data,
                                            nopaged_len, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       goto dma_map_err;
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
                                                csum_insertion, priv->mode);
        } else {
                desc = first;
                entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+               if (unlikely(entry < 0))
+                       goto dma_map_err;
        }
 
        for (i = 0; i < nfrags; i++) {
@@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
                desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
                                              DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       goto dma_map_err; /* should reuse desc w/o issues */
+
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
+               priv->tx_skbuff_dma[entry].map_as_page = true;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
                                                priv->mode);
                wmb();
@@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
        spin_unlock(&priv->tx_lock);
+       return NETDEV_TX_OK;
 
+dma_map_err:
+       dev_err(priv->device, "Tx dma map failed\n");
+       dev_kfree_skb(skb);
+       priv->dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
 
@@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                        priv->rx_skbuff_dma[entry] =
                            dma_map_single(priv->device, skb->data, bfsize,
                                           DMA_FROM_DEVICE);
-
+                       if (dma_mapping_error(priv->device,
+                                             priv->rx_skbuff_dma[entry])) {
+                               dev_err(priv->device, "Rx dma map failed\n");
+                               dev_kfree_skb(skb);
+                               break;
+                       }
                        p->des2 = priv->rx_skbuff_dma[entry];
 
                        priv->hw->mode->refill_desc3(priv, p);
@@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
        unsigned int entry = priv->cur_rx % rxsize;
        unsigned int next_entry;
        unsigned int count = 0;
-       int coe = priv->plat->rx_coe;
+       int coe = priv->hw->rx_csum;
 
        if (netif_msg_rx_status(priv)) {
                pr_debug("%s: descriptor ring:\n", __func__);
@@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
 
        if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
                features &= ~NETIF_F_RXCSUM;
-       else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
-               features &= ~NETIF_F_IPV6_CSUM;
+
        if (!priv->plat->tx_coe)
                features &= ~NETIF_F_ALL_CSUM;
 
@@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
        return features;
 }
 
+static int stmmac_set_features(struct net_device *netdev,
+                              netdev_features_t features)
+{
+       struct stmmac_priv *priv = netdev_priv(netdev);
+
+       /* Keep the COE type when checksum offload is supported */
+       if (features & NETIF_F_RXCSUM)
+               priv->hw->rx_csum = priv->plat->rx_coe;
+       else
+               priv->hw->rx_csum = 0;
+       /* No check needed because rx_coe has been set before and it will be
+        * fixed in case of issue.
+        */
+       priv->hw->mac->rx_ipc(priv->hw);
+
+       return 0;
+}
+
 /**
  *  stmmac_interrupt - main ISR
  *  @irq: interrupt number.
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
        .ndo_stop = stmmac_release,
        .ndo_change_mtu = stmmac_change_mtu,
        .ndo_fix_features = stmmac_fix_features,
+       .ndo_set_features = stmmac_set_features,
        .ndo_set_rx_mode = stmmac_set_rx_mode,
        .ndo_tx_timeout = stmmac_tx_timeout,
        .ndo_do_ioctl = stmmac_ioctl,
@@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
  */
 static int stmmac_hw_init(struct stmmac_priv *priv)
 {
-       int ret;
        struct mac_device_info *mac;
 
        /* Identify the MAC HW device */
@@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
        /* To use alternate (extended) or normal descriptor structures */
        stmmac_selec_desc_mode(priv);
 
-       ret = priv->hw->mac->rx_ipc(priv->hw);
-       if (!ret) {
-               pr_warn(" RX IPC Checksum Offload not configured.\n");
-               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
-       }
-
-       if (priv->plat->rx_coe)
+       if (priv->plat->rx_coe) {
+               priv->hw->rx_csum = priv->plat->rx_coe;
                pr_info(" RX Checksum Offload Engine supported (type %d)\n",
                        priv->plat->rx_coe);
+       }
        if (priv->plat->tx_coe)
                pr_info(" TX Checksum insertion supported\n");
 
index b7ad3565566cc8a09b7964fcb59aca3921e8e57c..c5ee79d8a8c56478f9987efc2c0631c140aafc0d 100644 (file)
@@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
 {
        if (priv->ptp_clock) {
                ptp_clock_unregister(priv->ptp_clock);
+               priv->ptp_clock = NULL;
                pr_debug("Removed PTP HW clock successfully on %s\n",
                         priv->dev->name);
        }
index 3dbc047622fa8bee67ed44213b0f8cd74a4ca956..4535df37c22767824d1f7bbe6db56a8e3d0644ab 100644 (file)
@@ -25,8 +25,6 @@
 #ifndef __STMMAC_PTP_H__
 #define __STMMAC_PTP_H__
 
-#define STMMAC_SYSCLOCK 62500000
-
 /* IEEE 1588 PTP register offsets */
 #define PTP_TCR                0x0700  /* Timestamp Control Reg */
 #define PTP_SSIR       0x0704  /* Sub-Second Increment Reg */
index c1ba26c06d731252771dfe7f2273b724bc675068..3de2f0d15fe24b22c58a912c1502e7513ad6e6cc 100644 (file)
 #define        PCI_MEM64BIT    (2<<1)       /* Base addr anywhere in 64 Bit range */
 #define        PCI_MEMSPACE    0x00000001L  /* Bit 0:  Memory Space Indic. */
 
-/*     PCI_BASE_2ND    32 bit  2nd Base address */
-#define        PCI_IOBASE      0xffffff00L  /* Bit 31..8:  I/O Base address */
-#define        PCI_IOSIZE      0x000000fcL  /* Bit 7..2:   I/O Size Requirements */
-#define        PCI_IOSPACE     0x00000001L  /* Bit 0:      I/O Space Indicator */
-
 /*     PCI_SUB_VID     16 bit  Subsystem Vendor ID */
 /*     PCI_SUB_ID      16 bit  Subsystem ID */
 
index c94e2a27446a33a685b8b1c17dee4e559c33afd7..a854d38c231dfebc983a3bcd4840fdd5a2257bd2 100644 (file)
@@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                /* First check if the EEE ability is supported */
                eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
                                                MDIO_MMD_PCS, phydev->addr);
-               if (eee_cap < 0)
-                       return eee_cap;
+               if (eee_cap <= 0)
+                       goto eee_exit_err;
 
                cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
                if (!cap)
-                       return -EPROTONOSUPPORT;
+                       goto eee_exit_err;
 
                /* Check which link settings negotiated and verify it in
                 * the EEE advertising registers.
                 */
                eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
                                               MDIO_MMD_AN, phydev->addr);
-               if (eee_lp < 0)
-                       return eee_lp;
+               if (eee_lp <= 0)
+                       goto eee_exit_err;
 
                eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
                                                MDIO_MMD_AN, phydev->addr);
-               if (eee_adv < 0)
-                       return eee_adv;
+               if (eee_adv <= 0)
+                       goto eee_exit_err;
 
                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
                if (!(lp & adv & settings[idx].setting))
-                       return -EPROTONOSUPPORT;
+                       goto eee_exit_err;
 
                if (clk_stop_enable) {
                        /* Configure the PHY to stop receiving xMII
@@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 
                return 0; /* EEE supported */
        }
-
+eee_exit_err:
        return -EPROTONOSUPPORT;
 }
 EXPORT_SYMBOL(phy_init_eee);
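
The hunk above also tightens the checks from "< 0" to "<= 0", since a register value of zero means nothing is advertised, and funnels every such case through one exit label. A generic, compilable sketch of the idiom; read_cap() is a made-up stand-in for the MMD reads:

    #include <errno.h>

    static int read_cap(void)
    {
            return 0;       /* hypothetical: 0 = capability not advertised */
    }

    static int init_feature(void)
    {
            int cap = read_cap();

            if (cap <= 0)   /* treat bus errors (<0) and "no cap" (0) alike */
                    goto exit_err;

            return 0;       /* feature usable */

    exit_err:
            return -EPROTONOSUPPORT;
    }

    int main(void)
    {
            return init_feature() ? 1 : 0;
    }
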
index d6e90c72c257ebe6740d3980dc7273c6b128978d..6dfcbf523936ef69527f5d23168dbf51ee638094 100644 (file)
@@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev)
                if (!netdev_mc_empty(netdev)) {
                        new_table = vmxnet3_copy_mc(netdev);
                        if (new_table) {
-                               new_mode |= VMXNET3_RXM_MCAST;
                                rxConf->mfTableLen = cpu_to_le16(
                                        netdev_mc_count(netdev) * ETH_ALEN);
                                new_table_pa = dma_map_single(
@@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev)
                                                        new_table,
                                                        rxConf->mfTableLen,
                                                        PCI_DMA_TODEVICE);
+                       }
+
+                       if (new_table_pa) {
+                               new_mode |= VMXNET3_RXM_MCAST;
                                rxConf->mfTablePA = cpu_to_le64(new_table_pa);
                        } else {
-                               netdev_info(netdev, "failed to copy mcast list"
-                                           ", setting ALL_MULTI\n");
+                               netdev_info(netdev,
+                                           "failed to copy mcast list, setting ALL_MULTI\n");
                                new_mode |= VMXNET3_RXM_ALL_MULTI;
                        }
                }
 
-
        if (!(new_mode & VMXNET3_RXM_MCAST)) {
                rxConf->mfTableLen = 0;
                rxConf->mfTablePA = 0;
@@ -2091,11 +2093,10 @@ vmxnet3_set_mc(struct net_device *netdev)
                               VMXNET3_CMD_UPDATE_MAC_FILTERS);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
-       if (new_table) {
+       if (new_table_pa)
                dma_unmap_single(&adapter->pdev->dev, new_table_pa,
                                 rxConf->mfTableLen, PCI_DMA_TODEVICE);
-               kfree(new_table);
-       }
+       kfree(new_table);
 }
 
 void
index 29ee77f2c97f3cc0e27e2985751297e3fbf01f90..3759479f959a43b86654e7b493a5d1beb5b6034b 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.2.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.2.1.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01020000
+#define VMXNET3_DRIVER_VERSION_NUM      0x01020100
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index 1fb7b37d1402a447b84298829bab61d4f8b6d129..beb377b2d4b78e67e15fa2c7fe6404bef240eba0 100644 (file)
@@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
        } else if (vxlan->flags & VXLAN_F_L3MISS) {
                union vxlan_addr ipa = {
                        .sin.sin_addr.s_addr = tip,
-                       .sa.sa_family = AF_INET,
+                       .sin.sin_family = AF_INET,
                };
 
                vxlan_ip_miss(dev, &ipa);
@@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
        } else if (vxlan->flags & VXLAN_F_L3MISS) {
                union vxlan_addr ipa = {
                        .sin6.sin6_addr = msg->target,
-                       .sa.sa_family = AF_INET6,
+                       .sin6.sin6_family = AF_INET6,
                };
 
                vxlan_ip_miss(dev, &ipa);
@@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
                if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
                        union vxlan_addr ipa = {
                                .sin.sin_addr.s_addr = pip->daddr,
-                               .sa.sa_family = AF_INET,
+                               .sin.sin_family = AF_INET,
                        };
 
                        vxlan_ip_miss(dev, &ipa);
@@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
                if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
                        union vxlan_addr ipa = {
                                .sin6.sin6_addr = pip6->daddr,
-                               .sa.sa_family = AF_INET6,
+                               .sin6.sin6_family = AF_INET6,
                        };
 
                        vxlan_ip_miss(dev, &ipa);
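
These hunks matter because of how designated initializers interact with unions: naming a second, different union member (the removed .sa designators) can make the compiler restart initialization of the union and drop the address that was just stored through .sin or .sin6, which is the behavior this change guards against on at least some gcc versions. A simplified, compilable illustration of the safe form, initializing both fields through the same member; the union layout here is made up, not vxlan_addr itself:

    union addr {
            struct { unsigned short family; unsigned int ip; } v4;
            struct { unsigned short family; } generic;
    };

    int main(void)
    {
            /* Both fields go through .v4; mixing .v4 and .generic designators
             * risks the .v4.ip value being discarded.
             */
            union addr a = { .v4.ip = 0x01020304, .v4.family = 2 };

            return (a.v4.family == 2 && a.v4.ip == 0x01020304) ? 0 : 1;
    }
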
index 334c2ece855a92af6576b01ec87cd47af58b8540..da92bfa76b7cf1d37e9ea819edf00c59d84e350b 100644 (file)
@@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv)
 
        kfree_skb(priv->rx_skb);
 
-       usb_put_dev(priv->udev);
-
        at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw",
                 __func__);
        ieee80211_free_hw(priv->hw);
@@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface)
 
        wiphy_info(priv->hw->wiphy, "disconnecting\n");
        at76_delete_device(priv);
+       usb_put_dev(priv->udev);
        dev_info(&interface->dev, "disconnected\n");
 }
 
index 5fe29b9f8fa26e31b05eaab237ac4830aaf21471..8f68426ca653d93dc58ea990a213c7091a13553a 100644 (file)
@@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
 
        if (strncmp("trigger", buf, 7) == 0) {
                ath9k_spectral_scan_trigger(sc->hw);
-       } else if (strncmp("background", buf, 9) == 0) {
+       } else if (strncmp("background", buf, 10) == 0) {
                ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
                ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
        } else if (strncmp("chanscan", buf, 8) == 0) {
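
The single-character length change is significant because strncmp() with a length shorter than the keyword only compares a prefix, so malformed input could still enable background mode. A standalone illustration; the buffer contents are made up:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *buf = "backgroun_mode";     /* hypothetical bad input */

            /* length 9 checks only "backgroun", so this matches ... */
            printf("n=9 : match=%d\n", strncmp("background", buf, 9) == 0);
            /* ... while length 10 compares the full keyword and rejects it */
            printf("n=10: match=%d\n", strncmp("background", buf, 10) == 0);
            return 0;
    }
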
index 6451d2b6abcff3c33f486c9d7eb90390647a1266..824f5e2877835d8a829f643d8b15cf1ed85e2572 100644 (file)
@@ -51,7 +51,6 @@ config IWLWIFI_LEDS
 
 config IWLDVM
        tristate "Intel Wireless WiFi DVM Firmware support"
-       depends on m
        default IWLWIFI
        help
          This is the driver that supports the DVM firmware which is
@@ -60,7 +59,6 @@ config IWLDVM
 
 config IWLMVM
        tristate "Intel Wireless WiFi MVM Firmware support"
-       depends on m
        help
          This is the driver that supports the MVM firmware which is
          currently only available for 7260 and 3160 devices.
index 6dc5dd3ced44723943934f114c6fde78c98a565f..ed50de6362ed1d5dcd56b45243ff0b140dcbafe5 100644 (file)
@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        /* recalculate basic rates */
        iwl_calc_basic_rates(priv, ctx);
 
+       /*
+        * force CTS-to-self frames protection if RTS-CTS is not preferred
+        * one aggregation protection method
+        */
+       if (!priv->hw_params.use_rts_for_aggregation)
+               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
        if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
            !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
                ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
        else
                ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 
+       if (bss_conf->use_cts_prot)
+               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+
        memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
 
        if (vif->type == NL80211_IFTYPE_AP ||
index 48730064da73f5e1e058f756d9473a2a0a5bc376..d67a37a786aa136a20b192f0a7712992ffdb527d 100644 (file)
@@ -67,8 +67,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  9
-#define IWL3160_UCODE_API_MAX  9
+#define IWL7260_UCODE_API_MAX  10
+#define IWL3160_UCODE_API_MAX  10
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   9
index 44b19e015102096e39fd086539794ab608c4c359..e93c6972290b84de3cf26214ca6894d0db0c4f79 100644 (file)
@@ -67,7 +67,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  9
+#define IWL8000_UCODE_API_MAX  10
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   8
index 33da3dfcfa4f05f112e4271487c9f5131bb3f20a..d4bd550f505c7dcfa59bc60f4c852a5cf6aed39e 100644 (file)
@@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter)
 
        bool is_legacy = false;
 
-       if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B))
+       if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
                is_legacy = true;
 
        return is_legacy;
index 361435f8608a125ca4cfdfd38b1bfa6212215ec9..1ac6383e79471f5944e4267f3ae8bc28715c53e2 100644 (file)
@@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+       {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
        {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
        {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
index e29e15dca86ee3b7d1efc6d84a2607bcd173b449..f379689dde309b7bf63eb511ef7f28a827f71fc5 100644 (file)
@@ -576,6 +576,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);
 
+       netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+                       XENVIF_NAPI_WEIGHT);
+
        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
@@ -629,9 +632,6 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);
 
-       netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
-                       XENVIF_NAPI_WEIGHT);
-
        return 0;
 
 err_rx_unbind:
index 9dd63b82202555e0ebdd46b3460a8ec1107f8f9f..e9bf2f47b61ada12a6730990c433d57a71ce90ce 100644 (file)
@@ -510,7 +510,7 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 
        WARN_ON(nt->mw[mw_num].virt_addr == NULL);
 
-       if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+       if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
                num_qps_mw = nt->max_qps / mw_max + 1;
        else
                num_qps_mw = nt->max_qps / mw_max;
@@ -576,6 +576,19 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
                return -ENOMEM;
        }
 
+       /*
+        * we must ensure that the memory address allocated is BAR size
+        * aligned in order for the XLAT register to take the value. This
+        * is a requirement of the hardware. It is recommended to setup CMA
+        * for BAR sizes equal or greater than 4MB.
+        */
+       if (!IS_ALIGNED(mw->dma_addr, mw->size)) {
+               dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n",
+                       &mw->dma_addr);
+               ntb_free_mw(nt, num_mw);
+               return -ENOMEM;
+       }
+
        /* Notify HW the memory location of the receive buffer */
        ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
 
@@ -856,7 +869,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
        qp->client_ready = NTB_LINK_DOWN;
        qp->event_handler = NULL;
 
-       if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+       if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
                num_qps_mw = nt->max_qps / mw_max + 1;
        else
                num_qps_mw = nt->max_qps / mw_max;
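
The new alignment check relies on IS_ALIGNED(), which for a power-of-two size (as BAR sizes are) reduces to masking against size - 1. A standalone sketch with the macro expanded, using a hypothetical 4 MB BAR:

    #include <stdint.h>
    #include <stdio.h>

    /* Equivalent of the kernel's IS_ALIGNED(x, a) for power-of-two 'a' */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            uint64_t bar_size = 4ULL * 1024 * 1024;         /* 4 MB BAR */

            printf("%d\n", IS_ALIGNED(0x01400000ULL, bar_size));   /* 1 */
            printf("%d\n", IS_ALIGNED(0x01400100ULL, bar_size));   /* 0 */
            return 0;
    }
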
index 9eae9834bcc7a1ee02ea01c8debb40882f416db9..a0580afe1713a5f58db5e96da635028856200a08 100644 (file)
@@ -913,7 +913,7 @@ static int __init dino_probe(struct parisc_device *dev)
        printk("%s version %s found at 0x%lx\n", name, version, hpa);
 
        if (!request_mem_region(hpa, PAGE_SIZE, name)) {
-               printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n",
+               printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%lx)!\n",
                        hpa);
                return 1;
        }
index 8922c376456aee763090296a7045211aaae90839..90f5ccacce4ba819786506054deec71f05473026 100644 (file)
@@ -56,7 +56,7 @@ config PCI_HOST_GENERIC
          controller, such as the one emulated by kvmtool.
 
 config PCIE_SPEAR13XX
-       tristate "STMicroelectronics SPEAr PCIe controller"
+       bool "STMicroelectronics SPEAr PCIe controller"
        depends on ARCH_SPEAR13XX
        select PCIEPORTBUS
        select PCIE_DW
index 0dd742719154a9d7172dcd9c4f9f9dc3e26e6030..4ff8cbb620d3204e84a7afbb6925ed7faf055812 100644 (file)
@@ -41,9 +41,9 @@ config PHY_MVEBU_SATA
 config PHY_MIPHY365X
        tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series"
        depends on ARCH_STI
-       depends on GENERIC_PHY
        depends on HAS_IOMEM
        depends on OF
+       select GENERIC_PHY
        help
          Enable this to support the miphy transceiver (for SATA/PCIE)
          that is part of STMicroelectronics STiH41x SoC series.
index b05302b09c9fd0f0bdca202f1b52f1e51b94e0f1..392101c8d6b04aad1f9cc25119c65cac0ff73471 100644 (file)
@@ -542,6 +542,7 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
        },
        { },
 };
+MODULE_DEVICE_TABLE(of, exynos5_usbdrd_phy_of_match);
 
 static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
 {
index e1a6623d4696f1c24267292764b58f65b6137f47..9cd33a4bcfb193c99bd03d981f9d75d0e54f7bac 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/usb/otg.h>
 #include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
 #include <linux/usb/musb-omap.h>
 #include <linux/usb/ulpi.h>
 #include <linux/i2c/twl.h>
@@ -422,37 +423,55 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
        }
 }
 
-static int twl4030_phy_power_off(struct phy *phy)
+static int twl4030_usb_runtime_suspend(struct device *dev)
 {
-       struct twl4030_usb *twl = phy_get_drvdata(phy);
+       struct twl4030_usb *twl = dev_get_drvdata(dev);
 
+       dev_dbg(twl->dev, "%s\n", __func__);
        if (twl->asleep)
                return 0;
 
        twl4030_phy_power(twl, 0);
        twl->asleep = 1;
-       dev_dbg(twl->dev, "%s\n", __func__);
+
        return 0;
 }
 
-static void __twl4030_phy_power_on(struct twl4030_usb *twl)
+static int twl4030_usb_runtime_resume(struct device *dev)
 {
+       struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+       dev_dbg(twl->dev, "%s\n", __func__);
+       if (!twl->asleep)
+               return 0;
+
        twl4030_phy_power(twl, 1);
-       twl4030_i2c_access(twl, 1);
-       twl4030_usb_set_mode(twl, twl->usb_mode);
-       if (twl->usb_mode == T2_USB_MODE_ULPI)
-               twl4030_i2c_access(twl, 0);
+       twl->asleep = 0;
+
+       return 0;
+}
+
+static int twl4030_phy_power_off(struct phy *phy)
+{
+       struct twl4030_usb *twl = phy_get_drvdata(phy);
+
+       dev_dbg(twl->dev, "%s\n", __func__);
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
+       return 0;
 }
 
 static int twl4030_phy_power_on(struct phy *phy)
 {
        struct twl4030_usb *twl = phy_get_drvdata(phy);
 
-       if (!twl->asleep)
-               return 0;
-       __twl4030_phy_power_on(twl);
-       twl->asleep = 0;
        dev_dbg(twl->dev, "%s\n", __func__);
+       pm_runtime_get_sync(twl->dev);
+       twl4030_i2c_access(twl, 1);
+       twl4030_usb_set_mode(twl, twl->usb_mode);
+       if (twl->usb_mode == T2_USB_MODE_ULPI)
+               twl4030_i2c_access(twl, 0);
 
        /*
         * XXX When VBUS gets driven after musb goes to A mode,
@@ -558,9 +577,27 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                 * USB_LINK_VBUS state.  musb_hdrc won't care until it
                 * starts to handle softconnect right.
                 */
+               if ((status == OMAP_MUSB_VBUS_VALID) ||
+                   (status == OMAP_MUSB_ID_GROUND)) {
+                       if (twl->asleep)
+                               pm_runtime_get_sync(twl->dev);
+               } else {
+                       if (!twl->asleep) {
+                               pm_runtime_mark_last_busy(twl->dev);
+                               pm_runtime_put_autosuspend(twl->dev);
+                       }
+               }
                omap_musb_mailbox(status);
        }
-       sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+
+       /* don't schedule during sleep - irq works right then */
+       if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
+               cancel_delayed_work(&twl->id_workaround_work);
+               schedule_delayed_work(&twl->id_workaround_work, HZ);
+       }
+
+       if (irq)
+               sysfs_notify(&twl->dev->kobj, NULL, "vbus");
 
        return IRQ_HANDLED;
 }
@@ -569,29 +606,8 @@ static void twl4030_id_workaround_work(struct work_struct *work)
 {
        struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
                id_workaround_work.work);
-       enum omap_musb_vbus_id_status status;
-       bool status_changed = false;
-
-       status = twl4030_usb_linkstat(twl);
-
-       spin_lock_irq(&twl->lock);
-       if (status >= 0 && status != twl->linkstat) {
-               twl->linkstat = status;
-               status_changed = true;
-       }
-       spin_unlock_irq(&twl->lock);
-
-       if (status_changed) {
-               dev_dbg(twl->dev, "handle missing status change to %d\n",
-                               status);
-               omap_musb_mailbox(status);
-       }
 
-       /* don't schedule during sleep - irq works right then */
-       if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
-               cancel_delayed_work(&twl->id_workaround_work);
-               schedule_delayed_work(&twl->id_workaround_work, HZ);
-       }
+       twl4030_usb_irq(0, twl);
 }
 
 static int twl4030_phy_init(struct phy *phy)
@@ -599,22 +615,17 @@ static int twl4030_phy_init(struct phy *phy)
        struct twl4030_usb *twl = phy_get_drvdata(phy);
        enum omap_musb_vbus_id_status status;
 
-       /*
-        * Start in sleep state, we'll get called through set_suspend()
-        * callback when musb is runtime resumed and it's time to start.
-        */
-       __twl4030_phy_power(twl, 0);
-       twl->asleep = 1;
-
+       pm_runtime_get_sync(twl->dev);
        status = twl4030_usb_linkstat(twl);
        twl->linkstat = status;
 
-       if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) {
+       if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID)
                omap_musb_mailbox(twl->linkstat);
-               twl4030_phy_power_on(phy);
-       }
 
        sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
        return 0;
 }
 
@@ -650,6 +661,11 @@ static const struct phy_ops ops = {
        .owner          = THIS_MODULE,
 };
 
+static const struct dev_pm_ops twl4030_usb_pm_ops = {
+       SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
+                          twl4030_usb_runtime_resume, NULL)
+};
+
 static int twl4030_usb_probe(struct platform_device *pdev)
 {
        struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
@@ -726,6 +742,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
 
        ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier);
 
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
+
        /* Our job is to use irqs and status from the power module
         * to keep the transceiver disabled when nothing's connected.
         *
@@ -744,6 +765,9 @@ static int twl4030_usb_probe(struct platform_device *pdev)
                return status;
        }
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(twl->dev);
+
        dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
        return 0;
 }
@@ -753,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
        struct twl4030_usb *twl = platform_get_drvdata(pdev);
        int val;
 
+       pm_runtime_get_sync(twl->dev);
        cancel_delayed_work(&twl->id_workaround_work);
        device_remove_file(twl->dev, &dev_attr_vbus);
 
@@ -772,9 +797,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
 
        /* disable complete OTG block */
        twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
-
-       if (!twl->asleep)
-               twl4030_phy_power(twl, 0);
+       pm_runtime_mark_last_busy(twl->dev);
+       pm_runtime_put(twl->dev);
 
        return 0;
 }
@@ -792,6 +816,7 @@ static struct platform_driver twl4030_usb_driver = {
        .remove         = twl4030_usb_remove,
        .driver         = {
                .name   = "twl4030_usb",
+               .pm     = &twl4030_usb_pm_ops,
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(twl4030_usb_id_table),
        },
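
The probe, suspend and resume changes above follow the usual runtime-PM autosuspend recipe. A condensed sketch of just that wiring, with driver-specific work omitted, the probe name illustrative, and the 2-second delay taken from the patch:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_probe(struct platform_device *pdev)
    {
            pm_runtime_use_autosuspend(&pdev->dev);
            pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
            pm_runtime_enable(&pdev->dev);

            pm_runtime_get_sync(&pdev->dev);        /* power up for init */
            /* ... register the PHY, request IRQs, etc. ... */
            pm_runtime_mark_last_busy(&pdev->dev);
            pm_runtime_put_autosuspend(&pdev->dev); /* drop ref, arm the timer */

            return 0;
    }
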
index 9ca59a01874316585a190093b0265401e17975a3..e12e5b07f6d751aba9cce63cf49d2acabe50b7ab 100644 (file)
@@ -461,6 +461,7 @@ static struct irq_chip byt_irqchip = {
        .irq_mask = byt_irq_mask,
        .irq_unmask = byt_irq_unmask,
        .irq_set_type = byt_irq_type,
+       .flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
index fc468a3d95cea3a2f4a8bc260717e6b1c920449d..02152de135b5c43b79318c6c5ac7971f0723e478 100644 (file)
@@ -88,7 +88,6 @@ struct ideapad_private {
        struct dentry *debug;
        unsigned long cfg;
        bool has_hw_rfkill_switch;
-       bool has_touchpad_control;
 };
 
 static bool no_bt_rfkill;
@@ -456,7 +455,7 @@ struct ideapad_rfk_data {
        int type;
 };
 
-const const struct ideapad_rfk_data ideapad_rfk_data[] = {
+static const struct ideapad_rfk_data ideapad_rfk_data[] = {
        { "ideapad_wlan",    CFG_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN },
        { "ideapad_bluetooth", CFG_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH },
        { "ideapad_3g",        CFG_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN },
@@ -767,9 +766,6 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
 {
        unsigned long value;
 
-       if (!priv->has_touchpad_control)
-               return;
-
        /* Without reading from EC touchpad LED doesn't switch state */
        if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
                /* Some IdeaPads don't really turn off touchpad - they only
@@ -833,29 +829,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
  * always results in 0 on these models, causing ideapad_laptop to wrongly
  * report all radios as hardware-blocked.
  */
-static struct dmi_system_id no_hw_rfkill_list[] = {
-       {
-               .ident = "Lenovo Yoga 2 11 / 13 / Pro",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"),
-               },
-       },
-       {}
-};
-
-/*
- * Some models don't offer touchpad ctrl through the ideapad interface, causing
- * ideapad_sync_touchpad_state to send wrong touchpad enable/disable events.
- */
-static struct dmi_system_id no_touchpad_ctrl_list[] = {
-       {
-               .ident = "Lenovo Yoga 1 series",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga"),
-               },
-       },
+static const struct dmi_system_id no_hw_rfkill_list[] = {
        {
                .ident = "Lenovo Yoga 2 11 / 13 / Pro",
                .matches = {
@@ -889,7 +863,6 @@ static int ideapad_acpi_add(struct platform_device *pdev)
        priv->adev = adev;
        priv->platform_device = pdev;
        priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list);
-       priv->has_touchpad_control = !dmi_check_system(no_touchpad_ctrl_list);
 
        ret = ideapad_sysfs_init(priv);
        if (ret)
index b062d3d7b37304e9e724fd869419febfae522c19..d0dce734b2edb37e055d6f30f6901a234645708f 100644 (file)
@@ -1255,10 +1255,15 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
                                         const char *buf, size_t count)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
-       int mode = -1;
-       int time = -1;
+       int mode;
+       int time;
+       int ret;
 
-       if (sscanf(buf, "%i", &mode) != 1 && (mode != 2 || mode != 1))
+
+       ret = kstrtoint(buf, 0, &mode);
+       if (ret)
+               return ret;
+       if (mode != SCI_KBD_MODE_FNZ && mode != SCI_KBD_MODE_AUTO)
                return -EINVAL;
 
        /* Set the Keyboard Backlight Mode where:
@@ -1266,11 +1271,12 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
         *      Auto - KBD backlight turns off automatically in given time
         *      FN-Z - KBD backlight "toggles" when hotkey pressed
         */
-       if (mode != -1 && toshiba->kbd_mode != mode) {
+       if (toshiba->kbd_mode != mode) {
                time = toshiba->kbd_time << HCI_MISC_SHIFT;
                time = time + toshiba->kbd_mode;
-               if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
-                       return -EIO;
+               ret = toshiba_kbd_illum_status_set(toshiba, time);
+               if (ret)
+                       return ret;
                toshiba->kbd_mode = mode;
        }
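
Replacing the sscanf() call with kstrtoint() gives proper error reporting for malformed or out-of-range input before the value is range-checked. A condensed sketch of the pattern; the helper name and the accepted mode values are illustrative:

    #include <linux/kernel.h>

    static ssize_t mode_store_example(const char *buf, size_t count,
                                      int *mode_out)
    {
            int mode, ret;

            ret = kstrtoint(buf, 0, &mode);
            if (ret)
                    return ret;             /* -EINVAL or -ERANGE */
            if (mode != 1 && mode != 2)
                    return -EINVAL;         /* valid number, invalid mode */

            *mode_out = mode;
            return count;
    }
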
 
@@ -1857,9 +1863,16 @@ static int toshiba_acpi_resume(struct device *device)
 {
        struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
        u32 result;
+       acpi_status status;
+
+       if (dev->hotkey_dev) {
+               status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB",
+                               NULL, NULL);
+               if (ACPI_FAILURE(status))
+                       pr_info("Unable to re-enable hotkeys\n");
 
-       if (dev->hotkey_dev)
                hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &result);
+       }
 
        return 0;
 }
index b1cda6ffdbcc5f6bfc6aa5d3e4c42d44b038823b..45e05b32f9b66ea0fc0c27c997ebdefecd3fa3c0 100644 (file)
@@ -953,6 +953,7 @@ static const struct x86_cpu_id rapl_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */
        { X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */
        { X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */
+       { X86_VENDOR_INTEL, 6, 0x3f},/* Haswell */
        { X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */
        /* TODO: Add more CPU IDs after testing */
        {}
@@ -1166,11 +1167,10 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
 
        for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
                /* use physical package id to read counters */
-               if (!rapl_check_domain(cpu, i))
+               if (!rapl_check_domain(cpu, i)) {
                        rp->domain_map |= 1 << i;
-               else
-                       pr_warn("RAPL domain %s detection failed\n",
-                               rapl_domain_names[i]);
+                       pr_info("Found RAPL domain %s\n", rapl_domain_names[i]);
+               }
        }
        rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
        if (!rp->nr_domains) {
index 2ead7e78c4568ec7fe50ece74c3109d424efa852..14ba80bfa571bc4b34eb795c85f6725b96f6ad7b 100644 (file)
@@ -77,7 +77,7 @@ EXPORT_SYMBOL_GPL(dasd_nofcx);
  * strings when running as a module.
  */
 static char *dasd[256];
-module_param_array(dasd, charp, NULL, 0);
+module_param_array(dasd, charp, NULL, S_IRUGO);
 
 /*
  * Single spinlock to protect devmap and servermap structures and lists.
index 97ef37b51068369b44d41376e630534d014241a5..e7646ce3d659218ca7fd9762e05cf0d7c4c95a5c 100644 (file)
@@ -889,6 +889,7 @@ extern const struct attribute_group *qeth_generic_attr_groups[];
 extern const struct attribute_group *qeth_osn_attr_groups[];
 extern struct workqueue_struct *qeth_wq;
 
+int qeth_card_hw_is_reachable(struct qeth_card *);
 const char *qeth_get_cardname_short(struct qeth_card *);
 int qeth_realloc_buffer_pool(struct qeth_card *, int);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
index c0d6ba8655c742991bc13fe1b2fe2a7a740ea4ac..fd22c811cbe195d2056f6bb5b6339fa33113b8cc 100644 (file)
@@ -73,6 +73,13 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 struct workqueue_struct *qeth_wq;
 EXPORT_SYMBOL_GPL(qeth_wq);
 
+int qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+       return (card->state == CARD_STATE_SOFTSETUP) ||
+               (card->state == CARD_STATE_UP);
+}
+EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
+
 static void qeth_close_dev_handler(struct work_struct *work)
 {
        struct qeth_card *card;
@@ -5790,6 +5797,7 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
        struct qeth_card *card = netdev->ml_priv;
        enum qeth_link_types link_type;
        struct carrier_info carrier_info;
+       int rc;
        u32 speed;
 
        if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
@@ -5832,8 +5840,14 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
        /* Check if we can obtain more accurate information.     */
        /* If QUERY_CARD_INFO command is not supported or fails, */
        /* just return the heuristics that was filled above.     */
-       if (qeth_query_card_info(card, &carrier_info) != 0)
+       if (!qeth_card_hw_is_reachable(card))
+               return -ENODEV;
+       rc = qeth_query_card_info(card, &carrier_info);
+       if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
                return 0;
+       if (rc) /* report error from the hardware operation */
+               return rc;
+       /* on success, fill in the information got from the hardware */
 
        netdev_dbg(netdev,
        "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
index ae1bc04b8653eed571dbbc9a271ec9cdc93205f3..59e3aa538b4da4594456b0965bc0fe42a49379f3 100644 (file)
@@ -5,17 +5,12 @@
 
 #include <linux/slab.h>
 #include <asm/ebcdic.h>
+#include "qeth_core.h"
 #include "qeth_l2.h"
 
 #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
 struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
 
-static int qeth_card_hw_is_reachable(struct qeth_card *card)
-{
-       return (card->state == CARD_STATE_SOFTSETUP) ||
-               (card->state == CARD_STATE_UP);
-}
-
 static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
                                struct device_attribute *attr, char *buf,
                                int show_state)
index 19396dc4ee47f947f569a26308416f99401acb21..bed2fedeb05710f8d17313ee49e5a6e00ce7f471 100644 (file)
@@ -38,6 +38,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4351) },
        { 0, },
 };
 MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
index e7b2e02341962426ea6e07a351d9932a01739f9d..69139ce7420d65bb751c2691e954fd1299bd1262 100644 (file)
@@ -199,7 +199,6 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
        fence->num_fences = 1;
        atomic_set(&fence->status, 1);
 
-       fence_get(&pt->base);
        fence->cbs[0].sync_pt = &pt->base;
        fence->cbs[0].fence = fence;
        if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
index 7e3f019d7e72543c40dd11299a051383a1a57224..4662e00b456a25d4c08b755bc68b6bdbf37e8ed2 100644 (file)
@@ -574,6 +574,9 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
        for (i = 0; i < 2; i++) {
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
+               if (!channel->connector.funcs)
+                       continue;
+
                channel->connector.funcs->destroy(&channel->connector);
                channel->encoder.funcs->destroy(&channel->encoder);
        }
index 6f393a11f44d2d57fa074defc77ce048796b36cc..50de10a550e970f5c1710653eb1449215a67e3e0 100644 (file)
@@ -281,7 +281,8 @@ static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
 
                ipu_idmac_put(ipu_plane->ipu_ch);
                ipu_dmfc_put(ipu_plane->dmfc);
-               ipu_dp_put(ipu_plane->dp);
+               if (ipu_plane->dp)
+                       ipu_dp_put(ipu_plane->dp);
        }
 }
 
index 0367f5a2cfe4b0760d6dfc75e53d5257399c1ca7..0c59e26c080583f85df3bf21194e777bdab7a079 100644 (file)
@@ -568,7 +568,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        if (sb->s_root == NULL) {
                CERROR("%s: can't make root dentry\n",
                        ll_get_fsname(sb, NULL, 0));
-               GOTO(out_root, err = -ENOMEM);
+               GOTO(out_lock_cn_cb, err = -ENOMEM);
        }
 
        sbi->ll_sdev_orig = sb->s_dev;
index 4db7987ec225f4d345efbb3249f0005897f95b2e..57d9df84ce5d7aca4fe25c557988742fbbb986bc 100644 (file)
@@ -540,6 +540,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
        { "INT3434", 0 },
        { "INT3435", 0 },
        { "80860F0A", 0 },
+       { "8086228A", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
index 7b63677475c1f0cc57f9a5f947d74d66cf9aad81..d7d4584549a5d77e84b02f567085bcc77da5526a 100644 (file)
@@ -526,6 +526,45 @@ static void atmel_enable_ms(struct uart_port *port)
        UART_PUT_IER(port, ier);
 }
 
+/*
+ * Disable modem status interrupts
+ */
+static void atmel_disable_ms(struct uart_port *port)
+{
+       struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+       uint32_t idr = 0;
+
+       /*
+        * Interrupts should not be disabled twice
+        */
+       if (!atmel_port->ms_irq_enabled)
+               return;
+
+       atmel_port->ms_irq_enabled = false;
+
+       if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]);
+       else
+               idr |= ATMEL_US_CTSIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]);
+       else
+               idr |= ATMEL_US_DSRIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]);
+       else
+               idr |= ATMEL_US_RIIC;
+
+       if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0)
+               disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]);
+       else
+               idr |= ATMEL_US_DCDIC;
+
+       UART_PUT_IDR(port, idr);
+}
+
 /*
  * Control the transmission of a break signal
  */
@@ -1993,7 +2032,9 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
 
        /* CTS flow-control and modem-status interrupts */
        if (UART_ENABLE_MS(port, termios->c_cflag))
-               port->ops->enable_ms(port);
+               atmel_enable_ms(port);
+       else
+               atmel_disable_ms(port);
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
index 01951d27cc03c8a4801dd66083c9a90c079b0332..806e4bcadbd7ee6d2d7e543798fb8a621b4e21cb 100644 (file)
@@ -581,7 +581,7 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port)
 {
        unsigned int status;
 
-       status = cdns_uart_readl(CDNS_UART_ISR_OFFSET) & CDNS_UART_IXR_TXEMPTY;
+       status = cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY;
        return status ? TIOCSER_TEMT : 0;
 }
 
index d72b9d2de2c5f828be33ea965c6ad87617722859..4935ac38fd008d17a51ed866e9fc002cddf021f5 100644 (file)
 static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
 {
        struct device *dev = ci->gadget.dev.parent;
-       int val;
 
        switch (event) {
        case CI_HDRC_CONTROLLER_RESET_EVENT:
                dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
                writel(0, USB_AHBBURST);
                writel(0, USB_AHBMODE);
+               usb_phy_init(ci->transceiver);
                break;
        case CI_HDRC_CONTROLLER_STOPPED_EVENT:
                dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
@@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
                 * Put the transceiver in non-driving mode. Otherwise host
                 * may not detect soft-disconnection.
                 */
-               val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
-               val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
-               val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
-               usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
+               usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN);
                break;
        default:
                dev_dbg(dev, "unknown ci_hdrc event\n");
index 46f5161c78913c75678ae3416f4031099b880090..d481c99a20d7b13adc9b9ec5c00565fd7d0ecc60 100644 (file)
@@ -5024,9 +5024,10 @@ static void hub_events(void)
 
                hub = list_entry(tmp, struct usb_hub, event_list);
                kref_get(&hub->kref);
+               hdev = hub->hdev;
+               usb_get_dev(hdev);
                spin_unlock_irq(&hub_event_lock);
 
-               hdev = hub->hdev;
                hub_dev = hub->intfdev;
                intf = to_usb_interface(hub_dev);
                dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
@@ -5139,6 +5140,7 @@ static void hub_events(void)
                usb_autopm_put_interface(intf);
  loop_disconnected:
                usb_unlock_device(hdev);
+               usb_put_dev(hdev);
                kref_put(&hub->kref, hub_release);
 
        } /* end while (1) */
index 7c9618e916e23cd7eca1cc6ae098edf999a4aa06..ce6071d65d510696ebb4d428cd7240dd51a95b3e 100644 (file)
@@ -1649,6 +1649,7 @@ static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
                        dev_err(hsotg->dev,
                                "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
                                __func__, val);
+                       break;
                }
 
                udelay(1);
@@ -2747,13 +2748,14 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
 
        dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
 
-       if (hsotg->phy) {
-               phy_init(hsotg->phy);
-               phy_power_on(hsotg->phy);
-       } else if (hsotg->uphy)
+       if (hsotg->uphy)
                usb_phy_init(hsotg->uphy);
-       else if (hsotg->plat->phy_init)
+       else if (hsotg->plat && hsotg->plat->phy_init)
                hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
+       else {
+               phy_init(hsotg->phy);
+               phy_power_on(hsotg->phy);
+       }
 }
 
 /**
@@ -2767,13 +2769,14 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
 {
        struct platform_device *pdev = to_platform_device(hsotg->dev);
 
-       if (hsotg->phy) {
-               phy_power_off(hsotg->phy);
-               phy_exit(hsotg->phy);
-       } else if (hsotg->uphy)
+       if (hsotg->uphy)
                usb_phy_shutdown(hsotg->uphy);
-       else if (hsotg->plat->phy_exit)
+       else if (hsotg->plat && hsotg->plat->phy_exit)
                hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
+       else {
+               phy_power_off(hsotg->phy);
+               phy_exit(hsotg->phy);
+       }
 }
 
 /**
@@ -2892,13 +2895,11 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
                return -ENODEV;
 
        /* all endpoints should be shutdown */
-       for (ep = 0; ep < hsotg->num_of_eps; ep++)
+       for (ep = 1; ep < hsotg->num_of_eps; ep++)
                s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
 
        spin_lock_irqsave(&hsotg->lock, flags);
 
-       s3c_hsotg_phy_disable(hsotg);
-
        if (!driver)
                hsotg->driver = NULL;
 
@@ -2941,7 +2942,6 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
                s3c_hsotg_phy_enable(hsotg);
                s3c_hsotg_core_init(hsotg);
        } else {
-               s3c_hsotg_disconnect(hsotg);
                s3c_hsotg_phy_disable(hsotg);
        }
 
@@ -3441,13 +3441,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
 
        hsotg->irq = ret;
 
-       ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
-                               dev_name(dev), hsotg);
-       if (ret < 0) {
-               dev_err(dev, "cannot claim IRQ\n");
-               goto err_clk;
-       }
-
        dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
 
        hsotg->gadget.max_speed = USB_SPEED_HIGH;
@@ -3488,9 +3481,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
        if (hsotg->phy && (phy_get_bus_width(phy) == 8))
                hsotg->phyif = GUSBCFG_PHYIF8;
 
-       if (hsotg->phy)
-               phy_init(hsotg->phy);
-
        /* usb phy enable */
        s3c_hsotg_phy_enable(hsotg);
 
@@ -3498,6 +3488,17 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
        s3c_hsotg_init(hsotg);
        s3c_hsotg_hw_cfg(hsotg);
 
+       ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
+                               dev_name(dev), hsotg);
+       if (ret < 0) {
+               s3c_hsotg_phy_disable(hsotg);
+               clk_disable_unprepare(hsotg->clk);
+               regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
+                                      hsotg->supplies);
+               dev_err(dev, "cannot claim IRQ\n");
+               goto err_clk;
+       }
+
        /* hsotg->num_of_eps holds number of EPs other than ep0 */
 
        if (hsotg->num_of_eps == 0) {
@@ -3582,9 +3583,6 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
                usb_gadget_unregister_driver(hsotg->driver);
        }
 
-       s3c_hsotg_phy_disable(hsotg);
-       if (hsotg->phy)
-               phy_exit(hsotg->phy);
        clk_disable_unprepare(hsotg->clk);
 
        return 0;
index b769c1faaf0385dff2543592e2fe68745d1fc7c5..9069984fe5cf0eae313a0dfb5faa2fe06ba9766c 100644 (file)
@@ -799,20 +799,21 @@ static int dwc3_remove(struct platform_device *pdev)
 {
        struct dwc3     *dwc = platform_get_drvdata(pdev);
 
+       dwc3_debugfs_exit(dwc);
+       dwc3_core_exit_mode(dwc);
+       dwc3_event_buffers_cleanup(dwc);
+       dwc3_free_event_buffers(dwc);
+
        usb_phy_set_suspend(dwc->usb2_phy, 1);
        usb_phy_set_suspend(dwc->usb3_phy, 1);
        phy_power_off(dwc->usb2_generic_phy);
        phy_power_off(dwc->usb3_generic_phy);
 
+       dwc3_core_exit(dwc);
+
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       dwc3_debugfs_exit(dwc);
-       dwc3_core_exit_mode(dwc);
-       dwc3_event_buffers_cleanup(dwc);
-       dwc3_free_event_buffers(dwc);
-       dwc3_core_exit(dwc);
-
        return 0;
 }
 
index 9dcfbe7cd5f5d5cfc58239ee578cd68cae09a63f..fc0de3753648187ab9f1fd68ac121a83cd808e91 100644 (file)
@@ -576,9 +576,9 @@ static int dwc3_omap_remove(struct platform_device *pdev)
        if (omap->extcon_id_dev.edev)
                extcon_unregister_interest(&omap->extcon_id_dev);
        dwc3_omap_disable_irqs(omap);
+       device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
 
        return 0;
 }
index 349cacc577d81a679cacd4494bb7ead82f11cfa4..490a6ca0073369844b40eff51f88b2cbfd353bce 100644 (file)
@@ -527,7 +527,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
                dep->stream_capable = true;
        }
 
-       if (usb_endpoint_xfer_isoc(desc))
+       if (!usb_endpoint_xfer_control(desc))
                params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
 
        /*
@@ -1225,16 +1225,17 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
 
        int                             ret;
 
+       spin_lock_irqsave(&dwc->lock, flags);
        if (!dep->endpoint.desc) {
                dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
                                request, ep->name);
+               spin_unlock_irqrestore(&dwc->lock, flags);
                return -ESHUTDOWN;
        }
 
        dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
                        request, ep->name, request->length);
 
-       spin_lock_irqsave(&dwc->lock, flags);
        ret = __dwc3_gadget_ep_queue(dep, req);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -2041,12 +2042,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
                dwc3_endpoint_transfer_complete(dwc, dep, event);
                break;
        case DWC3_DEPEVT_XFERINPROGRESS:
-               if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
-                       dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
-                                       dep->name);
-                       return;
-               }
-
                dwc3_endpoint_transfer_complete(dwc, dep, event);
                break;
        case DWC3_DEPEVT_XFERNOTREADY:
index dc30adf15a01d22cbde3c3a195a146373e302ef0..0dc3552d13603282d3d631aa1a7d06078bec0c3b 100644 (file)
@@ -155,6 +155,12 @@ struct ffs_io_data {
        struct usb_request *req;
 };
 
+struct ffs_desc_helper {
+       struct ffs_data *ffs;
+       unsigned interfaces_count;
+       unsigned eps_count;
+};
+
 static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
 
@@ -1830,7 +1836,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                                u8 *valuep, struct usb_descriptor_header *desc,
                                void *priv)
 {
-       struct ffs_data *ffs = priv;
+       struct ffs_desc_helper *helper = priv;
+       struct usb_endpoint_descriptor *d;
 
        ENTER();
 
@@ -1844,8 +1851,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                 * encountered interface "n" then there are at least
                 * "n+1" interfaces.
                 */
-               if (*valuep >= ffs->interfaces_count)
-                       ffs->interfaces_count = *valuep + 1;
+               if (*valuep >= helper->interfaces_count)
+                       helper->interfaces_count = *valuep + 1;
                break;
 
        case FFS_STRING:
@@ -1853,14 +1860,22 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
                 * Strings are indexed from 1 (0 is magic ;) reserved
                 * for languages list or some such)
                 */
-               if (*valuep > ffs->strings_count)
-                       ffs->strings_count = *valuep;
+               if (*valuep > helper->ffs->strings_count)
+                       helper->ffs->strings_count = *valuep;
                break;
 
        case FFS_ENDPOINT:
-               /* Endpoints are indexed from 1 as well. */
-               if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
-                       ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
+               d = (void *)desc;
+               helper->eps_count++;
+               if (helper->eps_count >= 15)
+                       return -EINVAL;
+               /* Check if descriptors for any speed were already parsed */
+               if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
+                       helper->ffs->eps_addrmap[helper->eps_count] =
+                               d->bEndpointAddress;
+               else if (helper->ffs->eps_addrmap[helper->eps_count] !=
+                               d->bEndpointAddress)
+                       return -EINVAL;
                break;
        }
 
@@ -2053,6 +2068,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
        char *data = _data, *raw_descs;
        unsigned os_descs_count = 0, counts[3], flags;
        int ret = -EINVAL, i;
+       struct ffs_desc_helper helper;
 
        ENTER();
 
@@ -2101,13 +2117,29 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
 
        /* Read descriptors */
        raw_descs = data;
+       helper.ffs = ffs;
        for (i = 0; i < 3; ++i) {
                if (!counts[i])
                        continue;
+               helper.interfaces_count = 0;
+               helper.eps_count = 0;
                ret = ffs_do_descs(counts[i], data, len,
-                                  __ffs_data_do_entity, ffs);
+                                  __ffs_data_do_entity, &helper);
                if (ret < 0)
                        goto error;
+               if (!ffs->eps_count && !ffs->interfaces_count) {
+                       ffs->eps_count = helper.eps_count;
+                       ffs->interfaces_count = helper.interfaces_count;
+               } else {
+                       if (ffs->eps_count != helper.eps_count) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+                       if (ffs->interfaces_count != helper.interfaces_count) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+               }
                data += ret;
                len  -= ret;
        }
@@ -2342,9 +2374,18 @@ static void ffs_event_add(struct ffs_data *ffs,
        spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 }
 
-
 /* Bind/unbind USB function hooks *******************************************/
 
+static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
+{
+       int i;
+
+       for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
+               if (ffs->eps_addrmap[i] == endpoint_address)
+                       return i;
+       return -ENOENT;
+}
+
 static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
                                    struct usb_descriptor_header *desc,
                                    void *priv)
@@ -2378,7 +2419,10 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
        if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
                return 0;
 
-       idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+       idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
+       if (idx < 0)
+               return idx;
+
        ffs_ep = func->eps + idx;
 
        if (unlikely(ffs_ep->descs[ep_desc_id])) {
index 63d6e71569c18d7ddcf26e92586798d30a97cbd2..d48897e8ffebf03db2f39ec77ba695364b4174a2 100644 (file)
@@ -224,6 +224,8 @@ struct ffs_data {
        void                            *ms_os_descs_ext_prop_name_avail;
        void                            *ms_os_descs_ext_prop_data_avail;
 
+       u8                              eps_addrmap[15];
+
        unsigned short                  strings_count;
        unsigned short                  interfaces_count;
        unsigned short                  eps_count;
index ae811d8d38b435d9cd198369b399b1cf58bda63c..ad39f892d20099a5047fc5a815fd9d33a03b1e65 100644 (file)
@@ -12,7 +12,7 @@
 
 
 #ifndef __FUSB300_UDC_H__
-#define __FUSB300_UDC_H_
+#define __FUSB300_UDC_H__
 
 #include <linux/kernel.h>
 
index f4eac113690ebd2e164e4768113aec6319b68290..2e95715b50c0967f02550dbe11e88c159dbd5b5c 100644 (file)
@@ -3320,7 +3320,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
        if (stat & tmp) {
                writel(tmp, &dev->regs->irqstat1);
                if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
-                               (readl(&dev->usb->usbstat) & mask)) ||
+                               ((readl(&dev->usb->usbstat) & mask) == 0)) ||
                                ((readl(&dev->usb->usbctl) &
                                        BIT(VBUS_PIN)) == 0)) &&
                                (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
index aa79e8749040b783281ec6f968620fc41ad41876..69aece31143a12d965af1014b0c133f773ffafcf 100644 (file)
@@ -468,7 +468,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
 }
 
 /* Updates Link Status for super Speed port */
-static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
+static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+               u32 *status, u32 status_reg)
 {
        u32 pls = status_reg & PORT_PLS_MASK;
 
@@ -507,7 +508,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
                 * in which sometimes the port enters compliance mode
                 * caused by a delay on the host-device negotiation.
                 */
-               if (pls == USB_SS_PORT_LS_COMP_MOD)
+               if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                               (pls == USB_SS_PORT_LS_COMP_MOD))
                        pls |= USB_PORT_STAT_CONNECTION;
        }
 
@@ -666,7 +668,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
        }
        /* Update Port Link State */
        if (hcd->speed == HCD_USB3) {
-               xhci_hub_report_usb3_link_state(&status, raw_port_status);
+               xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
                /*
                 * Verify if all USB3 Ports Have entered U0 already.
                 * Delete Compliance Mode Timer if so.
index 8056d90690ee1bff397b0fa689f3ab9175622a6b..8936211b161d3ed6b0a2989359707287f2721b21 100644 (file)
@@ -1812,6 +1812,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
        if (xhci->lpm_command)
                xhci_free_command(xhci, xhci->lpm_command);
+       xhci->lpm_command = NULL;
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
@@ -1819,7 +1820,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        xhci_cleanup_command_queue(xhci);
 
        num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-       for (i = 0; i < num_ports; i++) {
+       for (i = 0; i < num_ports && xhci->rh_bw; i++) {
                struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
                for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
                        struct list_head *ep = &bwt->interval_bw[j].endpoints;
index c020b094fe7d952d91f1f977a02d35de52dff451..c4a8fca8ae939f4fefeca3498bf967ce7aabcb9f 100644 (file)
@@ -3971,13 +3971,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
        int ret;
 
        spin_lock_irqsave(&xhci->lock, flags);
-       if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
+
+       virt_dev = xhci->devs[udev->slot_id];
+
+       /*
+        * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
+        * xHC was re-initialized. Exit latency will be set later after
+        * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
+        */
+
+       if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                return 0;
        }
 
        /* Attempt to issue an Evaluate Context command to change the MEL. */
-       virt_dev = xhci->devs[udev->slot_id];
        command = xhci->lpm_command;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
        if (!ctrl_ctx) {
index 47ae6455d0733c2d3ef4093aa4408e98969b3644..3ee133f675abee3e9c6921c4fa034855f57efba8 100644 (file)
@@ -39,6 +39,7 @@ struct cppi41_dma_channel {
        u32 transferred;
        u32 packet_sz;
        struct list_head tx_check;
+       int tx_zlp;
 };
 
 #define MUSB_DMA_NUM_CHANNELS 15
@@ -122,6 +123,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
 {
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
+       void __iomem *epio = hw_ep->regs;
+       u16 csr;
 
        if (!cppi41_channel->prog_len ||
            (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
@@ -131,15 +134,24 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
                        cppi41_channel->transferred;
                cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
                cppi41_channel->channel.rx_packet_done = true;
+
+               /*
+                * transmit a ZLP using PIO mode for transfers whose size is
+                * a multiple of the EP packet size.
+                */
+               if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
+                                       cppi41_channel->packet_sz) == 0) {
+                       musb_ep_select(musb->mregs, hw_ep->epnum);
+                       csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
+                       musb_writew(epio, MUSB_TXCSR, csr);
+               }
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload */
                struct dma_chan *dc = cppi41_channel->dc;
                struct dma_async_tx_descriptor *dma_desc;
                enum dma_transfer_direction direction;
-               u16 csr;
                u32 remain_bytes;
-               void __iomem *epio = cppi41_channel->hw_ep->regs;
 
                cppi41_channel->buf_addr += cppi41_channel->packet_sz;
 
@@ -363,6 +375,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
        cppi41_channel->packet_sz = packet_sz;
+       cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;
 
        /*
         * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
index c42bdf0c4a1f7322eed6ba4f7ce6b058f7c16fd9..00972eca04e7f5d4965f32fd0944c453f5291f07 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2012-2014 Freescale Semiconductor, Inc.
  * Copyright (C) 2012 Marek Vasut <marex@denx.de>
  * on behalf of DENX Software Engineering GmbH
  *
@@ -125,7 +125,13 @@ static const struct mxs_phy_data imx6sl_phy_data = {
                MXS_PHY_NEED_IP_FIX,
 };
 
+static const struct mxs_phy_data imx6sx_phy_data = {
+       .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+               MXS_PHY_NEED_IP_FIX,
+};
+
 static const struct of_device_id mxs_phy_dt_ids[] = {
+       { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
        { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
        { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
        { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
index 13b4fa287da8aff05739a657fb9c24b3f5541ea7..886f1807a67bbdcb8fe5e17e91b45e382507db14 100644 (file)
@@ -878,8 +878,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
                return -ENOMEM;
        }
 
-       tegra_phy->config = devm_kzalloc(&pdev->dev,
-               sizeof(*tegra_phy->config), GFP_KERNEL);
+       tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
+                                        GFP_KERNEL);
        if (!tegra_phy->config) {
                dev_err(&pdev->dev,
                        "unable to allocate memory for USB UTMIP config\n");
index 4fd36530bfa35dfb9a83b89f7d02abd04b17fbea..b0c97a3f1bfed74e262c3aa818817259f31e987e 100644 (file)
@@ -108,19 +108,45 @@ static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
        return list_first_entry(&pipe->list, struct usbhs_pkt, node);
 }
 
+static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
+                             struct usbhs_fifo *fifo);
+static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
+                                struct usbhs_fifo *fifo);
+static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
+                                           struct usbhs_pkt *pkt);
+#define usbhsf_dma_map(p)      __usbhsf_dma_map_ctrl(p, 1)
+#define usbhsf_dma_unmap(p)    __usbhsf_dma_map_ctrl(p, 0)
+static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
 {
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+       struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
        unsigned long flags;
 
        /********************  spin lock ********************/
        usbhs_lock(priv, flags);
 
+       usbhs_pipe_disable(pipe);
+
        if (!pkt)
                pkt = __usbhsf_pkt_get(pipe);
 
-       if (pkt)
+       if (pkt) {
+               struct dma_chan *chan = NULL;
+
+               if (fifo)
+                       chan = usbhsf_dma_chan_get(fifo, pkt);
+               if (chan) {
+                       dmaengine_terminate_all(chan);
+                       usbhsf_fifo_clear(pipe, fifo);
+                       usbhsf_dma_unmap(pkt);
+               }
+
                __usbhsf_pkt_del(pkt);
+       }
+
+       if (fifo)
+               usbhsf_fifo_unselect(pipe, fifo);
 
        usbhs_unlock(priv, flags);
        /********************  spin unlock ******************/
@@ -544,6 +570,7 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
                usbhsf_send_terminator(pipe, fifo);
 
        usbhsf_tx_irq_ctrl(pipe, !*is_done);
+       usbhs_pipe_running(pipe, !*is_done);
        usbhs_pipe_enable(pipe);
 
        dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
@@ -570,12 +597,21 @@ usbhs_fifo_write_busy:
         * retry in interrupt
         */
        usbhsf_tx_irq_ctrl(pipe, 1);
+       usbhs_pipe_running(pipe, 1);
 
        return ret;
 }
 
+static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+{
+       if (usbhs_pipe_is_running(pkt->pipe))
+               return 0;
+
+       return usbhsf_pio_try_push(pkt, is_done);
+}
+
 struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
-       .prepare = usbhsf_pio_try_push,
+       .prepare = usbhsf_pio_prepare_push,
        .try_run = usbhsf_pio_try_push,
 };
 
@@ -589,6 +625,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
        if (usbhs_pipe_is_busy(pipe))
                return 0;
 
+       if (usbhs_pipe_is_running(pipe))
+               return 0;
+
        /*
         * pipe enable to prepare packet receive
         */
@@ -597,6 +636,7 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
 
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
        usbhs_pipe_enable(pipe);
+       usbhs_pipe_running(pipe, 1);
        usbhsf_rx_irq_ctrl(pipe, 1);
 
        return 0;
@@ -642,6 +682,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
            (total_len < maxp)) {               /* short packet */
                *is_done = 1;
                usbhsf_rx_irq_ctrl(pipe, 0);
+               usbhs_pipe_running(pipe, 0);
                usbhs_pipe_disable(pipe);       /* disable pipe first */
        }
 
@@ -763,8 +804,6 @@ static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
        usbhs_bset(priv, fifo->sel, DREQE, dreqe);
 }
 
-#define usbhsf_dma_map(p)      __usbhsf_dma_map_ctrl(p, 1)
-#define usbhsf_dma_unmap(p)    __usbhsf_dma_map_ctrl(p, 0)
 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
@@ -805,6 +844,7 @@ static void xfer_work(struct work_struct *work)
        dev_dbg(dev, "  %s %d (%d/ %d)\n",
                fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
 
+       usbhs_pipe_running(pipe, 1);
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
        usbhs_pipe_enable(pipe);
        usbhsf_dma_start(pipe, fifo);
@@ -836,6 +876,10 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
        if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_push;
 
+       /* return at this time if the pipe is running */
+       if (usbhs_pipe_is_running(pipe))
+               return 0;
+
        /* get enable DMA fifo */
        fifo = usbhsf_get_dma_fifo(priv, pkt);
        if (!fifo)
@@ -869,15 +913,29 @@ usbhsf_pio_prepare_push:
 static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
+       int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
+
+       pkt->actual += pkt->trans;
 
-       pkt->actual = pkt->trans;
+       if (pkt->actual < pkt->length)
+               *is_done = 0;           /* there is remaining data */
+       else if (is_short)
+               *is_done = 1;           /* short packet */
+       else
+               *is_done = !pkt->zero;  /* send zero packet? */
 
-       *is_done = !pkt->zero;  /* send zero packet ? */
+       usbhs_pipe_running(pipe, !*is_done);
 
        usbhsf_dma_stop(pipe, pipe->fifo);
        usbhsf_dma_unmap(pkt);
        usbhsf_fifo_unselect(pipe, pipe->fifo);
 
+       if (!*is_done) {
+               /* change handler to PIO */
+               pkt->handler = &usbhs_fifo_pio_push_handler;
+               return pkt->handler->try_run(pkt, is_done);
+       }
+
        return 0;
 }
 
@@ -972,8 +1030,10 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
        if ((pkt->actual == pkt->length) ||     /* receive all data */
            (pkt->trans < maxp)) {              /* short packet */
                *is_done = 1;
+               usbhs_pipe_running(pipe, 0);
        } else {
                /* re-enable */
+               usbhs_pipe_running(pipe, 0);
                usbhsf_prepare_pop(pkt, is_done);
        }
 
index 6a030b931a3b71d721870181d1170ba451dacc8b..9a705b15b3a1528b733c9eebac04e0d2f7ef2ee9 100644 (file)
@@ -213,7 +213,10 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
 {
        struct usbhs_mod *mod = usbhs_mod_get_current(priv);
        u16 intenb0, intenb1;
+       unsigned long flags;
 
+       /********************  spin lock ********************/
+       usbhs_lock(priv, flags);
        state->intsts0 = usbhs_read(priv, INTSTS0);
        state->intsts1 = usbhs_read(priv, INTSTS1);
 
@@ -229,6 +232,8 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
                state->bempsts &= mod->irq_bempsts;
                state->brdysts &= mod->irq_brdysts;
        }
+       usbhs_unlock(priv, flags);
+       /********************  spin unlock ******************/
 
        /*
         * Check whether the irq enable registers and the irq status are set
index 75fbcf6b102e86f8678434bd75188e71005d2504..040bcefcb0402c80116b917ea6595e1d01ed0a4d 100644 (file)
@@ -578,6 +578,19 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
        return usbhsp_flags_has(pipe, IS_DIR_HOST);
 }
 
+int usbhs_pipe_is_running(struct usbhs_pipe *pipe)
+{
+       return usbhsp_flags_has(pipe, IS_RUNNING);
+}
+
+void usbhs_pipe_running(struct usbhs_pipe *pipe, int running)
+{
+       if (running)
+               usbhsp_flags_set(pipe, IS_RUNNING);
+       else
+               usbhsp_flags_clr(pipe, IS_RUNNING);
+}
+
 void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
 {
        u16 mask = (SQCLR | SQSET);
index 406f36d050e4facf2c24b04fc254e2cbd639c623..d24a059723704cbae9c09c501f580c3ff0a450b7 100644 (file)
@@ -36,6 +36,7 @@ struct usbhs_pipe {
 #define USBHS_PIPE_FLAGS_IS_USED               (1 << 0)
 #define USBHS_PIPE_FLAGS_IS_DIR_IN             (1 << 1)
 #define USBHS_PIPE_FLAGS_IS_DIR_HOST           (1 << 2)
+#define USBHS_PIPE_FLAGS_IS_RUNNING            (1 << 3)
 
        struct usbhs_pkt_handle *handler;
 
@@ -80,6 +81,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv);
 void usbhs_pipe_remove(struct usbhs_priv *priv);
 int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
 int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_running(struct usbhs_pipe *pipe);
+void usbhs_pipe_running(struct usbhs_pipe *pipe, int running);
+
 void usbhs_pipe_init(struct usbhs_priv *priv,
                     int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
 int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
index 824ea5e7ec8b7b1b67a7820e6fbb691197aee030..dc72b924c399e955e8cd94f68c8337f1b7e3facd 100644 (file)
@@ -728,6 +728,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
                .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+       { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -939,6 +940,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
        /* Infineon Devices */
        { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+       /* GE Healthcare devices */
+       { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
        { }                                     /* Terminating entry */
 };
 
index 70b0b1d88ae9b383fde7f609015a9f0592797934..5937b2d242f28c14064d56b073dbbda5fb737093 100644 (file)
 #define TELLDUS_VID                    0x1781  /* Vendor ID */
 #define TELLDUS_TELLSTICK_PID          0x0C30  /* RF control dongle 433 MHz using FT232RL */
 
+/*
+ * NOVITUS printers
+ */
+#define NOVITUS_VID                    0x1a28
+#define NOVITUS_BONO_E_PID             0x6010
+
 /*
  * RT Systems programming cables for various ham radios
  */
  * ekey biometric systems GmbH (http://ekey.net/)
  */
 #define FTDI_EKEY_CONV_USB_PID         0xCB08  /* Converter USB */
+
+/*
+ * GE Healthcare devices
+ */
+#define GE_HEALTHCARE_VID              0x1901
+#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
index 6f7f01eb556a3af29634ce6e8a3e0745d6815e2e..46179a0828ebcbad9a78c11dad8044edff27664a 100644 (file)
@@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = {
        /* Sierra Wireless HSPA Non-Composite Device */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
        { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
-       { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
+       /* Sierra Wireless Direct IP modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
        /* AT&T Direct IP LTE modems */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
-       { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
+       /* Airprime/Sierra Wireless Direct IP modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
 
index 1a132e9e947ac44ae7ac31554e39078f3cc122ca..c9bb107d5e5ccae2ffaa7823f70872c0de86d419 100644 (file)
@@ -272,6 +272,14 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
 }
 
 static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x19d2, 0xffec) },
+       { USB_DEVICE(0x19d2, 0xffee) },
+       { USB_DEVICE(0x19d2, 0xfff6) },
+       { USB_DEVICE(0x19d2, 0xfff7) },
+       { USB_DEVICE(0x19d2, 0xfff8) },
+       { USB_DEVICE(0x19d2, 0xfff9) },
+       { USB_DEVICE(0x19d2, 0xfffb) },
+       { USB_DEVICE(0x19d2, 0xfffc) },
        /* MG880 */
        { USB_DEVICE(0x19d2, 0xfffd) },
        { },
index 503ac5c8d80f7bb7bcb87832df78a71d8c14587f..8a6f371ed6e77e3ccdc99632c3cd41ebeb155213 100644 (file)
@@ -59,10 +59,6 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        unsigned long flags = id->driver_info;
        int r, alt;
 
-       usb_stor_adjust_quirks(udev, &flags);
-
-       if (flags & US_FL_IGNORE_UAS)
-               return 0;
 
        alt = uas_find_uas_alt_setting(intf);
        if (alt < 0)
@@ -72,6 +68,29 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        if (r < 0)
                return 0;
 
+       /*
+        * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
+        * broken on the ASM1051, so use the number of streams to
+        * differentiate. Newer ASM1053 devices also support 32 streams, but
+        * have a different prod-id.
+        */
+       if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
+                       le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
+               if (udev->speed < USB_SPEED_SUPER) {
+                       /* No streams info, assume ASM1051 */
+                       flags |= US_FL_IGNORE_UAS;
+               } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+                       flags |= US_FL_IGNORE_UAS;
+               }
+       }
+
+       usb_stor_adjust_quirks(udev, &flags);
+
+       if (flags & US_FL_IGNORE_UAS) {
+               dev_warn(&udev->dev,
+                       "UAS is blacklisted for this device, using usb-storage instead\n");
+               return 0;
+       }
+
        if (udev->bus->sg_tablesize == 0) {
                dev_warn(&udev->dev,
                        "The driver for the USB controller %s does not support scatter-gather which is\n",
index 7ef99b2f3aaf75ff3222db35292ca3edaa7ac607..60cfcbc78552ceef5e9876610d30647d1aa67884 100644 (file)
@@ -741,6 +741,12 @@ UNUSUAL_DEV(  0x059b, 0x0001, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_SINGLE_LUN ),
 
+UNUSUAL_DEV(  0x059b, 0x0040, 0x0100, 0x0100,
+               "Iomega",
+               "Jaz USB Adapter",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SINGLE_LUN ),
+
 /* Reported by <Hendryk.Pfeiffer@gmx.de> */
 UNUSUAL_DEV(  0x059f, 0x0643, 0x0000, 0x0000,
                "LaCie",
index 80079b8fed155c39f109bd65d12e4e5e155a1b3c..d0303f0dbe15348d7eee118c74a94e0fbfbd6f45 100644 (file)
@@ -431,16 +431,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
        uwb_dev->mac_addr = *bce->mac_addr;
        uwb_dev->dev_addr = bce->dev_addr;
        dev_set_name(&uwb_dev->dev, "%s", macbuf);
+
+       /* plug the beacon cache */
+       bce->uwb_dev = uwb_dev;
+       uwb_dev->bce = bce;
+       uwb_bce_get(bce);               /* released in uwb_dev_sys_release() */
+
        result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
        if (result < 0) {
                dev_err(dev, "new device %s: cannot instantiate device\n",
                        macbuf);
                goto error_dev_add;
        }
-       /* plug the beacon cache */
-       bce->uwb_dev = uwb_dev;
-       uwb_dev->bce = bce;
-       uwb_bce_get(bce);               /* released in uwb_dev_sys_release() */
+
        dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
                 macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
                 dev_name(rc->uwb_dev.dev.parent));
@@ -448,6 +451,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
        return;
 
 error_dev_add:
+       bce->uwb_dev = NULL;
+       uwb_bce_put(bce);
        kfree(uwb_dev);
        return;
 }
index a7b6217ac87b4086eae386ed6ebaafe9cf500481..6ad23bd3523a991b8fc9096adc272708e83c85dc 100644 (file)
@@ -639,9 +639,7 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
                if (g0 != panels[i].g0)
                        continue;
                if (r0 == panels[i].r0 && b0 == panels[i].b0)
-                       fb->panel->caps = panels[i].caps & CLCD_CAP_RGB;
-               if (r0 == panels[i].b0 && b0 == panels[i].r0)
-                       fb->panel->caps = panels[i].caps & CLCD_CAP_BGR;
+                       fb->panel->caps = panels[i].caps;
        }
 
        return fb->panel->caps ? 0 : -EINVAL;
index 5c660c77f03b58a32c24749b7053df6608a23230..1e0a317d3dcdda5b6041ae8481006f1d1479934b 100644 (file)
@@ -230,8 +230,8 @@ static enum bp_state reserve_additional_memory(long credit)
        rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
 
        if (rc) {
-               pr_info("%s: add_memory() failed: %i\n", __func__, rc);
-               return BP_EAGAIN;
+               pr_warn("Cannot add additional memory (%i)\n", rc);
+               return BP_ECANCELED;
        }
 
        balloon_hotplug -= credit;
index 787d17945418b19c53d408d710e2558a6bb85ad8..e53fe191738cfe8ce7c7cfe6202c2651258ff952 100644 (file)
@@ -124,7 +124,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
        int i, rc, readonly;
        LIST_HEAD(queue_gref);
        LIST_HEAD(queue_file);
-       struct gntalloc_gref *gref;
+       struct gntalloc_gref *gref, *next;
 
        readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
        rc = -ENOMEM;
@@ -141,13 +141,11 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
                        goto undo;
 
                /* Grant foreign access to the page. */
-               gref->gref_id = gnttab_grant_foreign_access(op->domid,
+               rc = gnttab_grant_foreign_access(op->domid,
                        pfn_to_mfn(page_to_pfn(gref->page)), readonly);
-               if ((int)gref->gref_id < 0) {
-                       rc = gref->gref_id;
+               if (rc < 0)
                        goto undo;
-               }
-               gref_ids[i] = gref->gref_id;
+               gref_ids[i] = gref->gref_id = rc;
        }
 
        /* Add to gref lists. */
@@ -162,8 +160,8 @@ undo:
        mutex_lock(&gref_mutex);
        gref_size -= (op->count - i);
 
-       list_for_each_entry(gref, &queue_file, next_file) {
-               /* __del_gref does not remove from queue_file */
+       list_for_each_entry_safe(gref, next, &queue_file, next_file) {
+               list_del(&gref->next_file);
                __del_gref(gref);
        }
 
@@ -193,7 +191,7 @@ static void __del_gref(struct gntalloc_gref *gref)
 
        gref->notify.flags = 0;
 
-       if (gref->gref_id > 0) {
+       if (gref->gref_id) {
                if (gnttab_query_foreign_access(gref->gref_id))
                        return;
 
index 5f1e1f3cd18619ed2899dcc5e967ca274f690189..f8bb36f9d9cef862c6c502b5ba25b7d38476cd85 100644 (file)
@@ -103,16 +103,11 @@ static void do_suspend(void)
 
        shutting_down = SHUTDOWN_SUSPEND;
 
-#ifdef CONFIG_PREEMPT
-       /* If the kernel is preemptible, we need to freeze all the processes
-          to prevent them from being in the middle of a pagetable update
-          during suspend. */
        err = freeze_processes();
        if (err) {
                pr_err("%s: freeze failed %d\n", __func__, err);
                goto out;
        }
-#endif
 
        err = dpm_suspend_start(PMSG_FREEZE);
        if (err) {
@@ -157,10 +152,8 @@ out_resume:
        dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
 
 out_thaw:
-#ifdef CONFIG_PREEMPT
        thaw_processes();
 out:
-#endif
        shutting_down = SHUTDOWN_INVALID;
 }
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
index 97bc62cbe2dab816cd33b20b86e6baa73d2ae19f..733750096b71b38d23783926b4f85da9ac2d35ea 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -793,6 +793,8 @@ void exit_aio(struct mm_struct *mm)
 
        for (i = 0; i < table->nr; ++i) {
                struct kioctx *ctx = table->table[i];
+               struct completion requests_done =
+                       COMPLETION_INITIALIZER_ONSTACK(requests_done);
 
                if (!ctx)
                        continue;
@@ -804,7 +806,10 @@ void exit_aio(struct mm_struct *mm)
                 * that it needs to unmap the area, just set it to 0.
                 */
                ctx->mmap_size = 0;
-               kill_ioctx(mm, ctx, NULL);
+               kill_ioctx(mm, ctx, &requests_done);
+
+               /* Wait until all I/O for the context is done. */
+               wait_for_completion(&requests_done);
        }
 
        RCU_INIT_POINTER(mm->ioctx_table, NULL);
@@ -1111,6 +1116,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
        tail = ring->tail;
        kunmap_atomic(ring);
 
+       /*
+        * Ensure that once we've read the current tail pointer, that
+        * we also see the events that were stored up to the tail.
+        */
+       smp_rmb();
+
        pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
 
        if (head == tail)
index 36861b7a6757673189737024b62fcce9842bc3d0..ff1cc0399b9a206c127b4707b6dcd69d455d8231 100644 (file)
@@ -1966,7 +1966,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        btrfs_init_log_ctx(&ctx);
 
-       ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx);
+       ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
        if (ret < 0) {
                /* Fallthrough and commit/free transaction. */
                ret = 1;
index 9c194bd74d6e513c075b077b62ee1754cd02e966..016c403bfe7e4241b33c6b6c3e4101ba5ea993b2 100644 (file)
@@ -778,8 +778,12 @@ retry:
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
-               if (ret)
+               if (ret) {
+                       btrfs_drop_extent_cache(inode, async_extent->start,
+                                               async_extent->start +
+                                               async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
+               }
 
                /*
                 * clear dirty, set writeback and unlock the pages.
@@ -971,14 +975,14 @@ static noinline int cow_file_range(struct inode *inode,
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
-                       goto out_reserve;
+                       goto out_drop_extent_cache;
 
                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
-                               goto out_reserve;
+                               goto out_drop_extent_cache;
                }
 
                if (disk_num_bytes < cur_alloc_size)
@@ -1006,6 +1010,8 @@ static noinline int cow_file_range(struct inode *inode,
 out:
        return ret;
 
+out_drop_extent_cache:
+       btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
 out_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_unlock:
@@ -4242,7 +4248,8 @@ out:
                        btrfs_abort_transaction(trans, root, ret);
        }
 error:
-       if (last_size != (u64)-1)
+       if (last_size != (u64)-1 &&
+           root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
                btrfs_ordered_update_i_size(inode, last_size, NULL);
        btrfs_free_path(path);
        return err;
@@ -5627,6 +5634,17 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
        return ret;
 }
 
+static int btrfs_insert_inode_locked(struct inode *inode)
+{
+       struct btrfs_iget_args args;
+       args.location = &BTRFS_I(inode)->location;
+       args.root = BTRFS_I(inode)->root;
+
+       return insert_inode_locked4(inode,
+                  btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
+                  btrfs_find_actor, &args);
+}
+
 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct inode *dir,
@@ -5719,10 +5737,19 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                sizes[1] = name_len + sizeof(*ref);
        }
 
+       location = &BTRFS_I(inode)->location;
+       location->objectid = objectid;
+       location->offset = 0;
+       btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
+
+       ret = btrfs_insert_inode_locked(inode);
+       if (ret < 0)
+               goto fail;
+
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
        if (ret != 0)
-               goto fail;
+               goto fail_unlock;
 
        inode_init_owner(inode, dir, mode);
        inode_set_bytes(inode, 0);
@@ -5745,11 +5772,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_free_path(path);
 
-       location = &BTRFS_I(inode)->location;
-       location->objectid = objectid;
-       location->offset = 0;
-       btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
-
        btrfs_inherit_iflags(inode, dir);
 
        if (S_ISREG(mode)) {
@@ -5760,7 +5782,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                                BTRFS_INODE_NODATASUM;
        }
 
-       btrfs_insert_inode_hash(inode);
        inode_tree_add(inode);
 
        trace_btrfs_inode_new(inode);
@@ -5775,6 +5796,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                          btrfs_ino(inode), root->root_key.objectid, ret);
 
        return inode;
+
+fail_unlock:
+       unlock_new_inode(inode);
 fail:
        if (dir && name)
                BTRFS_I(dir)->index_cnt--;
@@ -5909,28 +5933,28 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err) {
-               drop_inode = 1;
-               goto out_unlock;
-       }
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
        * if the filesystem supports xattrs by looking at the
        * ops vector.
        */
-
        inode->i_op = &btrfs_special_inode_operations;
-       err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
+       init_special_inode(inode, inode->i_mode, rdev);
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               drop_inode = 1;
-       else {
-               init_special_inode(inode, inode->i_mode, rdev);
+               goto out_unlock_inode;
+
+       err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
+       if (err) {
+               goto out_unlock_inode;
+       } else {
                btrfs_update_inode(trans, root, inode);
+               unlock_new_inode(inode);
                d_instantiate(dentry, inode);
        }
+
 out_unlock:
        btrfs_end_transaction(trans, root);
        btrfs_balance_delayed_items(root);
@@ -5940,6 +5964,12 @@ out_unlock:
                iput(inode);
        }
        return err;
+
+out_unlock_inode:
+       drop_inode = 1;
+       unlock_new_inode(inode);
+       goto out_unlock;
+
 }
 
 static int btrfs_create(struct inode *dir, struct dentry *dentry,
@@ -5974,15 +6004,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
        drop_inode_on_err = 1;
-
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err)
-               goto out_unlock;
-
-       err = btrfs_update_inode(trans, root, inode);
-       if (err)
-               goto out_unlock;
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
@@ -5991,14 +6012,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        */
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
+       inode->i_mapping->a_ops = &btrfs_aops;
+       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
+       if (err)
+               goto out_unlock_inode;
+
+       err = btrfs_update_inode(trans, root, inode);
+       if (err)
+               goto out_unlock_inode;
 
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
-               goto out_unlock;
+               goto out_unlock_inode;
 
-       inode->i_mapping->a_ops = &btrfs_aops;
-       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
 
 out_unlock:
@@ -6010,6 +6040,11 @@ out_unlock:
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_unlock_inode:
+       unlock_new_inode(inode);
+       goto out_unlock;
+
 }
 
 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6117,25 +6152,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        }
 
        drop_on_err = 1;
+       /* these must be set before we unlock the inode */
+       inode->i_op = &btrfs_dir_inode_operations;
+       inode->i_fop = &btrfs_dir_file_operations;
 
        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
-               goto out_fail;
-
-       inode->i_op = &btrfs_dir_inode_operations;
-       inode->i_fop = &btrfs_dir_file_operations;
+               goto out_fail_inode;
 
        btrfs_i_size_write(inode, 0);
        err = btrfs_update_inode(trans, root, inode);
        if (err)
-               goto out_fail;
+               goto out_fail_inode;
 
        err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
                             dentry->d_name.len, 0, index);
        if (err)
-               goto out_fail;
+               goto out_fail_inode;
 
        d_instantiate(dentry, inode);
+       /*
+        * mkdir is special.  We're unlocking after we call d_instantiate
+        * to avoid a race with nfsd calling d_instantiate.
+        */
+       unlock_new_inode(inode);
        drop_on_err = 0;
 
 out_fail:
@@ -6145,6 +6185,10 @@ out_fail:
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_fail_inode:
+       unlock_new_inode(inode);
+       goto out_fail;
 }
 
 /* helper for btfs_get_extent.  Given an existing extent in the tree,
@@ -8100,6 +8144,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 
        set_nlink(inode, 1);
        btrfs_i_size_write(inode, 0);
+       unlock_new_inode(inode);
 
        err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
        if (err)
@@ -8760,12 +8805,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
-       if (err) {
-               drop_inode = 1;
-               goto out_unlock;
-       }
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
@@ -8774,23 +8813,22 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        */
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
+       inode->i_mapping->a_ops = &btrfs_aops;
+       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+       BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+
+       err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
+       if (err)
+               goto out_unlock_inode;
 
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
-               drop_inode = 1;
-       else {
-               inode->i_mapping->a_ops = &btrfs_aops;
-               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
-       }
-       if (drop_inode)
-               goto out_unlock;
+               goto out_unlock_inode;
 
        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
-               drop_inode = 1;
-               goto out_unlock;
+               goto out_unlock_inode;
        }
        key.objectid = btrfs_ino(inode);
        key.offset = 0;
@@ -8799,9 +8837,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        err = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (err) {
-               drop_inode = 1;
                btrfs_free_path(path);
-               goto out_unlock;
+               goto out_unlock_inode;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -8825,12 +8862,15 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        inode_set_bytes(inode, name_len);
        btrfs_i_size_write(inode, name_len);
        err = btrfs_update_inode(trans, root, inode);
-       if (err)
+       if (err) {
                drop_inode = 1;
+               goto out_unlock_inode;
+       }
+
+       unlock_new_inode(inode);
+       d_instantiate(dentry, inode);
 
 out_unlock:
-       if (!err)
-               d_instantiate(dentry, inode);
        btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
@@ -8838,6 +8878,11 @@ out_unlock:
        }
        btrfs_btree_balance_dirty(root);
        return err;
+
+out_unlock_inode:
+       drop_inode = 1;
+       unlock_new_inode(inode);
+       goto out_unlock;
 }
 
 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -9021,14 +9066,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
                goto out;
        }
 
-       ret = btrfs_init_inode_security(trans, inode, dir, NULL);
-       if (ret)
-               goto out;
-
-       ret = btrfs_update_inode(trans, root, inode);
-       if (ret)
-               goto out;
-
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
 
@@ -9036,9 +9073,16 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
 
+       ret = btrfs_init_inode_security(trans, inode, dir, NULL);
+       if (ret)
+               goto out_inode;
+
+       ret = btrfs_update_inode(trans, root, inode);
+       if (ret)
+               goto out_inode;
        ret = btrfs_orphan_add(trans, inode);
        if (ret)
-               goto out;
+               goto out_inode;
 
        /*
         * We set number of links to 0 in btrfs_new_inode(), and here we set
@@ -9048,6 +9092,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
         *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
         */
        set_nlink(inode, 1);
+       unlock_new_inode(inode);
        d_tmpfile(dentry, inode);
        mark_inode_dirty(inode);
 
@@ -9057,8 +9102,12 @@ out:
                iput(inode);
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
-
        return ret;
+
+out_inode:
+       unlock_new_inode(inode);
+       goto out;
+
 }
 
 static const struct inode_operations btrfs_dir_inode_operations = {
index fce6fd0e3f50c729adcf278b1b2a13bd2ede1494..8a8e29878c34283812f8d990c2432cff94bbab2a 100644 (file)
@@ -1019,8 +1019,10 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
                return false;
 
        next = defrag_lookup_extent(inode, em->start + em->len);
-       if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE ||
-           (em->block_start + em->block_len == next->block_start))
+       if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+               ret = false;
+       else if ((em->block_start + em->block_len == next->block_start) &&
+                (em->block_len > 128 * 1024 && next->block_len > 128 * 1024))
                ret = false;
 
        free_extent_map(next);
@@ -1055,7 +1057,6 @@ static int should_defrag_range(struct inode *inode, u64 start, int thresh,
        }
 
        next_mergeable = defrag_check_next_extent(inode, em);
-
        /*
         * we hit a real extent, if it is big or the next extent is not a
         * real extent, don't bother defragging it
@@ -1702,7 +1703,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
            ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
              BTRFS_SUBVOL_QGROUP_INHERIT)) {
                ret = -EOPNOTSUPP;
-               goto out;
+               goto free_args;
        }
 
        if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
@@ -1712,27 +1713,31 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
        if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
                if (vol_args->size > PAGE_CACHE_SIZE) {
                        ret = -EINVAL;
-                       goto out;
+                       goto free_args;
                }
                inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
                if (IS_ERR(inherit)) {
                        ret = PTR_ERR(inherit);
-                       goto out;
+                       goto free_args;
                }
        }
 
        ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
                                              vol_args->fd, subvol, ptr,
                                              readonly, inherit);
+       if (ret)
+               goto free_inherit;
 
-       if (ret == 0 && ptr &&
-           copy_to_user(arg +
-                        offsetof(struct btrfs_ioctl_vol_args_v2,
-                                 transid), ptr, sizeof(*ptr)))
+       if (ptr && copy_to_user(arg +
+                               offsetof(struct btrfs_ioctl_vol_args_v2,
+                                       transid),
+                               ptr, sizeof(*ptr)))
                ret = -EFAULT;
-out:
-       kfree(vol_args);
+
+free_inherit:
        kfree(inherit);
+free_args:
+       kfree(vol_args);
        return ret;
 }
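
For readers less familiar with the cascading-label cleanup idiom introduced in the snap_create_v2 hunk above (free_inherit/free_args), here is a minimal userspace sketch of the same pattern; all names are illustrative, nothing below is btrfs code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int do_work(const char *msg)
{
	char *args, *inherit;
	int ret = -1;

	args = strdup(msg);			/* first allocation */
	if (!args)
		return -1;

	inherit = malloc(64);			/* second allocation */
	if (!inherit)
		goto free_args;

	if (snprintf(inherit, 64, "copy of: %s", args) < 0)
		goto free_inherit;
	puts(inherit);
	ret = 0;
						/* success falls through too,
						 * freeing in reverse order */
free_inherit:
	free(inherit);
free_args:
	free(args);
	return ret;
}

int main(void)
{
	return do_work("hello") ? EXIT_FAILURE : EXIT_SUCCESS;
}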
 
@@ -2652,7 +2657,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args)) {
                ret = PTR_ERR(vol_args);
-               goto out;
+               goto err_drop;
        }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
@@ -2670,6 +2675,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
 
 out:
        kfree(vol_args);
+err_drop:
        mnt_drop_write_file(file);
        return ret;
 }
index 7e0e6e3029dd4e36e9fd72e68b151ac991b66c12..d296efe2d3e76ce4f690687a9647cf7f8c71a27f 100644 (file)
 #define LOG_WALK_REPLAY_ALL 3
 
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root, struct inode *inode,
-                            int inode_only);
+                          struct btrfs_root *root, struct inode *inode,
+                          int inode_only,
+                          const loff_t start,
+                          const loff_t end);
 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid);
@@ -3858,8 +3860,10 @@ process:
  * This handles both files and directories.
  */
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root, struct inode *inode,
-                            int inode_only)
+                          struct btrfs_root *root, struct inode *inode,
+                          int inode_only,
+                          const loff_t start,
+                          const loff_t end)
 {
        struct btrfs_path *path;
        struct btrfs_path *dst_path;
@@ -3876,6 +3880,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        int ins_nr;
        bool fast_search = false;
        u64 ino = btrfs_ino(inode);
+       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -4049,13 +4054,35 @@ log_extents:
                        goto out_unlock;
                }
        } else if (inode_only == LOG_INODE_ALL) {
-               struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
                struct extent_map *em, *n;
 
-               write_lock(&tree->lock);
-               list_for_each_entry_safe(em, n, &tree->modified_extents, list)
-                       list_del_init(&em->list);
-               write_unlock(&tree->lock);
+               write_lock(&em_tree->lock);
+               /*
+                * We can't just remove every em if we're called for a ranged
+                * fsync - that is, one that doesn't cover the whole possible
+                * file range (0 to LLONG_MAX). This is because we can have
+                * em's that fall outside the range we're logging and therefore
+                * their ordered operations haven't completed yet
+                * (btrfs_finish_ordered_io() not invoked yet). This means we
+                * didn't get their respective file extent item in the fs/subvol
+                * tree yet, and need to let the next fast fsync (one which
+                * consults the list of modified extent maps) find the em so
+                * that it logs a matching file extent item and waits for the
+                * respective ordered operation to complete (if it's still
+                * running).
+                *
+                * Removing every em outside the range we're logging would make
+                * the next fast fsync not log their matching file extent items,
+                * therefore making us lose data after a log replay.
+                */
+               list_for_each_entry_safe(em, n, &em_tree->modified_extents,
+                                        list) {
+                       const u64 mod_end = em->mod_start + em->mod_len - 1;
+
+                       if (em->mod_start >= start && mod_end <= end)
+                               list_del_init(&em->list);
+               }
+               write_unlock(&em_tree->lock);
        }
 
        if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
@@ -4065,8 +4092,19 @@ log_extents:
                        goto out_unlock;
                }
        }
-       BTRFS_I(inode)->logged_trans = trans->transid;
-       BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
+
+       write_lock(&em_tree->lock);
+       /*
+        * If we're doing a ranged fsync and there are still modified extents
+        * in the list, we must log again on the next fsync call, as it might
+        * cover those extents (a full fsync or an fsync for another range).
+        */
+       if (list_empty(&em_tree->modified_extents)) {
+               BTRFS_I(inode)->logged_trans = trans->transid;
+               BTRFS_I(inode)->last_log_commit =
+                       BTRFS_I(inode)->last_sub_trans;
+       }
+       write_unlock(&em_tree->lock);
 out_unlock:
        if (unlikely(err))
                btrfs_put_logged_extents(&logged_list);
@@ -4161,7 +4199,10 @@ out:
  */
 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root, struct inode *inode,
-                                 struct dentry *parent, int exists_only,
+                                 struct dentry *parent,
+                                 const loff_t start,
+                                 const loff_t end,
+                                 int exists_only,
                                  struct btrfs_log_ctx *ctx)
 {
        int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
@@ -4207,7 +4248,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
        if (ret)
                goto end_no_trans;
 
-       ret = btrfs_log_inode(trans, root, inode, inode_only);
+       ret = btrfs_log_inode(trans, root, inode, inode_only, start, end);
        if (ret)
                goto end_trans;
 
@@ -4235,7 +4276,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 
                if (BTRFS_I(inode)->generation >
                    root->fs_info->last_trans_committed) {
-                       ret = btrfs_log_inode(trans, root, inode, inode_only);
+                       ret = btrfs_log_inode(trans, root, inode, inode_only,
+                                             0, LLONG_MAX);
                        if (ret)
                                goto end_trans;
                }
@@ -4269,13 +4311,15 @@ end_no_trans:
  */
 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct dentry *dentry,
+                         const loff_t start,
+                         const loff_t end,
                          struct btrfs_log_ctx *ctx)
 {
        struct dentry *parent = dget_parent(dentry);
        int ret;
 
        ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
-                                    0, ctx);
+                                    start, end, 0, ctx);
        dput(parent);
 
        return ret;
@@ -4512,6 +4556,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                    root->fs_info->last_trans_committed))
                return 0;
 
-       return btrfs_log_inode_parent(trans, root, inode, parent, 1, NULL);
+       return btrfs_log_inode_parent(trans, root, inode, parent, 0,
+                                     LLONG_MAX, 1, NULL);
 }
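
The tree-log changes above thread a start/end byte range into btrfs_log_inode() so that a ranged fsync only drops extent maps whose modified range lies fully inside the range being logged; everything else stays on modified_extents for a later fsync. Below is a self-contained sketch of that containment check, using illustrative stand-in types rather than the real btrfs structures.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct em {			/* stand-in for struct extent_map */
	uint64_t mod_start;
	uint64_t mod_len;
};

/* Drop an em from the "modified" list only when its whole modified range
 * [mod_start, mod_start + mod_len - 1] lies inside the fsync range. */
static bool covered_by_fsync_range(const struct em *em,
				   uint64_t start, uint64_t end)
{
	uint64_t mod_end = em->mod_start + em->mod_len - 1;

	return em->mod_start >= start && mod_end <= end;
}

int main(void)
{
	struct em ems[] = { { 0, 4096 }, { 8192, 4096 }, { 1 << 20, 4096 } };
	uint64_t start = 0, end = 16383;	/* ranged fsync of the first 16 KiB */

	for (size_t i = 0; i < sizeof(ems) / sizeof(ems[0]); i++)
		printf("em %zu: %s\n", i,
		       covered_by_fsync_range(&ems[i], start, end) ?
		       "removed from modified list" :
		       "kept for a later fsync");
	return 0;
}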
 
index 7f5b41bd5373810a04159d8c28facbd289c5c03c..e2e798ae7cd7c6c989a6633fe7dae682ab4a5148 100644 (file)
@@ -59,6 +59,8 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
 int btrfs_recover_log_trees(struct btrfs_root *tree_root);
 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct dentry *dentry,
+                         const loff_t start,
+                         const loff_t end,
                          struct btrfs_log_ctx *ctx);
 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
index 603f18a65c121e5586f903baff101a8637bdb852..a2172f3f69e318915f092885da0b4d49755d33f1 100644 (file)
@@ -22,6 +22,11 @@ config CIFS
          support for OS/2 and Windows ME and similar servers is provided as
          well.
 
+         The module also provides optional support for the follow-on
+         protocols for CIFS including SMB3, which enables
+         useful performance and security features (see the description
+         of CONFIG_CIFS_SMB2).
+
          The cifs module provides an advanced network file system
          client for mounting to CIFS compliant servers.  It includes
          support for DFS (hierarchical name space), secure per-user
@@ -121,7 +126,8 @@ config CIFS_ACL
          depends on CIFS_XATTR && KEYS
          help
            Allows fetching CIFS/NTFS ACL from the server.  The DACL blob
-           is handed over to the application/caller.
+           is handed over to the application/caller.  See the man
+           page for getcifsacl for more information.
 
 config CIFS_DEBUG
        bool "Enable CIFS debugging routines"
@@ -162,7 +168,7 @@ config CIFS_NFSD_EXPORT
           Allows NFS server to export a CIFS mounted share (nfsd over cifs)
 
 config CIFS_SMB2
-       bool "SMB2 network file system support"
+       bool "SMB2 and SMB3 network file system support"
        depends on CIFS && INET
        select NLS
        select KEYS
@@ -170,16 +176,21 @@ config CIFS_SMB2
        select DNS_RESOLVER
 
        help
-         This enables experimental support for the SMB2 (Server Message Block
-         version 2) protocol. The SMB2 protocol is the successor to the
-         popular CIFS and SMB network file sharing protocols. SMB2 is the
-         native file sharing mechanism for recent versions of Windows
-         operating systems (since Vista).  SMB2 enablement will eventually
-         allow users better performance, security and features, than would be
-         possible with cifs. Note that smb2 mount options also are simpler
-         (compared to cifs) due to protocol improvements.
-
-         Unless you are a developer or tester, say N.
+         This enables support for the Server Message Block version 2
+         family of protocols, including SMB3.  SMB3 support is
+         enabled on mount by specifying "vers=3.0" in the mount
+         options. These protocols are the successors to the popular
+         CIFS and SMB network file sharing protocols. SMB3 is the
+         native file sharing mechanism for the more recent
+         versions of Windows (Windows 8 and Windows 2012 and
+         later), and Samba and many other servers support SMB3 well.
+         In general SMB3 enables better performance, security
+         and features than would be possible with CIFS (note that
+         when mounting to Samba, due to the CIFS POSIX extensions,
+         CIFS mounts can provide slightly better POSIX compatibility
+         than SMB3 mounts do, though). SMB2/SMB3 mount
+         options are also slightly simpler (compared to CIFS) due
+         to protocol improvements.
 
 config CIFS_FSCACHE
          bool "Provide CIFS client caching support"
index dfc731b02aa9b3daca3a15d95346bcd82cc2b3d5..25b8392bfdd2a7fd9864af5008e809dc41e3a7df 100644 (file)
 #define SERVER_NAME_LENGTH 40
 #define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
 
-/* used to define string lengths for reversing unicode strings */
-/*         (256+1)*2 = 514                                     */
-/*           (max path length + 1 for null) * 2 for unicode    */
-#define MAX_NAME 514
-
 /* SMB echo "timeout" -- FIXME: tunable? */
 #define SMB_ECHO_INTERVAL (60 * HZ)
 
index 03ed8a09581ca0104468d1207ff0cadefd0f9390..8a9fded7c135f6deb936873ad06fa86fc11c9f85 100644 (file)
@@ -837,7 +837,6 @@ cifs_demultiplex_thread(void *p)
        struct TCP_Server_Info *server = p;
        unsigned int pdu_length;
        char *buf = NULL;
-       struct task_struct *task_to_wake = NULL;
        struct mid_q_entry *mid_entry;
 
        current->flags |= PF_MEMALLOC;
@@ -928,19 +927,7 @@ cifs_demultiplex_thread(void *p)
        if (server->smallbuf) /* no sense logging a debug message if NULL */
                cifs_small_buf_release(server->smallbuf);
 
-       task_to_wake = xchg(&server->tsk, NULL);
        clean_demultiplex_info(server);
-
-       /* if server->tsk was NULL then wait for a signal before exiting */
-       if (!task_to_wake) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               while (!signal_pending(current)) {
-                       schedule();
-                       set_current_state(TASK_INTERRUPTIBLE);
-               }
-               set_current_state(TASK_RUNNING);
-       }
-
        module_put_and_exit(0);
 }
 
@@ -1600,6 +1587,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        tmp_end++;
                        if (!(tmp_end < end && tmp_end[1] == delim)) {
                                /* No it is not. Set the password to NULL */
+                               kfree(vol->password);
                                vol->password = NULL;
                                break;
                        }
@@ -1637,6 +1625,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                        options = end;
                        }
 
+                       kfree(vol->password);
                        /* Now build new password string */
                        temp_len = strlen(value);
                        vol->password = kzalloc(temp_len+1, GFP_KERNEL);
@@ -2061,8 +2050,6 @@ cifs_find_tcp_session(struct smb_vol *vol)
 static void
 cifs_put_tcp_session(struct TCP_Server_Info *server)
 {
-       struct task_struct *task;
-
        spin_lock(&cifs_tcp_ses_lock);
        if (--server->srv_count > 0) {
                spin_unlock(&cifs_tcp_ses_lock);
@@ -2086,10 +2073,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
        kfree(server->session_key.response);
        server->session_key.response = NULL;
        server->session_key.len = 0;
-
-       task = xchg(&server->tsk, NULL);
-       if (task)
-               force_sig(SIGKILL, task);
 }
 
 static struct TCP_Server_Info *
index 3db0c5fd9a1109629764cf5f2b759dd9ddd8cd8a..6cbd9c688cfe818a773a2a7f42c64ce59d693de1 100644 (file)
@@ -497,6 +497,14 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                goto out;
        }
 
+       if (file->f_flags & O_DIRECT &&
+           CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+               if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                       file->f_op = &cifs_file_direct_nobrl_ops;
+               else
+                       file->f_op = &cifs_file_direct_ops;
+               }
+
        file_info = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (file_info == NULL) {
                if (server->ops->close)
index d5fec92e036068b98bf2e3426e3f8d159b70e048..7c018a1c52f7d474ce3b6d5ffb63c62485db831b 100644 (file)
@@ -467,6 +467,14 @@ int cifs_open(struct inode *inode, struct file *file)
        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);
 
+       if (file->f_flags & O_DIRECT &&
+           cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                       file->f_op = &cifs_file_direct_nobrl_ops;
+               else
+                       file->f_op = &cifs_file_direct_ops;
+       }
+
        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
index 949ec909ec9ab94747ed332cc96cdcdbc29fd47b..7899a40465b303db85e1903e9fec1fe57124a261 100644 (file)
@@ -1720,7 +1720,10 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
 unlink_target:
        /* Try unlinking the target dentry if it's not negative */
        if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
-               tmprc = cifs_unlink(target_dir, target_dentry);
+               if (d_is_dir(target_dentry))
+                       tmprc = cifs_rmdir(target_dir, target_dentry);
+               else
+                       tmprc = cifs_unlink(target_dir, target_dentry);
                if (tmprc)
                        goto cifs_rename_exit;
                rc = cifs_do_rename(xid, source_dentry, from_name,
index 798c80a41c886b3fba7e66f8264066902d35b8cb..b334a89d6a66eb2151973d6405083db3c254710b 100644 (file)
@@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
                if (server->ops->dir_needs_close(cfile)) {
                        cfile->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
-                       if (server->ops->close)
-                               server->ops->close(xid, tcon, &cfile->fid);
+                       if (server->ops->close_dir)
+                               server->ops->close_dir(xid, tcon, &cfile->fid);
                } else
                        spin_unlock(&cifs_file_list_lock);
                if (cfile->srch_inf.ntwrk_buf_start) {
index 39ee32688eac93abb107eb07e5f5566237602ba8..3a5e83317683d1c16156c4511ab7657940b48d97 100644 (file)
@@ -243,10 +243,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
        kfree(ses->serverOS);
 
        ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
-       if (ses->serverOS)
+       if (ses->serverOS) {
                strncpy(ses->serverOS, bcc_ptr, len);
-       if (strncmp(ses->serverOS, "OS/2", 4) == 0)
-               cifs_dbg(FYI, "OS/2 server\n");
+               if (strncmp(ses->serverOS, "OS/2", 4) == 0)
+                       cifs_dbg(FYI, "OS/2 server\n");
+       }
 
        bcc_ptr += len + 1;
        bleft -= len + 1;
index 3f17b455083141c572fac5d88e95e56fbe54f89a..45992944e23859b1706fed3d0430ea0cee445a10 100644 (file)
@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                goto out;
        }
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL) {
                rc = -ENOMEM;
index 0150182a44946dcebdb9aae71130f7c8a479eab9..899bbc86f73e1416aafcd00708052c22bd87f52e 100644 (file)
@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
        *adjust_tz = false;
        *symlink = false;
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
index 5a48aa290dfe83fdab6f9af322aadcdd0ebe3b3b..f522193b7184facd4926cfa3447b7fcbf5268cce 100644 (file)
@@ -389,7 +389,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        struct smb2_file_all_info *smb2_data;
 
-       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+       smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
@@ -1035,7 +1035,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
                if (keep_size == false)
                        return -EOPNOTSUPP;
 
-       /* 
+       /*
         * Must check if file sparse since fallocate -z (zero range) assumes
         * non-sparse allocation
         */
index fa0dd044213b9dc7dab5ec26d37c469627e9ca49..74b3a6684383c9bfd08f481c8575932cefa06766 100644 (file)
@@ -530,7 +530,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
        struct smb2_sess_setup_rsp *rsp = NULL;
        struct kvec iov[2];
        int rc = 0;
-       int resp_buftype;
+       int resp_buftype = CIFS_NO_BUFFER;
        __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
        struct TCP_Server_Info *server = ses->server;
        u16 blob_length = 0;
@@ -1403,8 +1403,7 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
        rsp = (struct smb2_close_rsp *)iov[0].iov_base;
 
        if (rc != 0) {
-               if (tcon)
-                       cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
+               cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
                goto close_exit;
        }
 
@@ -1533,7 +1532,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
 {
        return query_info(xid, tcon, persistent_fid, volatile_fid,
                          FILE_ALL_INFORMATION,
-                         sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+                         sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
                          sizeof(struct smb2_file_all_info), data);
 }
 
index d30ce699ae4b6ea4ac3ad1b8ec955fa271bdcb16..7a5b51440afa96d8caed1a8f023df3301dbcd3d7 100644 (file)
@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
                                        unsigned int hash)
 {
        hash += (unsigned long) parent / L1_CACHE_BYTES;
-       hash = hash + (hash >> d_hash_shift);
-       return dentry_hashtable + (hash & d_hash_mask);
+       return dentry_hashtable + hash_32(hash, d_hash_shift);
 }
 
 /* Statistics gathering. */
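
For context on the d_hash() hunk above: hash_32() replaces the old add-and-shift fold with a multiplicative hash that keeps the top d_hash_shift bits. A rough, self-contained approximation of that bucket selection follows; the constant and helper name are illustrative, not the <linux/hash.h> definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative multiplicative fold: multiply by a large odd (golden-ratio
 * style) constant and keep the top 'bits' bits. The kernel's hash_32()
 * may use a different constant. */
static inline uint32_t toy_hash_32(uint32_t val, unsigned int bits)
{
	return (uint32_t)(val * 0x9e370001u) >> (32 - bits);
}

int main(void)
{
	unsigned int d_hash_shift = 12;		/* e.g. 4096 hash buckets */
	uint32_t name_hash = 0xdeadbeef;	/* dentry name hash */
	uintptr_t parent = 0x7f00aa40;		/* fake parent dentry address */

	/* Mirror the shape of the new d_hash(): mix in the parent pointer,
	 * then fold down to a bucket index in one step. */
	uint32_t h = name_hash + (uint32_t)(parent / 64 /* L1_CACHE_BYTES */);

	printf("bucket = %u\n", (unsigned int)toy_hash_32(h, d_hash_shift));
	return 0;
}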
@@ -2656,6 +2655,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
        dentry->d_parent = dentry;
        list_del_init(&dentry->d_u.d_child);
        anon->d_parent = dparent;
+       if (likely(!d_unhashed(anon))) {
+               hlist_bl_lock(&anon->d_sb->s_anon);
+               __hlist_bl_del(&anon->d_hash);
+               anon->d_hash.pprev = NULL;
+               hlist_bl_unlock(&anon->d_sb->s_anon);
+       }
        list_move(&anon->d_u.d_child, &dparent->d_subdirs);
 
        write_seqcount_end(&dentry->d_seq);
@@ -2714,7 +2719,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
                        write_seqlock(&rename_lock);
                        __d_materialise_dentry(dentry, new);
                        write_sequnlock(&rename_lock);
-                       __d_drop(new);
                        _d_rehash(new);
                        spin_unlock(&new->d_lock);
                        spin_unlock(&inode->i_lock);
@@ -2778,7 +2782,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                                 * could splice into our tree? */
                                __d_materialise_dentry(dentry, alias);
                                write_sequnlock(&rename_lock);
-                               __d_drop(alias);
                                goto found;
                        } else {
                                /* Nope, but we must(!) avoid directory
index b10b48c2a7afaf859f2b508d5bca784abfe870ac..7bcfff900f058cd9d98278c1fa13ed2b0202eccc 100644 (file)
@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                goto error_tgt_fput;
 
        /* Check if EPOLLWAKEUP is allowed */
-       ep_take_care_of_epollwakeup(&epds);
+       if (ep_op_has_event(op))
+               ep_take_care_of_epollwakeup(&epds);
 
        /*
         * We have to check that the file structure underneath the file descriptor
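
The eventpoll hunk above makes epoll_ctl() leave the copied event alone for operations that carry no event. Seen from userspace, that is the EPOLL_CTL_DEL case, where the event pointer may legitimately be NULL; a small sketch:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/epoll.h>

#ifndef EPOLLWAKEUP
#define EPOLLWAKEUP (1u << 29)
#endif

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN | EPOLLWAKEUP };
	int fds[2];
	int epfd = epoll_create1(0);

	if (epfd < 0 || pipe(fds) < 0) {
		perror("setup");
		return EXIT_FAILURE;
	}

	/* ADD carries an event; without CAP_BLOCK_SUSPEND the kernel quietly
	 * drops EPOLLWAKEUP here (ep_take_care_of_epollwakeup()). */
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &ev) < 0)
		perror("EPOLL_CTL_ADD");

	/* DEL carries no event at all, so the kernel must not touch it;
	 * NULL is a valid event pointer for this operation. */
	if (epoll_ctl(epfd, EPOLL_CTL_DEL, fds[0], NULL) < 0)
		perror("EPOLL_CTL_DEL");

	close(fds[0]);
	close(fds[1]);
	close(epfd);
	return 0;
}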
index 90a3cdca3f88b8d30adffe6b6dba52d613b1c9c2..603e4ebbd0ac1a8eb33f27f0e06cf302d3ce3352 100644 (file)
@@ -3240,6 +3240,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                                 &new.de, &new.inlined);
        if (IS_ERR(new.bh)) {
                retval = PTR_ERR(new.bh);
+               new.bh = NULL;
                goto end_rename;
        }
        if (new.bh) {
@@ -3386,6 +3387,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
                                 &new.de, &new.inlined);
        if (IS_ERR(new.bh)) {
                retval = PTR_ERR(new.bh);
+               new.bh = NULL;
                goto end_rename;
        }
 
index bb0e80f03e2eb7291c91b9808f96d5469edfd903..1e43b905ff9854d7a6f45eab3da091b5867a0188 100644 (file)
@@ -575,6 +575,7 @@ handle_bb:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
+                       bh = NULL;
                        goto out;
                }
                overhead = ext4_group_overhead_blocks(sb, group);
@@ -603,6 +604,7 @@ handle_ib:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
+                       bh = NULL;
                        goto out;
                }
 
index 214fe1054fceef32e74589e06a8d89203d9cc601..736a348509f7d6cfc6041d4e7f8838972c3413e3 100644 (file)
@@ -23,7 +23,7 @@ config F2FS_STAT_FS
          mounted as f2fs. Each file shows the whole f2fs information.
 
          /sys/kernel/debug/f2fs/status includes:
-           - major file system information managed by f2fs currently
+           - major filesystem information managed by f2fs currently
            - average SIT information about whole segments
            - current memory footprint consumed by f2fs.
 
@@ -68,6 +68,6 @@ config F2FS_CHECK_FS
        bool "F2FS consistency checking feature"
        depends on F2FS_FS
        help
-         Enables BUG_ONs which check the file system consistency in runtime.
+         Enables BUG_ONs which check the filesystem consistency at runtime.
 
          If you want to improve the performance, say N.
index 6aeed5bada52b1e6bdf9d1bc8228c9e69fb35b13..ec3b7a5381fa3fca2165aa9bcdb7402c0eace5bc 100644 (file)
@@ -160,14 +160,11 @@ static int f2fs_write_meta_page(struct page *page,
                goto redirty_out;
        if (wbc->for_reclaim)
                goto redirty_out;
-
-       /* Should not write any meta pages, if any IO error was occurred */
-       if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
-               goto no_write;
+       if (unlikely(f2fs_cp_error(sbi)))
+               goto redirty_out;
 
        f2fs_wait_on_page_writeback(page, META);
        write_meta_page(sbi, page);
-no_write:
        dec_page_count(sbi, F2FS_DIRTY_META);
        unlock_page(page);
        return 0;
@@ -348,7 +345,7 @@ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
        return e ? true : false;
 }
 
-static void release_dirty_inode(struct f2fs_sb_info *sbi)
+void release_dirty_inode(struct f2fs_sb_info *sbi)
 {
        struct ino_entry *e, *tmp;
        int i;
@@ -446,8 +443,8 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
        struct f2fs_orphan_block *orphan_blk = NULL;
        unsigned int nentries = 0;
        unsigned short index;
-       unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
-               (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
+       unsigned short orphan_blocks =
+                       (unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans);
        struct page *page = NULL;
        struct ino_entry *orphan = NULL;
 
@@ -737,7 +734,7 @@ retry:
 /*
  * Freeze all the FS-operations for checkpoint.
  */
-static void block_operations(struct f2fs_sb_info *sbi)
+static int block_operations(struct f2fs_sb_info *sbi)
 {
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
@@ -745,6 +742,7 @@ static void block_operations(struct f2fs_sb_info *sbi)
                .for_reclaim = 0,
        };
        struct blk_plug plug;
+       int err = 0;
 
        blk_start_plug(&plug);
 
@@ -754,11 +752,15 @@ retry_flush_dents:
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                f2fs_unlock_all(sbi);
                sync_dirty_dir_inodes(sbi);
+               if (unlikely(f2fs_cp_error(sbi))) {
+                       err = -EIO;
+                       goto out;
+               }
                goto retry_flush_dents;
        }
 
        /*
-        * POR: we should ensure that there is no dirty node pages
+        * POR: we should ensure that there are no dirty node pages
         * until finishing nat/sit flush.
         */
 retry_flush_nodes:
@@ -767,9 +769,16 @@ retry_flush_nodes:
        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                up_write(&sbi->node_write);
                sync_node_pages(sbi, 0, &wbc);
+               if (unlikely(f2fs_cp_error(sbi))) {
+                       f2fs_unlock_all(sbi);
+                       err = -EIO;
+                       goto out;
+               }
                goto retry_flush_nodes;
        }
+out:
        blk_finish_plug(&plug);
+       return err;
 }
 
 static void unblock_operations(struct f2fs_sb_info *sbi)
@@ -813,8 +822,11 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));
 
        /* Flush all the NAT/SIT pages */
-       while (get_pages(sbi, F2FS_DIRTY_META))
+       while (get_pages(sbi, F2FS_DIRTY_META)) {
                sync_meta_pages(sbi, META, LONG_MAX);
+               if (unlikely(f2fs_cp_error(sbi)))
+                       return;
+       }
 
        next_free_nid(sbi, &last_nid);
 
@@ -825,7 +837,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
        ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
-       for (i = 0; i < 3; i++) {
+       for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
@@ -833,7 +845,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
-       for (i = 0; i < 3; i++) {
+       for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
@@ -848,24 +860,23 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 
        /* 2 cp  + n data seg summary + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi);
-       if (data_sum_blocks < 3)
+       if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
                set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
 
-       orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
-                                       / F2FS_ORPHANS_PER_BLOCK;
+       orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans);
        ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
                        orphan_blocks);
 
        if (is_umount) {
                set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-               ckpt->cp_pack_total_block_count = cpu_to_le32(+
+               ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks + NR_CURSEG_NODE_TYPE);
        } else {
                clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-               ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+               ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks);
        }
@@ -924,6 +935,9 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        /* wait for previous submitted node/meta pages writeback */
        wait_on_all_pages_writeback(sbi);
 
+       if (unlikely(f2fs_cp_error(sbi)))
+               return;
+
        filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
        filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);
 
@@ -934,15 +948,17 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        /* Here, we only have one bio having CP pack */
        sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
 
-       if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
-               clear_prefree_segments(sbi);
-               release_dirty_inode(sbi);
-               F2FS_RESET_SB_DIRT(sbi);
-       }
+       release_dirty_inode(sbi);
+
+       if (unlikely(f2fs_cp_error(sbi)))
+               return;
+
+       clear_prefree_segments(sbi);
+       F2FS_RESET_SB_DIRT(sbi);
 }
 
 /*
- * We guarantee that this checkpoint procedure should not fail.
+ * We guarantee that this checkpoint procedure will not fail.
  */
 void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 {
@@ -952,7 +968,13 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");
 
        mutex_lock(&sbi->cp_mutex);
-       block_operations(sbi);
+
+       if (!sbi->s_dirty)
+               goto out;
+       if (unlikely(f2fs_cp_error(sbi)))
+               goto out;
+       if (block_operations(sbi))
+               goto out;
 
        trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");
 
@@ -976,9 +998,9 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        do_checkpoint(sbi, is_umount);
 
        unblock_operations(sbi);
-       mutex_unlock(&sbi->cp_mutex);
-
        stat_inc_cp_count(sbi->stat_info);
+out:
+       mutex_unlock(&sbi->cp_mutex);
        trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
 }
 
@@ -999,8 +1021,8 @@ void init_ino_entry_info(struct f2fs_sb_info *sbi)
         * for cp pack we can have max 1020*504 orphan entries
         */
        sbi->n_orphans = 0;
-       sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
-                               * F2FS_ORPHANS_PER_BLOCK;
+       sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
+                       NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
 }
 
 int __init create_checkpoint_caches(void)
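
The checkpoint.c hunks above replace the open-coded ceiling division for orphan blocks with a GET_ORPHAN_BLOCKS() helper. Below is a minimal userspace sketch of that calculation, assuming the macro simply wraps the expression it replaces; the constant and macro body are assumptions, not copied from f2fs.h.

#include <stdio.h>

/* Assumed definition, inferred from the open-coded expression the hunks
 * above remove; 1020 comes from the "1020*504" comment in the same file. */
#define F2FS_ORPHANS_PER_BLOCK	1020
#define GET_ORPHAN_BLOCKS(n) \
	(((n) + F2FS_ORPHANS_PER_BLOCK - 1) / F2FS_ORPHANS_PER_BLOCK)

int main(void)
{
	unsigned int samples[] = { 0, 1, 1020, 1021, 5000 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%u orphan inodes -> %u orphan block(s)\n",
		       samples[i], GET_ORPHAN_BLOCKS(samples[i]));
	return 0;
}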
index 03313099c51c44b957f84fb6ec4a6ab698eee970..76de83e25a891d8cf85c4b84d2641dbf414a77e6 100644 (file)
@@ -53,7 +53,7 @@ static void f2fs_write_end_io(struct bio *bio, int err)
                struct page *page = bvec->bv_page;
 
                if (unlikely(err)) {
-                       SetPageError(page);
+                       set_page_dirty(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        f2fs_stop_checkpoint(sbi);
                }
@@ -691,7 +691,7 @@ get_next:
                        allocated = true;
                        blkaddr = dn.data_blkaddr;
                }
-               /* Give more consecutive addresses for the read ahead */
+               /* Give more consecutive addresses for the readahead */
                if (blkaddr == (bh_result->b_blocknr + ofs)) {
                        ofs++;
                        dn.ofs_in_node++;
@@ -739,7 +739,7 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
 
        trace_f2fs_readpage(page, DATA);
 
-       /* If the file has inline data, try to read it directlly */
+       /* If the file has inline data, try to read it directly */
        if (f2fs_has_inline_data(inode))
                ret = f2fs_read_inline_data(inode, page);
        else
@@ -836,10 +836,19 @@ write:
 
        /* Dentry blocks are controlled by checkpoint */
        if (S_ISDIR(inode->i_mode)) {
+               if (unlikely(f2fs_cp_error(sbi)))
+                       goto redirty_out;
                err = do_write_data_page(page, &fio);
                goto done;
        }
 
+       /* we should bypass data pages to let the kworker jobs proceed */
+       if (unlikely(f2fs_cp_error(sbi))) {
+               SetPageError(page);
+               unlock_page(page);
+               return 0;
+       }
+
        if (!wbc->for_reclaim)
                need_balance_fs = true;
        else if (has_not_enough_free_secs(sbi, 0))
@@ -927,7 +936,7 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
 
        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
-               truncate_blocks(inode, inode->i_size);
+               truncate_blocks(inode, inode->i_size, true);
        }
 }
 
@@ -946,7 +955,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 
        f2fs_balance_fs(sbi);
 repeat:
-       err = f2fs_convert_inline_data(inode, pos + len);
+       err = f2fs_convert_inline_data(inode, pos + len, NULL);
        if (err)
                goto fail;
 
index a441ba33be11bd2f12b1a948cad200ad22be33d6..fecebdbfd7810fbccf61c984c9e029c345b7fdef 100644 (file)
@@ -32,7 +32,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
        struct f2fs_stat_info *si = F2FS_STAT(sbi);
        int i;
 
-       /* valid check of the segment numbers */
+       /* validation check of the segment numbers */
        si->hit_ext = sbi->read_hit_ext;
        si->total_ext = sbi->total_hit_ext;
        si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -152,7 +152,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
        si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
        si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
 
-       /* buld nm */
+       /* build nm */
        si->base_mem += sizeof(struct f2fs_nm_info);
        si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
 
index bcf893c3d9036d0eb743ff8c34bae2eda23e9582..155fb056b7f1ab0424beb480b328bd12d0c18aff 100644 (file)
@@ -124,7 +124,7 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
 
                /*
                 * For the most part, it should be a bug when name_len is zero.
-                * We stop here for figuring out where the bugs are occurred.
+                * We stop here for figuring out where the bug has occurred.
                 */
                f2fs_bug_on(!de->name_len);
 
@@ -391,7 +391,7 @@ put_error:
 error:
        /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
        truncate_inode_pages(&inode->i_data, 0);
-       truncate_blocks(inode, 0);
+       truncate_blocks(inode, 0, false);
        remove_dirty_dir_inode(inode);
        remove_inode_page(inode);
        return ERR_PTR(err);
@@ -563,7 +563,7 @@ fail:
 }
 
 /*
- * It only removes the dentry from the dentry page,corresponding name
+ * It only removes the dentry from the dentry page; the corresponding name
  * entry in name page does not need to be touched during deletion.
  */
 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
index 4dab5338a97af496ad6f5ee5867a6e6457a03ec5..e921242186f6a1e8af2a128460db45b064027b67 100644 (file)
@@ -24,7 +24,7 @@
 #define f2fs_bug_on(condition) BUG_ON(condition)
 #define f2fs_down_write(x, y)  down_write_nest_lock(x, y)
 #else
-#define f2fs_bug_on(condition)
+#define f2fs_bug_on(condition) WARN_ON(condition)
 #define f2fs_down_write(x, y)  down_write(x)
 #endif
 
@@ -395,7 +395,7 @@ enum count_type {
 };
 
 /*
- * The below are the page types of bios used in submti_bio().
+ * The below are the page types of bios used in submit_bio().
  * The available types are:
  * DATA                        User data pages. It operates as async mode.
  * NODE                        Node pages. It operates as async mode.
@@ -470,7 +470,7 @@ struct f2fs_sb_info {
        struct list_head dir_inode_list;        /* dir inode list */
        spinlock_t dir_inode_lock;              /* for dir inode list lock */
 
-       /* basic file system units */
+       /* basic filesystem units */
        unsigned int log_sectors_per_block;     /* log2 sectors per block */
        unsigned int log_blocksize;             /* log2 block size */
        unsigned int blocksize;                 /* block size */
@@ -799,7 +799,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
 
        /*
         * odd numbered checkpoint should at cp segment 0
-        * and even segent must be at cp segment 1
+        * and even segment must be at cp segment 1
         */
        if (!(ckpt_version & 1))
                start_addr += sbi->blocks_per_seg;
@@ -1096,6 +1096,11 @@ static inline int f2fs_readonly(struct super_block *sb)
        return sb->s_flags & MS_RDONLY;
 }
 
+static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
+{
+       return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+}
+
 static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
 {
        set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
@@ -1117,7 +1122,7 @@ static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
  */
 int f2fs_sync_file(struct file *, loff_t, loff_t, int);
 void truncate_data_blocks(struct dnode_of_data *);
-int truncate_blocks(struct inode *, u64);
+int truncate_blocks(struct inode *, u64, bool);
 void f2fs_truncate(struct inode *);
 int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int f2fs_setattr(struct dentry *, struct iattr *);
@@ -1202,10 +1207,8 @@ int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
-void recover_node_page(struct f2fs_sb_info *, struct page *,
-               struct f2fs_summary *, struct node_info *, block_t);
 void recover_inline_xattr(struct inode *, struct page *);
-bool recover_xattr_data(struct inode *, struct page *, block_t);
+void recover_xattr_data(struct inode *, struct page *, block_t);
 int recover_inode_page(struct f2fs_sb_info *, struct page *);
 int restore_node_summary(struct f2fs_sb_info *, unsigned int,
                                struct f2fs_summary_block *);
@@ -1238,8 +1241,6 @@ void write_data_page(struct page *, struct dnode_of_data *, block_t *,
 void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *);
 void recover_data_page(struct f2fs_sb_info *, struct page *,
                                struct f2fs_summary *, block_t, block_t);
-void rewrite_node_page(struct f2fs_sb_info *, struct page *,
-                               struct f2fs_summary *, block_t, block_t);
 void allocate_data_block(struct f2fs_sb_info *, struct page *,
                block_t, block_t *, struct f2fs_summary *, int);
 void f2fs_wait_on_page_writeback(struct page *, enum page_type);
@@ -1262,6 +1263,7 @@ int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
 long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
 void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
 void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
+void release_dirty_inode(struct f2fs_sb_info *);
 bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
 int acquire_orphan_inode(struct f2fs_sb_info *);
 void release_orphan_inode(struct f2fs_sb_info *);
@@ -1439,8 +1441,8 @@ extern const struct inode_operations f2fs_special_inode_operations;
  */
 bool f2fs_may_inline(struct inode *);
 int f2fs_read_inline_data(struct inode *, struct page *);
-int f2fs_convert_inline_data(struct inode *, pgoff_t);
+int f2fs_convert_inline_data(struct inode *, pgoff_t, struct page *);
 int f2fs_write_inline_data(struct inode *, struct page *, unsigned int);
 void truncate_inline_data(struct inode *, u64);
-int recover_inline_data(struct inode *, struct page *);
+bool recover_inline_data(struct inode *, struct page *);
 #endif
index 208f1a9bd569b6d2c7afafa20c5a5dff9f9b39d1..060aee65aee801d61f7ab5f6b2666a60c0c3363a 100644 (file)
@@ -41,6 +41,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 
        sb_start_pagefault(inode->i_sb);
 
+       /* force any inline data to be converted to normal data indices */
+       err = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, page);
+       if (err)
+               goto out;
+
        /* block allocation */
        f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -110,6 +115,25 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
        return 1;
 }
 
+static inline bool need_do_checkpoint(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       bool need_cp = false;
+
+       if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
+               need_cp = true;
+       else if (file_wrong_pino(inode))
+               need_cp = true;
+       else if (!space_for_roll_forward(sbi))
+               need_cp = true;
+       else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
+               need_cp = true;
+       else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
+               need_cp = true;
+
+       return need_cp;
+}
+
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct inode *inode = file->f_mapping->host;
@@ -154,23 +178,12 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        /* guarantee free sections for fsync */
        f2fs_balance_fs(sbi);
 
-       down_read(&fi->i_sem);
-
        /*
         * Both of fdatasync() and fsync() are able to be recovered from
         * sudden-power-off.
         */
-       if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
-               need_cp = true;
-       else if (file_wrong_pino(inode))
-               need_cp = true;
-       else if (!space_for_roll_forward(sbi))
-               need_cp = true;
-       else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
-               need_cp = true;
-       else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
-               need_cp = true;
-
+       down_read(&fi->i_sem);
+       need_cp = need_do_checkpoint(inode);
        up_read(&fi->i_sem);
 
        if (need_cp) {
@@ -288,7 +301,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
                if (err && err != -ENOENT) {
                        goto fail;
                } else if (err == -ENOENT) {
-                       /* direct node is not exist */
+                       /* direct node does not exist */
                        if (whence == SEEK_DATA) {
                                pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
                                                        F2FS_I(inode));
@@ -417,7 +430,7 @@ out:
        f2fs_put_page(page, 1);
 }
 
-int truncate_blocks(struct inode *inode, u64 from)
+int truncate_blocks(struct inode *inode, u64 from, bool lock)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        unsigned int blocksize = inode->i_sb->s_blocksize;
@@ -433,14 +446,16 @@ int truncate_blocks(struct inode *inode, u64 from)
        free_from = (pgoff_t)
                        ((from + blocksize - 1) >> (sbi->log_blocksize));
 
-       f2fs_lock_op(sbi);
+       if (lock)
+               f2fs_lock_op(sbi);
 
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
-               f2fs_unlock_op(sbi);
+               if (lock)
+                       f2fs_unlock_op(sbi);
                trace_f2fs_truncate_blocks_exit(inode, err);
                return err;
        }
@@ -458,7 +473,8 @@ int truncate_blocks(struct inode *inode, u64 from)
        f2fs_put_dnode(&dn);
 free_next:
        err = truncate_inode_blocks(inode, free_from);
-       f2fs_unlock_op(sbi);
+       if (lock)
+               f2fs_unlock_op(sbi);
 done:
        /* lastly zero out the first data page */
        truncate_partial_data_page(inode, from);
@@ -475,7 +491,7 @@ void f2fs_truncate(struct inode *inode)
 
        trace_f2fs_truncate(inode);
 
-       if (!truncate_blocks(inode, i_size_read(inode))) {
+       if (!truncate_blocks(inode, i_size_read(inode), true)) {
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                mark_inode_dirty(inode);
        }
@@ -533,7 +549,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 
        if ((attr->ia_valid & ATTR_SIZE) &&
                        attr->ia_size != i_size_read(inode)) {
-               err = f2fs_convert_inline_data(inode, attr->ia_size);
+               err = f2fs_convert_inline_data(inode, attr->ia_size, NULL);
                if (err)
                        return err;
 
@@ -622,7 +638,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
        loff_t off_start, off_end;
        int ret = 0;
 
-       ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1);
+       ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL);
        if (ret)
                return ret;
 
@@ -678,7 +694,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
        if (ret)
                return ret;
 
-       ret = f2fs_convert_inline_data(inode, offset + len);
+       ret = f2fs_convert_inline_data(inode, offset + len, NULL);
        if (ret)
                return ret;
 
index d7947d90ccc3df487919723578bf5f81dde2d8a6..943a31db7cc3679c117716b220a58cce1b50a12d 100644 (file)
@@ -58,7 +58,7 @@ static int gc_thread_func(void *data)
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
-                * Note) We have to avoid triggering GCs too much frequently.
+                * Note) We have to avoid triggering GCs too frequently.
                 * Because it is possible that some segments can be
                 * invalidated soon after by user update or deletion.
                 * So, I'd like to wait some time to collect dirty segments.
@@ -222,7 +222,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
 
        u = (vblocks * 100) >> sbi->log_blocks_per_seg;
 
-       /* Handle if the system time is changed by user */
+       /* Handle if the system time has been changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
@@ -593,7 +593,7 @@ next_step:
 
                if (phase == 2) {
                        inode = f2fs_iget(sb, dni.ino);
-                       if (IS_ERR(inode))
+                       if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;
 
                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
@@ -693,7 +693,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
 gc_more:
        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
-       if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
+       if (unlikely(f2fs_cp_error(sbi)))
                goto stop;
 
        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
index 5d5eb6047bf467a4c27fba131acdf7ffbff4ab20..16f0b2b22999ffbc3c8ed46afe0e042d4748ea14 100644 (file)
@@ -91,7 +91,7 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
        block_t invalid_user_blocks = sbi->user_block_count -
                                        written_block_count(sbi);
        /*
-        * Background GC is triggered with the following condition.
+        * Background GC is triggered under the following conditions.
         * 1. There are a number of invalid blocks.
         * 2. There is not enough free space.
         */
index 948d17bf7281f6c9c32cf1e889dc844e57fbe893..a844fcfb9a8dcc70859e44c8933c465d9be36fb9 100644 (file)
@@ -42,7 +42,8 @@ static void TEA_transform(unsigned int buf[4], unsigned int const in[])
        buf[1] += b1;
 }
 
-static void str2hashbuf(const char *msg, size_t len, unsigned int *buf, int num)
+static void str2hashbuf(const unsigned char *msg, size_t len,
+                               unsigned int *buf, int num)
 {
        unsigned pad, val;
        int i;
@@ -73,9 +74,9 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
 {
        __u32 hash;
        f2fs_hash_t f2fs_hash;
-       const char *p;
+       const unsigned char *p;
        __u32 in[8], buf[4];
-       const char *name = name_info->name;
+       const unsigned char *name = name_info->name;
        size_t len = name_info->len;
 
        if ((len <= 2) && (name[0] == '.') &&
index 5beeccef9ae1bf968c29f688e4b70664e6f8ce82..3e8ecdf3742b18d72eba2e9c780f2c6e8d215c5e 100644 (file)
@@ -68,7 +68,7 @@ out:
 
 static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
 {
-       int err;
+       int err = 0;
        struct page *ipage;
        struct dnode_of_data dn;
        void *src_addr, *dst_addr;
@@ -86,6 +86,10 @@ static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
                goto out;
        }
 
+       /* someone else converted inline_data already */
+       if (!f2fs_has_inline_data(inode))
+               goto out;
+
        /*
         * i_addr[0] is not used for inline data,
         * so reserving new block will not destroy inline data
@@ -124,9 +128,10 @@ out:
        return err;
 }
 
-int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
+int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size,
+                                               struct page *page)
 {
-       struct page *page;
+       struct page *new_page = page;
        int err;
 
        if (!f2fs_has_inline_data(inode))
@@ -134,17 +139,20 @@ int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
        else if (to_size <= MAX_INLINE_DATA)
                return 0;
 
-       page = grab_cache_page(inode->i_mapping, 0);
-       if (!page)
-               return -ENOMEM;
+       if (!page || page->index != 0) {
+               new_page = grab_cache_page(inode->i_mapping, 0);
+               if (!new_page)
+                       return -ENOMEM;
+       }
 
-       err = __f2fs_convert_inline_data(inode, page);
-       f2fs_put_page(page, 1);
+       err = __f2fs_convert_inline_data(inode, new_page);
+       if (!page || page->index != 0)
+               f2fs_put_page(new_page, 1);
        return err;
 }
 
 int f2fs_write_inline_data(struct inode *inode,
-                          struct page *page, unsigned size)
+                               struct page *page, unsigned size)
 {
        void *src_addr, *dst_addr;
        struct page *ipage;
@@ -199,7 +207,7 @@ void truncate_inline_data(struct inode *inode, u64 from)
        f2fs_put_page(ipage, 1);
 }
 
-int recover_inline_data(struct inode *inode, struct page *npage)
+bool recover_inline_data(struct inode *inode, struct page *npage)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct f2fs_inode *ri = NULL;
@@ -218,7 +226,7 @@ int recover_inline_data(struct inode *inode, struct page *npage)
                ri = F2FS_INODE(npage);
 
        if (f2fs_has_inline_data(inode) &&
-                       ri && ri->i_inline & F2FS_INLINE_DATA) {
+                       ri && (ri->i_inline & F2FS_INLINE_DATA)) {
 process_inline:
                ipage = get_node_page(sbi, inode->i_ino);
                f2fs_bug_on(IS_ERR(ipage));
@@ -230,7 +238,7 @@ process_inline:
                memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
                update_inode(inode, ipage);
                f2fs_put_page(ipage, 1);
-               return -1;
+               return true;
        }
 
        if (f2fs_has_inline_data(inode)) {
@@ -242,10 +250,10 @@ process_inline:
                clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
                update_inode(inode, ipage);
                f2fs_put_page(ipage, 1);
-       } else if (ri && ri->i_inline & F2FS_INLINE_DATA) {
-               truncate_blocks(inode, 0);
+       } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+               truncate_blocks(inode, 0, false);
                set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
                goto process_inline;
        }
-       return 0;
+       return false;
 }
index 27b03776ffd21b8377dd645ba15591c1e783c19e..ee103fd7283c4e26acb39abf80fdaacdafea116f 100644 (file)
@@ -134,9 +134,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        return 0;
 out:
        clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        alloc_nid_failed(sbi, ino);
        return err;
 }
@@ -229,7 +227,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
        f2fs_delete_entry(de, page, inode);
        f2fs_unlock_op(sbi);
 
-       /* In order to evict this inode,  we set it dirty */
+       /* In order to evict this inode, we set it dirty */
        mark_inode_dirty(inode);
 fail:
        trace_f2fs_unlink_exit(inode, err);
@@ -267,9 +265,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
        return err;
 out:
        clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        alloc_nid_failed(sbi, inode->i_ino);
        return err;
 }
@@ -308,9 +304,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 out_fail:
        clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
        clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        alloc_nid_failed(sbi, inode->i_ino);
        return err;
 }
@@ -354,9 +348,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
        return 0;
 out:
        clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        alloc_nid_failed(sbi, inode->i_ino);
        return err;
 }
@@ -688,9 +680,7 @@ release_out:
 out:
        f2fs_unlock_op(sbi);
        clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        alloc_nid_failed(sbi, inode->i_ino);
        return err;
 }
@@ -704,7 +694,6 @@ const struct inode_operations f2fs_dir_inode_operations = {
        .mkdir          = f2fs_mkdir,
        .rmdir          = f2fs_rmdir,
        .mknod          = f2fs_mknod,
-       .rename         = f2fs_rename,
        .rename2        = f2fs_rename2,
        .tmpfile        = f2fs_tmpfile,
        .getattr        = f2fs_getattr,
index d3d90d2846313d546c1328145d48a2013645f3dd..45378196e19acbab2b2d3150a5158681eaa4a045 100644 (file)
@@ -237,7 +237,7 @@ retry:
                        nat_get_blkaddr(e) != NULL_ADDR &&
                        new_blkaddr == NEW_ADDR);
 
-       /* increament version no as node is removed */
+       /* increment version no as node is removed */
        if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
                unsigned char version = nat_get_version(e);
                nat_set_version(e, inc_node_version(version));
@@ -274,7 +274,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 }
 
 /*
- * This function returns always success
+ * This function always returns success
  */
 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 {
@@ -650,7 +650,7 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
 
        /* get indirect nodes in the path */
        for (i = 0; i < idx + 1; i++) {
-               /* refernece count'll be increased */
+               /* reference count will be increased */
                pages[i] = get_node_page(sbi, nid[i]);
                if (IS_ERR(pages[i])) {
                        err = PTR_ERR(pages[i]);
@@ -823,22 +823,26 @@ int truncate_xattr_node(struct inode *inode, struct page *page)
  */
 void remove_inode_page(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct page *page;
-       nid_t ino = inode->i_ino;
        struct dnode_of_data dn;
 
-       page = get_node_page(sbi, ino);
-       if (IS_ERR(page))
+       set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
+       if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
                return;
 
-       if (truncate_xattr_node(inode, page)) {
-               f2fs_put_page(page, 1);
+       if (truncate_xattr_node(inode, dn.inode_page)) {
+               f2fs_put_dnode(&dn);
                return;
        }
-       /* 0 is possible, after f2fs_new_inode() is failed */
+
+       /* remove potential inline_data blocks */
+       if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+                               S_ISLNK(inode->i_mode))
+               truncate_data_blocks_range(&dn, 1);
+
+       /* 0 is possible, after f2fs_new_inode() has failed */
        f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
-       set_new_dnode(&dn, inode, page, page, ino);
+
+       /* will put inode & node pages */
        truncate_node(&dn);
 }
 
@@ -1129,8 +1133,11 @@ continue_unlock:
                                set_fsync_mark(page, 0);
                                set_dentry_mark(page, 0);
                        }
-                       NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
-                       wrote++;
+
+                       if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
+                               unlock_page(page);
+                       else
+                               wrote++;
 
                        if (--wbc->nr_to_write == 0)
                                break;
@@ -1212,6 +1219,8 @@ static int f2fs_write_node_page(struct page *page,
 
        if (unlikely(sbi->por_doing))
                goto redirty_out;
+       if (unlikely(f2fs_cp_error(sbi)))
+               goto redirty_out;
 
        f2fs_wait_on_page_writeback(page, NODE);
 
@@ -1540,15 +1549,6 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
                kmem_cache_free(free_nid_slab, i);
 }
 
-void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
-               struct f2fs_summary *sum, struct node_info *ni,
-               block_t new_blkaddr)
-{
-       rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
-       set_node_addr(sbi, ni, new_blkaddr, false);
-       clear_node_page_dirty(page);
-}
-
 void recover_inline_xattr(struct inode *inode, struct page *page)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -1557,40 +1557,33 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
        struct page *ipage;
        struct f2fs_inode *ri;
 
-       if (!f2fs_has_inline_xattr(inode))
-               return;
-
-       if (!IS_INODE(page))
-               return;
-
-       ri = F2FS_INODE(page);
-       if (!(ri->i_inline & F2FS_INLINE_XATTR))
-               return;
-
        ipage = get_node_page(sbi, inode->i_ino);
        f2fs_bug_on(IS_ERR(ipage));
 
+       ri = F2FS_INODE(page);
+       if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
+               clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
+               goto update_inode;
+       }
+
        dst_addr = inline_xattr_addr(ipage);
        src_addr = inline_xattr_addr(page);
        inline_size = inline_xattr_size(inode);
 
        f2fs_wait_on_page_writeback(ipage, NODE);
        memcpy(dst_addr, src_addr, inline_size);
-
+update_inode:
        update_inode(inode, ipage);
        f2fs_put_page(ipage, 1);
 }
 
-bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
+void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
        nid_t new_xnid = nid_of_node(page);
        struct node_info ni;
 
-       if (!f2fs_has_xattr_block(ofs_of_node(page)))
-               return false;
-
        /* 1: invalidate the previous xattr nid */
        if (!prev_xnid)
                goto recover_xnid;
@@ -1618,7 +1611,6 @@ recover_xnid:
        set_node_addr(sbi, &ni, blkaddr, false);
 
        update_inode_page(inode);
-       return true;
 }
 
 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
@@ -1637,7 +1629,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
        if (!ipage)
                return -ENOMEM;
 
-       /* Should not use this inode  from free nid list */
+       /* Should not use this inode from free nid list */
        remove_free_nid(NM_I(sbi), ino);
 
        SetPageUptodate(ipage);
@@ -1651,6 +1643,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
        dst->i_blocks = cpu_to_le64(1);
        dst->i_links = cpu_to_le32(1);
        dst->i_xattr_nid = 0;
+       dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;
 
        new_ni = old_ni;
        new_ni.ino = ino;
@@ -1659,13 +1652,14 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
                WARN_ON(1);
        set_node_addr(sbi, &new_ni, NEW_ADDR, false);
        inc_valid_inode_count(sbi);
+       set_page_dirty(ipage);
        f2fs_put_page(ipage, 1);
        return 0;
 }
 
 /*
  * ra_sum_pages() merge contiguous pages into one bio and submit.
- * these pre-readed pages are alloced in bd_inode's mapping tree.
+ * these pre-read pages are allocated in bd_inode's mapping tree.
  */
 static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
                                int start, int nrpages)
@@ -1709,7 +1703,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
        for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
                nrpages = min(last_offset - i, bio_blocks);
 
-               /* read ahead node pages */
+               /* readahead node pages */
                nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
                if (!nrpages)
                        return -ENOMEM;
@@ -1967,7 +1961,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
        nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
 
        /* not used nids: 0, node, meta, (and root counted as valid node) */
-       nm_i->available_nids = nm_i->max_nid - 3;
+       nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
        nm_i->fcnt = 0;
        nm_i->nat_cnt = 0;
        nm_i->ram_thresh = DEF_RAM_THRESHOLD;
index fe1c6d921ba2fb535464d031d5822877e6eeccbd..756c41cd25829f789931df79f14a95bebcba5431 100644 (file)
@@ -62,8 +62,10 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
        }
 retry:
        de = f2fs_find_entry(dir, &name, &page);
-       if (de && inode->i_ino == le32_to_cpu(de->ino))
+       if (de && inode->i_ino == le32_to_cpu(de->ino)) {
+               clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
                goto out_unmap_put;
+       }
        if (de) {
                einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
@@ -300,14 +302,19 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        struct node_info ni;
        int err = 0, recovered = 0;
 
-       recover_inline_xattr(inode, page);
-
-       if (recover_inline_data(inode, page))
+       /* step 1: recover xattr */
+       if (IS_INODE(page)) {
+               recover_inline_xattr(inode, page);
+       } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
+               recover_xattr_data(inode, page, blkaddr);
                goto out;
+       }
 
-       if (recover_xattr_data(inode, page, blkaddr))
+       /* step 2: recover inline data */
+       if (recover_inline_data(inode, page))
                goto out;
 
+       /* step 3: recover data indices */
        start = start_bidx_of_node(ofs_of_node(page), fi);
        end = start + ADDRS_PER_PAGE(page, fi);
 
@@ -364,8 +371,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);
-
-       recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
 err:
        f2fs_put_dnode(&dn);
        f2fs_unlock_op(sbi);
@@ -452,6 +457,9 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
        /* step #1: find fsynced inode numbers */
        sbi->por_doing = true;
 
+       /* prevent checkpoint */
+       mutex_lock(&sbi->cp_mutex);
+
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
        err = find_fsync_dnodes(sbi, &inode_list);
@@ -465,7 +473,8 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 
        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
-       f2fs_bug_on(!list_empty(&inode_list));
+       if (!err)
+               f2fs_bug_on(!list_empty(&inode_list));
 out:
        destroy_fsync_dnodes(&inode_list);
        kmem_cache_destroy(fsync_entry_slab);
@@ -482,8 +491,13 @@ out:
                /* Flush all the NAT/SIT pages */
                while (get_pages(sbi, F2FS_DIRTY_META))
                        sync_meta_pages(sbi, META, LONG_MAX);
+               set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+               mutex_unlock(&sbi->cp_mutex);
        } else if (need_writecp) {
+               mutex_unlock(&sbi->cp_mutex);
                write_checkpoint(sbi, false);
+       } else {
+               mutex_unlock(&sbi->cp_mutex);
        }
        return err;
 }
index 0dfeebae2a50f1d8ff4b7a33e19a25fb32c2e834..0aa337cd5bba85703bdcecde7fe0a35de76bf121 100644 (file)
@@ -62,7 +62,7 @@ static inline unsigned long __reverse_ffs(unsigned long word)
 }
 
 /*
- * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c becasue
+ * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
  * f2fs_set_bit makes MSB and LSB reversed in a byte.
  * Example:
  *                             LSB <--> MSB
@@ -808,7 +808,7 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
 }
 
 /*
- * This function always allocates a used segment (from dirty seglist) by SSR
+ * This function always allocates a used segment(from dirty seglist) by SSR
  * manner, so it should recover the existing segment information of valid blocks
  */
 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
@@ -1103,55 +1103,6 @@ void recover_data_page(struct f2fs_sb_info *sbi,
        mutex_unlock(&curseg->curseg_mutex);
 }
 
-void rewrite_node_page(struct f2fs_sb_info *sbi,
-                       struct page *page, struct f2fs_summary *sum,
-                       block_t old_blkaddr, block_t new_blkaddr)
-{
-       struct sit_info *sit_i = SIT_I(sbi);
-       int type = CURSEG_WARM_NODE;
-       struct curseg_info *curseg;
-       unsigned int segno, old_cursegno;
-       block_t next_blkaddr = next_blkaddr_of_node(page);
-       unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
-       struct f2fs_io_info fio = {
-               .type = NODE,
-               .rw = WRITE_SYNC,
-       };
-
-       curseg = CURSEG_I(sbi, type);
-
-       mutex_lock(&curseg->curseg_mutex);
-       mutex_lock(&sit_i->sentry_lock);
-
-       segno = GET_SEGNO(sbi, new_blkaddr);
-       old_cursegno = curseg->segno;
-
-       /* change the current segment */
-       if (segno != curseg->segno) {
-               curseg->next_segno = segno;
-               change_curseg(sbi, type, true);
-       }
-       curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
-       __add_sum_entry(sbi, type, sum);
-
-       /* change the current log to the next block addr in advance */
-       if (next_segno != segno) {
-               curseg->next_segno = next_segno;
-               change_curseg(sbi, type, true);
-       }
-       curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, next_blkaddr);
-
-       /* rewrite node page */
-       set_page_writeback(page);
-       f2fs_submit_page_mbio(sbi, page, new_blkaddr, &fio);
-       f2fs_submit_merged_bio(sbi, NODE, WRITE);
-       refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
-       locate_dirty_segment(sbi, old_cursegno);
-
-       mutex_unlock(&sit_i->sentry_lock);
-       mutex_unlock(&curseg->curseg_mutex);
-}
-
 static inline bool is_merged_page(struct f2fs_sb_info *sbi,
                                        struct page *page, enum page_type type)
 {
index 55973f7b0330be844e17c0f02b7c02d661ca8c3e..ff483257283b8789968473b4beda6c2ddbaff43b 100644 (file)
@@ -549,7 +549,7 @@ static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 }
 
 /*
- * Summary block is always treated as invalid block
+ * Summary block is always treated as an invalid block
  */
 static inline void check_block_count(struct f2fs_sb_info *sbi,
                int segno, struct f2fs_sit_entry *raw_sit)
index 657582fc7601ad27f81ef034a6f5e55cf46abc1e..41bdf511003deb6978cf852203906840167f90b6 100644 (file)
@@ -432,9 +432,15 @@ static void f2fs_put_super(struct super_block *sb)
        stop_gc_thread(sbi);
 
        /* We don't need to do checkpoint when it's clean */
-       if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
+       if (sbi->s_dirty)
                write_checkpoint(sbi, true);
 
+       /*
+        * Normally the superblock is clean, so we need to release this.
+        * In addition, EIO will skip doing the checkpoint, so we need this as well.
+        */
+       release_dirty_inode(sbi);
+
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
 
@@ -457,9 +463,6 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
        trace_f2fs_sync_fs(sb, sync);
 
-       if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
-               return 0;
-
        if (sync) {
                mutex_lock(&sbi->gc_mutex);
                write_checkpoint(sbi, false);
@@ -505,8 +508,8 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
        buf->f_bavail = user_block_count - valid_user_blocks(sbi);
 
-       buf->f_files = sbi->total_node_count;
-       buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);
+       buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+       buf->f_ffree = buf->f_files - valid_inode_count(sbi);
 
        buf->f_namelen = F2FS_NAME_LEN;
        buf->f_fsid.val[0] = (u32)id;
@@ -663,7 +666,7 @@ restore_gc:
        if (need_restart_gc) {
                if (start_gc_thread(sbi))
                        f2fs_msg(sbi->sb, KERN_WARNING,
-                               "background gc thread is stop");
+                               "background gc thread has stopped");
        } else if (need_stop_gc) {
                stop_gc_thread(sbi);
        }
@@ -812,7 +815,7 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
        if (unlikely(fsmeta >= total))
                return 1;
 
-       if (unlikely(is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) {
+       if (unlikely(f2fs_cp_error(sbi))) {
                f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
                return 1;
        }
@@ -899,8 +902,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        struct buffer_head *raw_super_buf;
        struct inode *root;
        long err = -EINVAL;
+       bool retry = true;
        int i;
 
+try_onemore:
        /* allocate memory for f2fs-specific super block info */
        sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
        if (!sbi)
@@ -1080,9 +1085,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        /* recover fsynced data */
        if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
                err = recover_fsync_data(sbi);
-               if (err)
+               if (err) {
                        f2fs_msg(sb, KERN_ERR,
                                "Cannot recover all fsync data errno=%ld", err);
+                       goto free_kobj;
+               }
        }
 
        /*
@@ -1123,6 +1130,13 @@ free_sb_buf:
        brelse(raw_super_buf);
 free_sbi:
        kfree(sbi);
+
+       /* give only one more chance */
+       if (retry) {
+               retry = 0;
+               shrink_dcache_sb(sb);
+               goto try_onemore;
+       }
        return err;
 }
 
index 8bea941ee309607647cbc7a8c1282e0419dcef12..728a5dc3dc1654190e9709ce1f19fbc87a39d843 100644 (file)
@@ -528,7 +528,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
                int free;
                /*
                 * If value is NULL, it is remove operation.
-                * In case of update operation, we caculate free.
+                * In case of update operation, we calculate free.
                 */
                free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
                if (found)
index 8f27c93f8d2ed88edd55e623baac11b6f94ccfd5..ec9e082f9ecd905af16f2dd09de37822376cfe66 100644 (file)
@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
 
        error = make_socks(serv, net);
        if (error < 0)
-               goto err_socks;
+               goto err_bind;
        set_grace_period(net);
        dprintk("lockd_up_net: per-net data created; net=%p\n", net);
        return 0;
 
-err_socks:
-       svc_rpcb_cleanup(serv, net);
 err_bind:
        ln->nlmsvc_users--;
        return error;
index a996bb48dfabf4f645dced56f527ed3d5568bb2c..215e44254c5328c1a992db483da80e27a1dbed6c 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/device_cgroup.h>
 #include <linux/fs_struct.h>
 #include <linux/posix_acl.h>
+#include <linux/hash.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -643,24 +644,22 @@ static int complete_walk(struct nameidata *nd)
 
 static __always_inline void set_root(struct nameidata *nd)
 {
-       if (!nd->root.mnt)
-               get_fs_root(current->fs, &nd->root);
+       get_fs_root(current->fs, &nd->root);
 }
 
 static int link_path_walk(const char *, struct nameidata *);
 
-static __always_inline void set_root_rcu(struct nameidata *nd)
+static __always_inline unsigned set_root_rcu(struct nameidata *nd)
 {
-       if (!nd->root.mnt) {
-               struct fs_struct *fs = current->fs;
-               unsigned seq;
+       struct fs_struct *fs = current->fs;
+       unsigned seq, res;
 
-               do {
-                       seq = read_seqcount_begin(&fs->seq);
-                       nd->root = fs->root;
-                       nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
-               } while (read_seqcount_retry(&fs->seq, seq));
-       }
+       do {
+               seq = read_seqcount_begin(&fs->seq);
+               nd->root = fs->root;
+               res = __read_seqcount_begin(&nd->root.dentry->d_seq);
+       } while (read_seqcount_retry(&fs->seq, seq));
+       return res;
 }
 
 static void path_put_conditional(struct path *path, struct nameidata *nd)
@@ -860,7 +859,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
                        return PTR_ERR(s);
                }
                if (*s == '/') {
-                       set_root(nd);
+                       if (!nd->root.mnt)
+                               set_root(nd);
                        path_put(&nd->path);
                        nd->path = nd->root;
                        path_get(&nd->root);
@@ -1137,13 +1137,15 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                 */
                *inode = path->dentry->d_inode;
        }
-       return read_seqretry(&mount_lock, nd->m_seq) &&
+       return !read_seqretry(&mount_lock, nd->m_seq) &&
                !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
 }
 
 static int follow_dotdot_rcu(struct nameidata *nd)
 {
-       set_root_rcu(nd);
+       struct inode *inode = nd->inode;
+       if (!nd->root.mnt)
+               set_root_rcu(nd);
 
        while (1) {
                if (nd->path.dentry == nd->root.dentry &&
@@ -1155,6 +1157,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                        struct dentry *parent = old->d_parent;
                        unsigned seq;
 
+                       inode = parent->d_inode;
                        seq = read_seqcount_begin(&parent->d_seq);
                        if (read_seqcount_retry(&old->d_seq, nd->seq))
                                goto failed;
@@ -1164,6 +1167,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                }
                if (!follow_up_rcu(&nd->path))
                        break;
+               inode = nd->path.dentry->d_inode;
                nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
        }
        while (d_mountpoint(nd->path.dentry)) {
@@ -1173,11 +1177,12 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                        break;
                nd->path.mnt = &mounted->mnt;
                nd->path.dentry = mounted->mnt.mnt_root;
+               inode = nd->path.dentry->d_inode;
                nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-               if (!read_seqretry(&mount_lock, nd->m_seq))
+               if (read_seqretry(&mount_lock, nd->m_seq))
                        goto failed;
        }
-       nd->inode = nd->path.dentry->d_inode;
+       nd->inode = inode;
        return 0;
 
 failed:
@@ -1256,7 +1261,8 @@ static void follow_mount(struct path *path)
 
 static void follow_dotdot(struct nameidata *nd)
 {
-       set_root(nd);
+       if (!nd->root.mnt)
+               set_root(nd);
 
        while(1) {
                struct dentry *old = nd->path.dentry;
@@ -1634,8 +1640,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
 
 static inline unsigned int fold_hash(unsigned long hash)
 {
-       hash += hash >> (8*sizeof(int));
-       return hash;
+       return hash_64(hash, 32);
 }
 
 #else  /* 32-bit case */
@@ -1669,13 +1674,14 @@ EXPORT_SYMBOL(full_name_hash);
 
 /*
  * Calculate the length and hash of the path component, and
- * return the length of the component;
+ * fill in the qstr. return the "len" as the result.
  */
-static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+static inline unsigned long hash_name(const char *name, struct qstr *res)
 {
        unsigned long a, b, adata, bdata, mask, hash, len;
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 
+       res->name = name;
        hash = a = 0;
        len = -sizeof(unsigned long);
        do {
@@ -1691,9 +1697,10 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
        mask = create_zero_mask(adata | bdata);
 
        hash += a & zero_bytemask(mask);
-       *hashp = fold_hash(hash);
+       len += find_zero(mask);
+       res->hash_len = hashlen_create(fold_hash(hash), len);
 
-       return len + find_zero(mask);
+       return len;
 }
 
 #else
@@ -1711,18 +1718,19 @@ EXPORT_SYMBOL(full_name_hash);
  * We know there's a real path component here of at least
  * one character.
  */
-static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+static inline long hash_name(const char *name, struct qstr *res)
 {
        unsigned long hash = init_name_hash();
        unsigned long len = 0, c;
 
+       res->name = name;
        c = (unsigned char)*name;
        do {
                len++;
                hash = partial_name_hash(c, hash);
                c = (unsigned char)name[len];
        } while (c && c != '/');
-       *hashp = end_name_hash(hash);
+       res->hash_len = hashlen_create(end_name_hash(hash), len);
        return len;
 }
 
@@ -1756,9 +1764,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                if (err)
                        break;
 
-               len = hash_name(name, &this.hash);
-               this.name = name;
-               this.len = len;
+               len = hash_name(name, &this);
 
                type = LAST_NORM;
                if (name[0] == '.') switch (len) {
@@ -1852,7 +1858,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        if (*name=='/') {
                if (flags & LOOKUP_RCU) {
                        rcu_read_lock();
-                       set_root_rcu(nd);
+                       nd->seq = set_root_rcu(nd);
                } else {
                        set_root(nd);
                        path_get(&nd->root);
@@ -1903,7 +1909,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        }
 
        nd->inode = nd->path.dentry->d_inode;
-       return 0;
+       if (!(flags & LOOKUP_RCU))
+               return 0;
+       if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
+               return 0;
+       if (!(nd->flags & LOOKUP_ROOT))
+               nd->root.mnt = NULL;
+       rcu_read_unlock();
+       return -ECHILD;
 }
 
 static inline int lookup_last(struct nameidata *nd, struct path *path)
index a01c7730e9af3ad07f3e993d2f7f27f04f615604..ef42d9bee2121f8e6a68937a5ecaa7670721ded9 100644 (file)
@@ -1217,6 +1217,11 @@ static void namespace_unlock(void)
        head.first->pprev = &head.first;
        INIT_HLIST_HEAD(&unmounted);
 
+       /* undo decrements we'd done in umount_tree() */
+       hlist_for_each_entry(mnt, &head, mnt_hash)
+               if (mnt->mnt_ex_mountpoint.mnt)
+                       mntget(mnt->mnt_ex_mountpoint.mnt);
+
        up_write(&namespace_sem);
 
        synchronize_rcu();
@@ -1253,6 +1258,9 @@ void umount_tree(struct mount *mnt, int how)
                hlist_add_head(&p->mnt_hash, &tmp_list);
        }
 
+       hlist_for_each_entry(p, &tmp_list, mnt_hash)
+               list_del_init(&p->mnt_child);
+
        if (how)
                propagate_umount(&tmp_list);
 
@@ -1263,9 +1271,9 @@ void umount_tree(struct mount *mnt, int how)
                p->mnt_ns = NULL;
                if (how < 2)
                        p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
-               list_del_init(&p->mnt_child);
                if (mnt_has_parent(p)) {
                        put_mountpoint(p->mnt_mp);
+                       mnt_add_count(p->mnt_parent, -1);
                        /* move the reference to mountpoint into ->mnt_ex_mountpoint */
                        p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
                        p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
index 1c5ff6d5838585c4b6b0806aa64b579e58a64bc0..6a4f3666e273e33cab30fb4de866ebf79f791dac 100644 (file)
@@ -1412,24 +1412,18 @@ int nfs_fs_proc_net_init(struct net *net)
        p = proc_create("volumes", S_IFREG|S_IRUGO,
                        nn->proc_nfsfs, &nfs_volume_list_fops);
        if (!p)
-               goto error_2;
+               goto error_1;
        return 0;
 
-error_2:
-       remove_proc_entry("servers", nn->proc_nfsfs);
 error_1:
-       remove_proc_entry("fs/nfsfs", NULL);
+       remove_proc_subtree("nfsfs", net->proc_net);
 error_0:
        return -ENOMEM;
 }
 
 void nfs_fs_proc_net_exit(struct net *net)
 {
-       struct nfs_net *nn = net_generic(net, nfs_net_id);
-
-       remove_proc_entry("volumes", nn->proc_nfsfs);
-       remove_proc_entry("servers", nn->proc_nfsfs);
-       remove_proc_entry("fs/nfsfs", NULL);
+       remove_proc_subtree("nfsfs", net->proc_net);
 }
 
 /*
index 1359c4a27393a6723fc3b22c244dd6422da4a9f3..90978075f7302a791813b6dd29c0ec9f9e5850eb 100644 (file)
@@ -1269,11 +1269,12 @@ filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
 static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx)
 {
        struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
-       struct pnfs_commit_bucket *bucket = fl_cinfo->buckets;
+       struct pnfs_commit_bucket *bucket;
        struct pnfs_layout_segment *freeme;
        int i;
 
-       for (i = idx; i < fl_cinfo->nbuckets; i++, bucket++) {
+       for (i = idx; i < fl_cinfo->nbuckets; i++) {
+               bucket = &fl_cinfo->buckets[i];
                if (list_empty(&bucket->committing))
                        continue;
                nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
index 92193eddb41dc315868af5f437f083a3a4c0302a..a8b855ab4e227b7d9b2c90d911f27ef4d49f9fb7 100644 (file)
@@ -130,16 +130,15 @@ enum {
  */
 
 struct nfs4_lock_state {
-       struct list_head                ls_locks;   /* Other lock stateids */
-       struct nfs4_state *             ls_state;   /* Pointer to open state */
+       struct list_head        ls_locks;       /* Other lock stateids */
+       struct nfs4_state *     ls_state;       /* Pointer to open state */
 #define NFS_LOCK_INITIALIZED 0
 #define NFS_LOCK_LOST        1
-       unsigned long                   ls_flags;
+       unsigned long           ls_flags;
        struct nfs_seqid_counter        ls_seqid;
-       nfs4_stateid                    ls_stateid;
-       atomic_t                        ls_count;
-       fl_owner_t                      ls_owner;
-       struct work_struct              ls_release;
+       nfs4_stateid            ls_stateid;
+       atomic_t                ls_count;
+       fl_owner_t              ls_owner;
 };
 
 /* bits for nfs4_state->flags */
index a043f618cd5a30ef35a8ec63d54ff12034a2387f..22fe35104c0c1cb30b1d1a27c9996380710a00c6 100644 (file)
@@ -799,18 +799,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
        return NULL;
 }
 
-static void
-free_lock_state_work(struct work_struct *work)
-{
-       struct nfs4_lock_state *lsp = container_of(work,
-                                       struct nfs4_lock_state, ls_release);
-       struct nfs4_state *state = lsp->ls_state;
-       struct nfs_server *server = state->owner->so_server;
-       struct nfs_client *clp = server->nfs_client;
-
-       clp->cl_mvops->free_lock_state(server, lsp);
-}
-
 /*
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
@@ -832,7 +820,6 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
        if (lsp->ls_seqid.owner_id < 0)
                goto out_free;
        INIT_LIST_HEAD(&lsp->ls_locks);
-       INIT_WORK(&lsp->ls_release, free_lock_state_work);
        return lsp;
 out_free:
        kfree(lsp);
@@ -896,12 +883,13 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
-       if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags))
-               queue_work(nfsiod_workqueue, &lsp->ls_release);
-       else {
-               server = state->owner->so_server;
+       server = state->owner->so_server;
+       if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
+               struct nfs_client *clp = server->nfs_client;
+
+               clp->cl_mvops->free_lock_state(server, lsp);
+       } else
                nfs4_free_lock_state(server, lsp);
-       }
 }
 
 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
index f9821ce6658a4b09006e7d51d575319658753501..e94457c33ad630a3c290bf1c31486b55bd4884eb 100644 (file)
@@ -2657,6 +2657,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        struct xdr_stream *xdr = cd->xdr;
        int start_offset = xdr->buf->len;
        int cookie_offset;
+       u32 name_and_cookie;
        int entry_bytes;
        __be32 nfserr = nfserr_toosmall;
        __be64 wire_offset;
@@ -2718,7 +2719,14 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        cd->rd_maxcount -= entry_bytes;
        if (!cd->rd_dircount)
                goto fail;
-       cd->rd_dircount--;
+       /*
+        * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
+        * let's always let through the first entry, at least:
+        */
+       name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;
+       if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+               goto fail;
+       cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
        cd->cookie_offset = cookie_offset;
 skip_entry:
        cd->common.err = nfs_ok;
@@ -3321,6 +3329,10 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
        }
        maxcount = min_t(int, maxcount-16, bytes_left);
 
+       /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+       if (!readdir->rd_dircount)
+               readdir->rd_dircount = INT_MAX;
+
        readdir->xdr = xdr;
        readdir->rd_maxcount = maxcount;
        readdir->common.err = 0;
index 238a5930cb3c7d16e1c76952e66c6bf24f5299ae..9d7e2b9659cbdf2687e26bbbf0dc784ba7cced6d 100644 (file)
@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
 {
        struct {
                struct file_handle handle;
-               u8 pad[64];
+               u8 pad[MAX_HANDLE_SZ];
        } f;
        int size, ret, i;
 
@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
        size = f.handle.handle_bytes >> 2;
 
        ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
-       if ((ret == 255) || (ret == -ENOSPC)) {
+       if ((ret == FILEID_INVALID) || (ret < 0)) {
                WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
                return 0;
        }
index 302bf22c4a30762013dbbfd64d0353250101eb62..aae331a5d03b2591e670c9a08451bd00ad8e66f9 100644 (file)
@@ -381,6 +381,7 @@ static void __propagate_umount(struct mount *mnt)
                 * other children
                 */
                if (child && list_empty(&child->mnt_mounts)) {
+                       list_del_init(&child->mnt_child);
                        hlist_del_init_rcu(&child->mnt_hash);
                        hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
                }
index b28d1dd10e8b70194a604a1fca654237edd817be..bdc729d80e5e4eb863ad4fcf849aa6ea886a7519 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -65,7 +65,7 @@ int sync_filesystem(struct super_block *sb)
                return ret;
        return __sync_filesystem(sb, 1);
 }
-EXPORT_SYMBOL_GPL(sync_filesystem);
+EXPORT_SYMBOL(sync_filesystem);
 
 static void sync_inodes_one_sb(struct super_block *sb, void *arg)
 {
index 6eaf5edf1ea1577e88cafc60184963e1b18df5a5..e77db621ec8985ad21878c45e970ebc3f23258cb 100644 (file)
@@ -45,7 +45,7 @@ void udf_free_inode(struct inode *inode)
        udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
 }
 
-struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
+struct inode *udf_new_inode(struct inode *dir, umode_t mode)
 {
        struct super_block *sb = dir->i_sb;
        struct udf_sb_info *sbi = UDF_SB(sb);
@@ -55,14 +55,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        struct udf_inode_info *iinfo;
        struct udf_inode_info *dinfo = UDF_I(dir);
        struct logicalVolIntegrityDescImpUse *lvidiu;
+       int err;
 
        inode = new_inode(sb);
 
-       if (!inode) {
-               *err = -ENOMEM;
-               return NULL;
-       }
-       *err = -ENOSPC;
+       if (!inode)
+               return ERR_PTR(-ENOMEM);
 
        iinfo = UDF_I(inode);
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
@@ -80,21 +78,22 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        }
        if (!iinfo->i_ext.i_data) {
                iput(inode);
-               *err = -ENOMEM;
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
+       err = -ENOSPC;
        block = udf_new_block(dir->i_sb, NULL,
                              dinfo->i_location.partitionReferenceNum,
-                             start, err);
-       if (*err) {
+                             start, &err);
+       if (err) {
                iput(inode);
-               return NULL;
+               return ERR_PTR(err);
        }
 
        lvidiu = udf_sb_lvidiu(sb);
        if (lvidiu) {
                iinfo->i_unique = lvid_get_unique_id(sb);
+               inode->i_generation = iinfo->i_unique;
                mutex_lock(&sbi->s_alloc_mutex);
                if (S_ISDIR(mode))
                        le32_add_cpu(&lvidiu->numDirs, 1);
@@ -123,9 +122,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
                iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
        inode->i_mtime = inode->i_atime = inode->i_ctime =
                iinfo->i_crtime = current_fs_time(inode->i_sb);
-       insert_inode_hash(inode);
+       if (unlikely(insert_inode_locked(inode) < 0)) {
+               make_bad_inode(inode);
+               iput(inode);
+               return ERR_PTR(-EIO);
+       }
        mark_inode_dirty(inode);
 
-       *err = 0;
        return inode;
 }
index 236cd48184c2df20e75bc9fee098ded782f2560f..08598843288fe0c2dd206940d6437c606ef41c1f 100644 (file)
@@ -51,7 +51,6 @@ MODULE_LICENSE("GPL");
 
 static umode_t udf_convert_permissions(struct fileEntry *);
 static int udf_update_inode(struct inode *, int);
-static void udf_fill_inode(struct inode *, struct buffer_head *);
 static int udf_sync_inode(struct inode *inode);
 static int udf_alloc_i_data(struct inode *inode, size_t size);
 static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
@@ -1271,12 +1270,33 @@ update_time:
        return 0;
 }
 
-static void __udf_read_inode(struct inode *inode)
+/*
+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
+ * arbitrary - just that we hopefully don't limit any real use of rewritten
+ * inode on write-once media but avoid looping for too long on corrupted media.
+ */
+#define UDF_MAX_ICB_NESTING 1024
+
+static int udf_read_inode(struct inode *inode)
 {
        struct buffer_head *bh = NULL;
        struct fileEntry *fe;
+       struct extendedFileEntry *efe;
        uint16_t ident;
        struct udf_inode_info *iinfo = UDF_I(inode);
+       struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
+       struct kernel_lb_addr *iloc = &iinfo->i_location;
+       unsigned int link_count;
+       unsigned int indirections = 0;
+       int ret = -EIO;
+
+reread:
+       if (iloc->logicalBlockNum >=
+           sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
+               udf_debug("block=%d, partition=%d out of range\n",
+                         iloc->logicalBlockNum, iloc->partitionReferenceNum);
+               return -EIO;
+       }
 
        /*
         * Set defaults, but the inode is still incomplete!
@@ -1290,78 +1310,54 @@ static void __udf_read_inode(struct inode *inode)
         *      i_nlink = 1
         *      i_op = NULL;
         */
-       bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
+       bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
        if (!bh) {
                udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
-               make_bad_inode(inode);
-               return;
+               return -EIO;
        }
 
        if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
            ident != TAG_IDENT_USE) {
                udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
                        inode->i_ino, ident);
-               brelse(bh);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
 
        fe = (struct fileEntry *)bh->b_data;
+       efe = (struct extendedFileEntry *)bh->b_data;
 
        if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
                struct buffer_head *ibh;
 
-               ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
-                                       &ident);
+               ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
                if (ident == TAG_IDENT_IE && ibh) {
-                       struct buffer_head *nbh = NULL;
                        struct kernel_lb_addr loc;
                        struct indirectEntry *ie;
 
                        ie = (struct indirectEntry *)ibh->b_data;
                        loc = lelb_to_cpu(ie->indirectICB.extLocation);
 
-                       if (ie->indirectICB.extLength &&
-                               (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
-                                                       &ident))) {
-                               if (ident == TAG_IDENT_FE ||
-                                       ident == TAG_IDENT_EFE) {
-                                       memcpy(&iinfo->i_location,
-                                               &loc,
-                                               sizeof(struct kernel_lb_addr));
-                                       brelse(bh);
-                                       brelse(ibh);
-                                       brelse(nbh);
-                                       __udf_read_inode(inode);
-                                       return;
+                       if (ie->indirectICB.extLength) {
+                               brelse(ibh);
+                               memcpy(&iinfo->i_location, &loc,
+                                      sizeof(struct kernel_lb_addr));
+                               if (++indirections > UDF_MAX_ICB_NESTING) {
+                                       udf_err(inode->i_sb,
+                                               "too many ICBs in ICB hierarchy"
+                                               " (max %d supported)\n",
+                                               UDF_MAX_ICB_NESTING);
+                                       goto out;
                                }
-                               brelse(nbh);
+                               brelse(bh);
+                               goto reread;
                        }
                }
                brelse(ibh);
        } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
                udf_err(inode->i_sb, "unsupported strategy type: %d\n",
                        le16_to_cpu(fe->icbTag.strategyType));
-               brelse(bh);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
-       udf_fill_inode(inode, bh);
-
-       brelse(bh);
-}
-
-static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
-{
-       struct fileEntry *fe;
-       struct extendedFileEntry *efe;
-       struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
-       struct udf_inode_info *iinfo = UDF_I(inode);
-       unsigned int link_count;
-
-       fe = (struct fileEntry *)bh->b_data;
-       efe = (struct extendedFileEntry *)bh->b_data;
-
        if (fe->icbTag.strategyType == cpu_to_le16(4))
                iinfo->i_strat4096 = 0;
        else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
@@ -1378,11 +1374,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
                iinfo->i_efe = 1;
                iinfo->i_use = 0;
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                       sizeof(struct extendedFileEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                       sizeof(struct extendedFileEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct extendedFileEntry),
                       inode->i_sb->s_blocksize -
@@ -1390,11 +1385,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
                iinfo->i_efe = 0;
                iinfo->i_use = 0;
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                               sizeof(struct fileEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                               sizeof(struct fileEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct fileEntry),
                       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
@@ -1404,18 +1398,18 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                iinfo->i_lenAlloc = le32_to_cpu(
                                ((struct unallocSpaceEntry *)bh->b_data)->
                                 lengthAllocDescs);
-               if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                       sizeof(struct unallocSpaceEntry))) {
-                       make_bad_inode(inode);
-                       return;
-               }
+               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+                                       sizeof(struct unallocSpaceEntry));
+               if (ret)
+                       goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct unallocSpaceEntry),
                       inode->i_sb->s_blocksize -
                                        sizeof(struct unallocSpaceEntry));
-               return;
+               return 0;
        }
 
+       ret = -EIO;
        read_lock(&sbi->s_cred_lock);
        i_uid_write(inode, le32_to_cpu(fe->uid));
        if (!uid_valid(inode->i_uid) ||
@@ -1441,8 +1435,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        read_unlock(&sbi->s_cred_lock);
 
        link_count = le16_to_cpu(fe->fileLinkCount);
-       if (!link_count)
-               link_count = 1;
+       if (!link_count) {
+               ret = -ESTALE;
+               goto out;
+       }
        set_nlink(inode, link_count);
 
        inode->i_size = le64_to_cpu(fe->informationLength);
@@ -1488,6 +1484,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
                iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
        }
+       inode->i_generation = iinfo->i_unique;
 
        switch (fe->icbTag.fileType) {
        case ICBTAG_FILE_TYPE_DIRECTORY:
@@ -1537,8 +1534,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        default:
                udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
                        inode->i_ino, fe->icbTag.fileType);
-               make_bad_inode(inode);
-               return;
+               goto out;
        }
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                struct deviceSpec *dsea =
@@ -1549,8 +1545,12 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
                                      le32_to_cpu(dsea->minorDeviceIdent)));
                        /* Developer ID ??? */
                } else
-                       make_bad_inode(inode);
+                       goto out;
        }
+       ret = 0;
+out:
+       brelse(bh);
+       return ret;
 }
 
 static int udf_alloc_i_data(struct inode *inode, size_t size)
@@ -1664,7 +1664,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                     FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
        fe->permissions = cpu_to_le32(udfperms);
 
-       if (S_ISDIR(inode->i_mode))
+       if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
                fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
        else
                fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
@@ -1830,32 +1830,23 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
 {
        unsigned long block = udf_get_lb_pblock(sb, ino, 0);
        struct inode *inode = iget_locked(sb, block);
+       int err;
 
        if (!inode)
-               return NULL;
-
-       if (inode->i_state & I_NEW) {
-               memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
-               __udf_read_inode(inode);
-               unlock_new_inode(inode);
-       }
+               return ERR_PTR(-ENOMEM);
 
-       if (is_bad_inode(inode))
-               goto out_iput;
+       if (!(inode->i_state & I_NEW))
+               return inode;
 
-       if (ino->logicalBlockNum >= UDF_SB(sb)->
-                       s_partmaps[ino->partitionReferenceNum].s_partition_len) {
-               udf_debug("block=%d, partition=%d out of range\n",
-                         ino->logicalBlockNum, ino->partitionReferenceNum);
-               make_bad_inode(inode);
-               goto out_iput;
+       memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
+       err = udf_read_inode(inode);
+       if (err < 0) {
+               iget_failed(inode);
+               return ERR_PTR(err);
        }
+       unlock_new_inode(inode);
 
        return inode;
-
- out_iput:
-       iput(inode);
-       return NULL;
 }
 
 int udf_add_aext(struct inode *inode, struct extent_position *epos,
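
A minimal sketch of the reworked lookup contract, assuming a caller with sb and loc already in scope (the surrounding names are illustrative, not part of the patch): udf_iget() now reports failures as ERR_PTR() codes such as -ENOMEM, -EIO or -ESTALE instead of returning NULL, so callers propagate the exact error.

	struct inode *inode;

	inode = udf_iget(sb, &loc);
	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* e.g. -ESTALE when the link count is zero */
	/* ... use inode, then iput(inode) when done ... */
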
index 83a06001742bfaab6781bc26509517196fc3af16..c12e260fd6c417eb9c690782b8860f0e5eeff8d9 100644 (file)
@@ -270,9 +270,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
                                                NULL, 0),
                };
                inode = udf_iget(dir->i_sb, lb);
-               if (!inode) {
-                       return ERR_PTR(-EACCES);
-               }
+               if (IS_ERR(inode))
+                       return inode;
        } else
 #endif /* UDF_RECOVERY */
 
@@ -285,9 +284,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
 
                loc = lelb_to_cpu(cfi.icb.extLocation);
                inode = udf_iget(dir->i_sb, &loc);
-               if (!inode) {
-                       return ERR_PTR(-EACCES);
-               }
+               if (IS_ERR(inode))
+                       return ERR_CAST(inode);
        }
 
        return d_splice_alias(inode, dentry);
@@ -550,32 +548,18 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
        return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
 }
 
-static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
-                     bool excl)
+static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
 {
+       struct udf_inode_info *iinfo = UDF_I(inode);
+       struct inode *dir = dentry->d_parent->d_inode;
        struct udf_fileident_bh fibh;
-       struct inode *inode;
        struct fileIdentDesc cfi, *fi;
        int err;
-       struct udf_inode_info *iinfo;
-
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode) {
-               return err;
-       }
-
-       iinfo = UDF_I(inode);
-       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               inode->i_data.a_ops = &udf_adinicb_aops;
-       else
-               inode->i_data.a_ops = &udf_aops;
-       inode->i_op = &udf_file_inode_operations;
-       inode->i_fop = &udf_file_operations;
-       mark_inode_dirty(inode);
 
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi) {
+       if (unlikely(!fi)) {
                inode_dec_link_count(inode);
+               unlock_new_inode(inode);
                iput(inode);
                return err;
        }
@@ -589,23 +573,21 @@ static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
        brelse(fibh.sbh);
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
 
        return 0;
 }
 
-static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+                     bool excl)
 {
-       struct inode *inode;
-       struct udf_inode_info *iinfo;
-       int err;
+       struct inode *inode = udf_new_inode(dir, mode);
 
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode)
-               return err;
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
-       iinfo = UDF_I(inode);
-       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+       if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
                inode->i_data.a_ops = &udf_adinicb_aops;
        else
                inode->i_data.a_ops = &udf_aops;
@@ -613,7 +595,25 @@ static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_fop = &udf_file_operations;
        mark_inode_dirty(inode);
 
+       return udf_add_nondir(dentry, inode);
+}
+
+static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+       struct inode *inode = udf_new_inode(dir, mode);
+
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
+
+       if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+               inode->i_data.a_ops = &udf_adinicb_aops;
+       else
+               inode->i_data.a_ops = &udf_aops;
+       inode->i_op = &udf_file_inode_operations;
+       inode->i_fop = &udf_file_operations;
+       mark_inode_dirty(inode);
        d_tmpfile(dentry, inode);
+       unlock_new_inode(inode);
        return 0;
 }
 
@@ -621,44 +621,16 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
                     dev_t rdev)
 {
        struct inode *inode;
-       struct udf_fileident_bh fibh;
-       struct fileIdentDesc cfi, *fi;
-       int err;
-       struct udf_inode_info *iinfo;
 
        if (!old_valid_dev(rdev))
                return -EINVAL;
 
-       err = -EIO;
-       inode = udf_new_inode(dir, mode, &err);
-       if (!inode)
-               goto out;
+       inode = udf_new_inode(dir, mode);
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
-       iinfo = UDF_I(inode);
        init_special_inode(inode, mode, rdev);
-       fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi) {
-               inode_dec_link_count(inode);
-               iput(inode);
-               return err;
-       }
-       cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
-       cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-       *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-               cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
-       udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-       if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               mark_inode_dirty(dir);
-       mark_inode_dirty(inode);
-
-       if (fibh.sbh != fibh.ebh)
-               brelse(fibh.ebh);
-       brelse(fibh.sbh);
-       d_instantiate(dentry, inode);
-       err = 0;
-
-out:
-       return err;
+       return udf_add_nondir(dentry, inode);
 }
 
 static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -670,10 +642,9 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        struct udf_inode_info *dinfo = UDF_I(dir);
        struct udf_inode_info *iinfo;
 
-       err = -EIO;
-       inode = udf_new_inode(dir, S_IFDIR | mode, &err);
-       if (!inode)
-               goto out;
+       inode = udf_new_inode(dir, S_IFDIR | mode);
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
        iinfo = UDF_I(inode);
        inode->i_op = &udf_dir_inode_operations;
@@ -681,6 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
        if (!fi) {
                inode_dec_link_count(inode);
+               unlock_new_inode(inode);
                iput(inode);
                goto out;
        }
@@ -699,6 +671,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (!fi) {
                clear_nlink(inode);
                mark_inode_dirty(inode);
+               unlock_new_inode(inode);
                iput(inode);
                goto out;
        }
@@ -710,6 +683,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
        inc_nlink(dir);
        mark_inode_dirty(dir);
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
@@ -876,14 +850,11 @@ out:
 static int udf_symlink(struct inode *dir, struct dentry *dentry,
                       const char *symname)
 {
-       struct inode *inode;
+       struct inode *inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO);
        struct pathComponent *pc;
        const char *compstart;
-       struct udf_fileident_bh fibh;
        struct extent_position epos = {};
        int eoffset, elen = 0;
-       struct fileIdentDesc *fi;
-       struct fileIdentDesc cfi;
        uint8_t *ea;
        int err;
        int block;
@@ -892,9 +863,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
        struct udf_inode_info *iinfo;
        struct super_block *sb = dir->i_sb;
 
-       inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
-       if (!inode)
-               goto out;
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
        iinfo = UDF_I(inode);
        down_write(&iinfo->i_data_sem);
@@ -1012,32 +982,15 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
        mark_inode_dirty(inode);
        up_write(&iinfo->i_data_sem);
 
-       fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
-       if (!fi)
-               goto out_fail;
-       cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
-       cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-       if (UDF_SB(inode->i_sb)->s_lvid_bh) {
-               *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-                       cpu_to_le32(lvid_get_unique_id(sb));
-       }
-       udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-       if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-               mark_inode_dirty(dir);
-       if (fibh.sbh != fibh.ebh)
-               brelse(fibh.ebh);
-       brelse(fibh.sbh);
-       d_instantiate(dentry, inode);
-       err = 0;
-
+       err = udf_add_nondir(dentry, inode);
 out:
        kfree(name);
        return err;
 
 out_no_entry:
        up_write(&iinfo->i_data_sem);
-out_fail:
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        goto out;
 }
@@ -1222,7 +1175,7 @@ static struct dentry *udf_get_parent(struct dentry *child)
        struct udf_fileident_bh fibh;
 
        if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
-               goto out_unlock;
+               return ERR_PTR(-EACCES);
 
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
@@ -1230,12 +1183,10 @@ static struct dentry *udf_get_parent(struct dentry *child)
 
        tloc = lelb_to_cpu(cfi.icb.extLocation);
        inode = udf_iget(child->d_inode->i_sb, &tloc);
-       if (!inode)
-               goto out_unlock;
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
 
        return d_obtain_alias(inode);
-out_unlock:
-       return ERR_PTR(-EACCES);
 }
 
 
@@ -1252,8 +1203,8 @@ static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
        loc.partitionReferenceNum = partref;
        inode = udf_iget(sb, &loc);
 
-       if (inode == NULL)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
 
        if (generation && inode->i_generation != generation) {
                iput(inode);
index 813da94d447b3b44418d2ce84bc70121cb62c92e..5401fc33f5cc906b76b846e62f74751cabd50a68 100644 (file)
@@ -961,12 +961,14 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
 
        metadata_fe = udf_iget(sb, &addr);
 
-       if (metadata_fe == NULL)
+       if (IS_ERR(metadata_fe)) {
                udf_warn(sb, "metadata inode efe not found\n");
-       else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
+               return metadata_fe;
+       }
+       if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
                udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
                iput(metadata_fe);
-               metadata_fe = NULL;
+               return ERR_PTR(-EIO);
        }
 
        return metadata_fe;
@@ -978,6 +980,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        struct udf_part_map *map;
        struct udf_meta_data *mdata;
        struct kernel_lb_addr addr;
+       struct inode *fe;
 
        map = &sbi->s_partmaps[partition];
        mdata = &map->s_type_specific.s_metadata;
@@ -986,22 +989,24 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        udf_debug("Metadata file location: block = %d part = %d\n",
                  mdata->s_meta_file_loc, map->s_partition_num);
 
-       mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
-               mdata->s_meta_file_loc, map->s_partition_num);
-
-       if (mdata->s_metadata_fe == NULL) {
+       fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
+                                        map->s_partition_num);
+       if (IS_ERR(fe)) {
                /* mirror file entry */
                udf_debug("Mirror metadata file location: block = %d part = %d\n",
                          mdata->s_mirror_file_loc, map->s_partition_num);
 
-               mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
-                       mdata->s_mirror_file_loc, map->s_partition_num);
+               fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
+                                                map->s_partition_num);
 
-               if (mdata->s_mirror_fe == NULL) {
+               if (IS_ERR(fe)) {
                        udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
-                       return -EIO;
+                       return PTR_ERR(fe);
                }
-       }
+               mdata->s_mirror_fe = fe;
+       } else
+               mdata->s_metadata_fe = fe;
+
 
        /*
         * bitmap file entry
@@ -1015,15 +1020,16 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
                udf_debug("Bitmap file location: block = %d part = %d\n",
                          addr.logicalBlockNum, addr.partitionReferenceNum);
 
-               mdata->s_bitmap_fe = udf_iget(sb, &addr);
-               if (mdata->s_bitmap_fe == NULL) {
+               fe = udf_iget(sb, &addr);
+               if (IS_ERR(fe)) {
                        if (sb->s_flags & MS_RDONLY)
                                udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
                        else {
                                udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
-                               return -EIO;
+                               return PTR_ERR(fe);
                        }
-               }
+               } else
+                       mdata->s_bitmap_fe = fe;
        }
 
        udf_debug("udf_load_metadata_files Ok\n");
@@ -1111,13 +1117,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                                phd->unallocSpaceTable.extPosition),
                        .partitionReferenceNum = p_index,
                };
+               struct inode *inode;
 
-               map->s_uspace.s_table = udf_iget(sb, &loc);
-               if (!map->s_uspace.s_table) {
+               inode = udf_iget(sb, &loc);
+               if (IS_ERR(inode)) {
                        udf_debug("cannot load unallocSpaceTable (part %d)\n",
                                  p_index);
-                       return -EIO;
+                       return PTR_ERR(inode);
                }
+               map->s_uspace.s_table = inode;
                map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
                udf_debug("unallocSpaceTable (part %d) @ %ld\n",
                          p_index, map->s_uspace.s_table->i_ino);
@@ -1144,14 +1152,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                                phd->freedSpaceTable.extPosition),
                        .partitionReferenceNum = p_index,
                };
+               struct inode *inode;
 
-               map->s_fspace.s_table = udf_iget(sb, &loc);
-               if (!map->s_fspace.s_table) {
+               inode = udf_iget(sb, &loc);
+               if (IS_ERR(inode)) {
                        udf_debug("cannot load freedSpaceTable (part %d)\n",
                                  p_index);
-                       return -EIO;
+                       return PTR_ERR(inode);
                }
-
+               map->s_fspace.s_table = inode;
                map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
                udf_debug("freedSpaceTable (part %d) @ %ld\n",
                          p_index, map->s_fspace.s_table->i_ino);
@@ -1178,6 +1187,7 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
        struct udf_part_map *map = &sbi->s_partmaps[p_index];
        sector_t vat_block;
        struct kernel_lb_addr ino;
+       struct inode *inode;
 
        /*
         * VAT file entry is in the last recorded block. Some broken disks have
@@ -1186,10 +1196,13 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
        ino.partitionReferenceNum = type1_index;
        for (vat_block = start_block;
             vat_block >= map->s_partition_root &&
-            vat_block >= start_block - 3 &&
-            !sbi->s_vat_inode; vat_block--) {
+            vat_block >= start_block - 3; vat_block--) {
                ino.logicalBlockNum = vat_block - map->s_partition_root;
-               sbi->s_vat_inode = udf_iget(sb, &ino);
+               inode = udf_iget(sb, &ino);
+               if (!IS_ERR(inode)) {
+                       sbi->s_vat_inode = inode;
+                       break;
+               }
        }
 }
 
@@ -2205,10 +2218,10 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        /* assign inodes by physical block number */
        /* perhaps it's not extensible enough, but for now ... */
        inode = udf_iget(sb, &rootdir);
-       if (!inode) {
+       if (IS_ERR(inode)) {
                udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
                       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
-               ret = -EIO;
+               ret = PTR_ERR(inode);
                goto error_out;
        }
 
index be7dabbbcb49ef171b33e0ec2bc4a672c073282d..742557be9936399fba764f645c04b969d2a3cfd5 100644 (file)
@@ -143,7 +143,6 @@ extern int udf_expand_file_adinicb(struct inode *);
 extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
 extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
 extern int udf_setsize(struct inode *, loff_t);
-extern void udf_read_inode(struct inode *);
 extern void udf_evict_inode(struct inode *);
 extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
 extern long udf_block_map(struct inode *, sector_t);
@@ -209,7 +208,7 @@ extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
-extern struct inode *udf_new_inode(struct inode *, umode_t, int *);
+extern struct inode *udf_new_inode(struct inode *, umode_t);
 
 /* truncate.c */
 extern void udf_truncate_tail_extent(struct inode *);
index 7c580c97990ee05dee7e5ad42a9c5754bfe4384d..be7d42c7d9382bf8072a7e6e9eff7e03b24bab25 100644 (file)
@@ -902,9 +902,6 @@ void ufs_evict_inode(struct inode * inode)
        invalidate_inode_buffers(inode);
        clear_inode(inode);
 
-       if (want_delete) {
-               lock_ufs(inode->i_sb);
-               ufs_free_inode (inode);
-               unlock_ufs(inode->i_sb);
-       }
+       if (want_delete)
+               ufs_free_inode(inode);
 }
index 90d74b8f8eba8dacd536c74ac161cceddc241196..2df62a73f20ce2071c17d81c343ada85ae287d2a 100644 (file)
@@ -126,12 +126,12 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
        if (l > sb->s_blocksize)
                goto out_notlocked;
 
-       lock_ufs(dir->i_sb);
        inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
-               goto out;
+               goto out_notlocked;
 
+       lock_ufs(dir->i_sb);
        if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
                /* slow symlink */
                inode->i_op = &ufs_symlink_inode_operations;
@@ -181,13 +181,9 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
        struct inode * inode;
        int err;
 
-       lock_ufs(dir->i_sb);
-       inode_inc_link_count(dir);
-
        inode = ufs_new_inode(dir, S_IFDIR|mode);
-       err = PTR_ERR(inode);
        if (IS_ERR(inode))
-               goto out_dir;
+               return PTR_ERR(inode);
 
        inode->i_op = &ufs_dir_inode_operations;
        inode->i_fop = &ufs_dir_operations;
@@ -195,6 +191,9 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
 
        inode_inc_link_count(inode);
 
+       lock_ufs(dir->i_sb);
+       inode_inc_link_count(dir);
+
        err = ufs_make_empty(inode, dir);
        if (err)
                goto out_fail;
@@ -212,7 +211,6 @@ out_fail:
        inode_dec_link_count(inode);
        inode_dec_link_count(inode);
        iput (inode);
-out_dir:
        inode_dec_link_count(dir);
        unlock_ufs(dir->i_sb);
        goto out;
index de2d26d328446e65698dca0ff9f4fe2317c3f04e..86df952d3e24e7054477338f479d325fa212ce0c 100644 (file)
@@ -5424,7 +5424,7 @@ xfs_bmap_shift_extents(
        struct xfs_bmap_free    *flist,
        int                     num_exts)
 {
-       struct xfs_btree_cur            *cur;
+       struct xfs_btree_cur            *cur = NULL;
        struct xfs_bmbt_rec_host        *gotp;
        struct xfs_bmbt_irec            got;
        struct xfs_bmbt_irec            left;
@@ -5435,7 +5435,7 @@ xfs_bmap_shift_extents(
        int                             error = 0;
        int                             i;
        int                             whichfork = XFS_DATA_FORK;
-       int                             logflags;
+       int                             logflags = 0;
        xfs_filblks_t                   blockcount = 0;
        int                             total_extents;
 
@@ -5478,16 +5478,11 @@ xfs_bmap_shift_extents(
                }
        }
 
-       /* We are going to change core inode */
-       logflags = XFS_ILOG_CORE;
        if (ifp->if_flags & XFS_IFBROOT) {
                cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
                cur->bc_private.b.firstblock = *firstblock;
                cur->bc_private.b.flist = flist;
                cur->bc_private.b.flags = 0;
-       } else {
-               cur = NULL;
-               logflags |= XFS_ILOG_DEXT;
        }
 
        /*
@@ -5545,11 +5540,14 @@ xfs_bmap_shift_extents(
                        blockcount = left.br_blockcount +
                                got.br_blockcount;
                        xfs_iext_remove(ip, *current_ext, 1, 0);
+                       logflags |= XFS_ILOG_CORE;
                        if (cur) {
                                error = xfs_btree_delete(cur, &i);
                                if (error)
                                        goto del_cursor;
                                XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+                       } else {
+                               logflags |= XFS_ILOG_DEXT;
                        }
                        XFS_IFORK_NEXT_SET(ip, whichfork,
                                XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@@ -5575,6 +5573,7 @@ xfs_bmap_shift_extents(
                        got.br_startoff = startoff;
                }
 
+               logflags |= XFS_ILOG_CORE;
                if (cur) {
                        error = xfs_bmbt_update(cur, got.br_startoff,
                                                got.br_startblock,
@@ -5582,6 +5581,8 @@ xfs_bmap_shift_extents(
                                                got.br_state);
                        if (error)
                                goto del_cursor;
+               } else {
+                       logflags |= XFS_ILOG_DEXT;
                }
 
                (*current_ext)++;
@@ -5597,6 +5598,7 @@ del_cursor:
                xfs_btree_del_cursor(cur,
                        error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 
-       xfs_trans_log_inode(tp, ip, logflags);
+       if (logflags)
+               xfs_trans_log_inode(tp, ip, logflags);
        return error;
 }
index 11e9b4caa54f168f7e429f5d7e5a01302d026e1c..b984647c24db02e98b4e030fd03c7bac75740923 100644 (file)
@@ -1753,11 +1753,72 @@ xfs_vm_readpages(
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
+/*
+ * This is basically a copy of __set_page_dirty_buffers() with one
+ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
+ * dirty, we'll never be able to clean them because we don't write buffers
+ * beyond EOF, and that means we can't invalidate pages that span EOF
+ * that have been marked dirty. Further, the dirty state can leak into
+ * the file interior if the file is extended, resulting in all sorts of
+ * bad things happening as the state does not match the underlying data.
+ *
+ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
+ * this only exist because of bufferheads and how the generic code manages them.
+ */
+STATIC int
+xfs_vm_set_page_dirty(
+       struct page             *page)
+{
+       struct address_space    *mapping = page->mapping;
+       struct inode            *inode = mapping->host;
+       loff_t                  end_offset;
+       loff_t                  offset;
+       int                     newly_dirty;
+
+       if (unlikely(!mapping))
+               return !TestSetPageDirty(page);
+
+       end_offset = i_size_read(inode);
+       offset = page_offset(page);
+
+       spin_lock(&mapping->private_lock);
+       if (page_has_buffers(page)) {
+               struct buffer_head *head = page_buffers(page);
+               struct buffer_head *bh = head;
+
+               do {
+                       if (offset < end_offset)
+                               set_buffer_dirty(bh);
+                       bh = bh->b_this_page;
+                       offset += 1 << inode->i_blkbits;
+               } while (bh != head);
+       }
+       newly_dirty = !TestSetPageDirty(page);
+       spin_unlock(&mapping->private_lock);
+
+       if (newly_dirty) {
+               /* sigh - __set_page_dirty() is static, so copy it here, too */
+               unsigned long flags;
+
+               spin_lock_irqsave(&mapping->tree_lock, flags);
+               if (page->mapping) {    /* Race with truncate? */
+                       WARN_ON_ONCE(!PageUptodate(page));
+                       account_page_dirtied(page, mapping);
+                       radix_tree_tag_set(&mapping->page_tree,
+                                       page_index(page), PAGECACHE_TAG_DIRTY);
+               }
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       }
+       return newly_dirty;
+}
+
 const struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
+       .set_page_dirty         = xfs_vm_set_page_dirty,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .write_begin            = xfs_vm_write_begin,
index 2f1e30d39a3540f9d545e8ab9c5afd52af0047a9..1707980f9a4b564842cd435d9efc70571a5a9b18 100644 (file)
@@ -1470,6 +1470,26 @@ xfs_collapse_file_space(
        start_fsb = XFS_B_TO_FSB(mp, offset + len);
        shift_fsb = XFS_B_TO_FSB(mp, len);
 
+       /*
+        * Writeback the entire file and force remove any post-eof blocks. The
+        * writeback prevents changes to the extent list via concurrent
+        * writeback and the eofblocks trim prevents the extent shift algorithm
+        * from running into a post-eof delalloc extent.
+        *
+        * XXX: This is a temporary fix until the extent shift loop below is
+        * converted to use offsets and lookups within the ILOCK rather than
+        * carrying around the index into the extent list for the next
+        * iteration.
+        */
+       error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+       if (error)
+               return error;
+       if (xfs_can_free_eofblocks(ip, true)) {
+               error = xfs_free_eofblocks(mp, ip, false);
+               if (error)
+                       return error;
+       }
+
        error = xfs_free_file_space(ip, offset, len);
        if (error)
                return error;
index 076b1708d1345474ec2fd4e3a4c2e3bc605cb75a..de5368c803f9db192416e21032ade4535a0efcdd 100644 (file)
@@ -291,12 +291,22 @@ xfs_file_read_iter(
                if (inode->i_mapping->nrpages) {
                        ret = filemap_write_and_wait_range(
                                                        VFS_I(ip)->i_mapping,
-                                                       pos, -1);
+                                                       pos, pos + size - 1);
                        if (ret) {
                                xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
                                return ret;
                        }
-                       truncate_pagecache_range(VFS_I(ip), pos, -1);
+
+                       /*
+                        * Invalidate whole pages. This can return an error if
+                        * we fail to invalidate a page, but this should never
+                        * happen on XFS. Warn if it does fail.
+                        */
+                       ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
+                                       pos >> PAGE_CACHE_SHIFT,
+                                       (pos + size - 1) >> PAGE_CACHE_SHIFT);
+                       WARN_ON_ONCE(ret);
+                       ret = 0;
                }
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
        }
@@ -632,10 +642,19 @@ xfs_file_dio_aio_write(
 
        if (mapping->nrpages) {
                ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-                                                   pos, -1);
+                                                   pos, pos + count - 1);
                if (ret)
                        goto out;
-               truncate_pagecache_range(VFS_I(ip), pos, -1);
+               /*
+                * Invalidate whole pages. This can return an error if
+                * we fail to invalidate a page, but this should never
+                * happen on XFS. Warn if it does fail.
+                */
+               ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
+                                       pos >> PAGE_CACHE_SHIFT,
+                                       (pos + count - 1) >> PAGE_CACHE_SHIFT);
+               WARN_ON_ONCE(ret);
+               ret = 0;
        }
 
        /*
index bcfd808b1098e81e410310e33582d814274188b8..c1c9de19edbe8abc62f2508bf9a7723bd9027d08 100644 (file)
@@ -246,7 +246,6 @@ struct acpi_device_pnp {
        acpi_device_name device_name;   /* Driver-determined */
        acpi_device_class device_class; /*        "          */
        union acpi_object *str_obj;     /* unicode string for _STR method */
-       unsigned long sun;              /* _SUN */
 };
 
 #define acpi_device_bid(d)     ((d)->pnp.bus_id)
index b535e9da7de6df14402f3b4008dcb18e5b7e322e..961b9c130ea90ff62c4ded40f376c4bae6e094e2 100644 (file)
  */
 #define CLK_NR_CLKS                    248
 
+/*
+ * CMU DMC
+ */
+
+#define CLK_FOUT_BPLL                  1
+#define CLK_FOUT_EPLL                  2
+
+/* Muxes */
+#define CLK_MOUT_MPLL_MIF              8
+#define CLK_MOUT_BPLL                  9
+#define CLK_MOUT_DPHY                  10
+#define CLK_MOUT_DMC_BUS               11
+#define CLK_MOUT_EPLL                  12
+
+/* Dividers */
+#define CLK_DIV_DMC                    16
+#define CLK_DIV_DPHY                   17
+#define CLK_DIV_DMC_PRE                        18
+#define CLK_DIV_DMCP                   19
+#define CLK_DIV_DMCD                   20
+
+/*
+ * Total number of clocks of CMU DMC.
+ * NOTE: Must be one greater than the last clock ID.
+ */
+#define NR_CLKS_DMC                    21
+
 #endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
index 459bd2bd411fbe1387b7949ad218f5df6b845a01..34fe28c622d0a6c2cbfbef348be34dfefa2b59f9 100644 (file)
 #define CLK_SMMU_MFCR          275
 #define CLK_G3D                        276
 #define CLK_G2D                        277
-#define CLK_ROTATOR            278 /* Exynos4210 only */
-#define CLK_MDMA               279 /* Exynos4210 only */
-#define CLK_SMMU_G2D           280 /* Exynos4210 only */
-#define CLK_SMMU_ROTATOR       281 /* Exynos4210 only */
-#define CLK_SMMU_MDMA          282 /* Exynos4210 only */
+#define CLK_ROTATOR            278
+#define CLK_MDMA               279
+#define CLK_SMMU_G2D           280
+#define CLK_SMMU_ROTATOR       281
+#define CLK_SMMU_MDMA          282
 #define CLK_FIMD0              283
 #define CLK_MIE0               284
 #define CLK_MDNIE0             285 /* Exynos4412 only */
 #define CLK_MOUT_G3D1          393
 #define CLK_MOUT_G3D           394
 #define CLK_ACLK400_MCUISP     395 /* Exynos4x12 only */
+#define CLK_MOUT_HDMI          396
+#define CLK_MOUT_MIXER         397
 
 /* gate clocks - ppmu */
 #define CLK_PPMULEFT           400
index aad579a758020555966fa484c749bf0eb49a3bb0..fd29c174ba632114ff720a4a885d5b3beb403139 100644 (file)
@@ -46,6 +46,7 @@
 #define HIX5HD2_SFC_MUX                        64
 #define HIX5HD2_MMC_MUX                        65
 #define HIX5HD2_FEPHY_MUX              66
+#define HIX5HD2_SD_MUX                 67
 
 /* gate clocks */
 #define HIX5HD2_SFC_RST                        128
 #define HIX5HD2_MMC_CIU_CLK            130
 #define HIX5HD2_MMC_BIU_CLK            131
 #define HIX5HD2_MMC_CIU_RST            132
+#define HIX5HD2_FWD_BUS_CLK            133
+#define HIX5HD2_FWD_SYS_CLK            134
+#define HIX5HD2_MAC0_PHY_CLK           135
+#define HIX5HD2_SD_CIU_CLK             136
+#define HIX5HD2_SD_BIU_CLK             137
+#define HIX5HD2_SD_CIU_RST             138
+#define HIX5HD2_WDG0_CLK               139
+#define HIX5HD2_WDG0_RST               140
+#define HIX5HD2_I2C0_CLK               141
+#define HIX5HD2_I2C0_RST               142
+#define HIX5HD2_I2C1_CLK               143
+#define HIX5HD2_I2C1_RST               144
+#define HIX5HD2_I2C2_CLK               145
+#define HIX5HD2_I2C2_RST               146
+#define HIX5HD2_I2C3_CLK               147
+#define HIX5HD2_I2C3_RST               148
+#define HIX5HD2_I2C4_CLK               149
+#define HIX5HD2_I2C4_RST               150
+#define HIX5HD2_I2C5_CLK               151
+#define HIX5HD2_I2C5_RST               152
+
+/* complex */
+#define HIX5HD2_MAC0_CLK               192
+#define HIX5HD2_MAC1_CLK               193
+#define HIX5HD2_SATA_CLK               194
+#define HIX5HD2_USB_CLK                        195
 
 #define HIX5HD2_NR_CLKS                        256
 #endif /* __DTS_HIX5HD2_CLOCK_H */
diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h
new file mode 100644 (file)
index 0000000..e65803b
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Inspired by original work from pxa2xx-regs.h by Nicolas Pitre
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_PXA2XX_H__
+#define __DT_BINDINGS_CLOCK_PXA2XX_H__
+
+#define CLK_NONE 0
+#define CLK_1WIRE 1
+#define CLK_AC97 2
+#define CLK_AC97CONF 3
+#define CLK_ASSP 4
+#define CLK_BOOT 5
+#define CLK_BTUART 6
+#define CLK_CAMERA 7
+#define CLK_CIR 8
+#define CLK_CORE 9
+#define CLK_DMC 10
+#define CLK_FFUART 11
+#define CLK_FICP 12
+#define CLK_GPIO 13
+#define CLK_HSIO2 14
+#define CLK_HWUART 15
+#define CLK_I2C 16
+#define CLK_I2S 17
+#define CLK_IM 18
+#define CLK_INC 19
+#define CLK_ISC 20
+#define CLK_KEYPAD 21
+#define CLK_LCD 22
+#define CLK_MEMC 23
+#define CLK_MEMSTK 24
+#define CLK_MINI_IM 25
+#define CLK_MINI_LCD 26
+#define CLK_MMC 27
+#define CLK_MMC1 28
+#define CLK_MMC2 29
+#define CLK_MMC3 30
+#define CLK_MSL 31
+#define CLK_MSL0 32
+#define CLK_MVED 33
+#define CLK_NAND 34
+#define CLK_NSSP 35
+#define CLK_OSTIMER 36
+#define CLK_PWM0 37
+#define CLK_PWM1 38
+#define CLK_PWM2 39
+#define CLK_PWM3 40
+#define CLK_PWRI2C 41
+#define CLK_PXA300_GCU 42
+#define CLK_PXA320_GCU 43
+#define CLK_SMC 44
+#define CLK_SSP 45
+#define CLK_SSP1 46
+#define CLK_SSP2 47
+#define CLK_SSP3 48
+#define CLK_SSP4 49
+#define CLK_STUART 50
+#define CLK_TOUCH 51
+#define CLK_TPM 52
+#define CLK_UDC 53
+#define CLK_USB 54
+#define CLK_USB2 55
+#define CLK_USBH 56
+#define CLK_USBHOST 57
+#define CLK_USIM 58
+#define CLK_USIM1 59
+#define CLK_USMI0 60
+#define CLK_MAX 61
+
+#endif
index 8a4c5892890f3c587e48dbd4d6de942e0c4a3820..6bac637fd635d16717b027b136ba70bba465276b 100644 (file)
 #define TEGRA124_CLK_DSIB_MUX 310
 #define TEGRA124_CLK_SOR0_LVDS 311
 #define TEGRA124_CLK_XUSB_SS_DIV2 312
-#define TEGRA124_CLK_CLK_MAX 313
+
+#define TEGRA124_CLK_PLL_M_UD 313
+#define TEGRA124_CLK_PLL_C_UD 314
+
+#define TEGRA124_CLK_CLK_MAX 315
 
 #endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
index 4ed34105c3718d11577e8fff44c92ce4b806493a..0ca5f60469203f1eb0800de4f51bf2beecc6a641 100644 (file)
@@ -46,6 +46,7 @@ struct clk {
        unsigned int            enable_count;
        unsigned int            prepare_count;
        unsigned long           accuracy;
+       int                     phase;
        struct hlist_head       children;
        struct hlist_node       child_node;
        struct hlist_node       debug_node;
index 411dd7eb265339d4fd995312248df0f2b720c700..be21af149f119394c68bd8018a8de39f2db067ee 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/of.h>
 
 #ifdef CONFIG_COMMON_CLK
 
@@ -129,6 +130,14 @@ struct dentry;
  *             set then clock accuracy will be initialized to parent accuracy
  *             or 0 (perfect clock) if clock has no parent.
  *
+ * @get_phase: Queries the hardware to get the current phase of a clock.
+ *             Returned values are 0-359 degrees on success, negative
+ *             error codes on failure.
+ *
+ * @set_phase: Shift the phase of this clock signal by the number of
+ *             degrees specified by the second argument. Valid values
+ *             for degrees are 0-359. Returns 0 on success, otherwise
+ *             a negative error code.
+ *
  * @init:      Perform platform-specific initialization magic.
  *             This is not used by any of the basic clock types.
  *             Please consider other ways of solving initialization problems
@@ -177,6 +186,8 @@ struct clk_ops {
                                    unsigned long parent_rate, u8 index);
        unsigned long   (*recalc_accuracy)(struct clk_hw *hw,
                                           unsigned long parent_accuracy);
+       int             (*get_phase)(struct clk_hw *hw);
+       int             (*set_phase)(struct clk_hw *hw, int degrees);
        void            (*init)(struct clk_hw *hw);
        int             (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
 };
@@ -488,6 +499,28 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
                struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
                unsigned long flags);
 
+/**
+ * struct clk_gpio - gpio gated clock
+ *
+ * @hw:                handle between common and hardware-specific interfaces
+ * @gpiod:     gpio descriptor
+ *
+ * Clock with a gpio control for enabling and disabling the parent clock.
+ * Implements .enable, .disable and .is_enabled
+ */
+
+struct clk_gpio {
+       struct clk_hw   hw;
+       struct gpio_desc *gpiod;
+};
+
+extern const struct clk_ops clk_gpio_gate_ops;
+struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
+               const char *parent_name, struct gpio_desc *gpio,
+               unsigned long flags);
+
+void of_gpio_clk_gate_setup(struct device_node *node);
+
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
  * @dev: device that is registering this clock
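
A minimal provider-side sketch of the new .get_phase/.set_phase hooks, assuming a hypothetical clock with a 2-bit phase field in a memory-mapped register (the foo_* names, register layout and 90-degree granularity are assumptions, not part of this patch):

	struct foo_phase_clk {
		struct clk_hw hw;
		void __iomem *reg;	/* assumed 2-bit field: 0..3 -> 0/90/180/270 degrees */
	};

	#define to_foo_phase_clk(_hw) container_of(_hw, struct foo_phase_clk, hw)

	static int foo_clk_get_phase(struct clk_hw *hw)
	{
		struct foo_phase_clk *clk = to_foo_phase_clk(hw);

		return (readl(clk->reg) & 0x3) * 90;	/* 0-359 degrees */
	}

	static int foo_clk_set_phase(struct clk_hw *hw, int degrees)
	{
		struct foo_phase_clk *clk = to_foo_phase_clk(hw);

		if (degrees % 90)
			return -EINVAL;		/* hardware only supports 90-degree steps */

		writel((degrees / 90) & 0x3, clk->reg);
		return 0;
	}

	static const struct clk_ops foo_phase_clk_ops = {
		.get_phase	= foo_clk_get_phase,
		.set_phase	= foo_clk_set_phase,
	};
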
index fb5e097d8f72b07fb34e480bb817593284dccdd2..38bdedd3e389bf874663b8fe65ba175a22ca668c 100644 (file)
@@ -106,6 +106,25 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
  */
 long clk_get_accuracy(struct clk *clk);
 
+/**
+ * clk_set_phase - adjust the phase shift of a clock signal
+ * @clk: clock signal source
+ * @degrees: number of degrees the signal is shifted
+ *
+ * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
+ * success, a negative error code otherwise.
+ */
+int clk_set_phase(struct clk *clk, int degrees);
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees on success, otherwise
+ * a negative error code.
+ */
+int clk_get_phase(struct clk *clk);
+
 #else
 
 static inline long clk_get_accuracy(struct clk *clk)
@@ -113,6 +132,16 @@ static inline long clk_get_accuracy(struct clk *clk)
        return -ENOTSUPP;
 }
 
+static inline int clk_set_phase(struct clk *clk, int phase)
+{
+       return -ENOTSUPP;
+}
+
+static inline int clk_get_phase(struct clk *clk)
+{
+       return -ENOTSUPP;
+}
+
 #endif
 
 /**
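
A consumer-side sketch of the new phase calls, assuming a driver with a clock consumer named "sample" (names are illustrative; with CONFIG_COMMON_CLK disabled both calls fall back to the -ENOTSUPP stubs above):

	static int foo_setup_sample_clk(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(dev, "sample");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_set_phase(clk, 180);	/* shift by 180 degrees */
		if (ret)
			return ret;

		dev_dbg(dev, "phase now %d degrees\n", clk_get_phase(clk));
		return 0;
	}
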
index e8d8a35034a5e81c95e0471d0530f7c87a784db9..f75acbf70e9630631e03bd49e43414d7b3b4b9e3 100644 (file)
@@ -292,6 +292,7 @@ void omap2xxx_clkt_vps_init(void);
 void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
 void ti_dt_clocks_register(struct ti_dt_clk *oclks);
 void ti_dt_clk_init_provider(struct device_node *np, int index);
+void ti_dt_clk_init_retry_clks(void);
 void ti_dt_clockdomains_setup(void);
 int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
                      ti_of_clk_init_cb_t func);
index e4ae2ad48d072efcd78e3905c3f1e212130c88cc..75a227cc7ce20a70f9a7e066059a5b8317696c82 100644 (file)
@@ -55,6 +55,7 @@ struct qstr {
 #define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
 #define hashlen_hash(hashlen) ((u32) (hashlen))
 #define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
+#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
 
 struct dentry_stat_t {
        long nr_dentry;
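
For reference, hashlen_create() is the inverse of the two accessors already in this header: the name length goes into the upper 32 bits and the hash into the lower 32 bits. A worked example with made-up values:

	u64 hashlen = hashlen_create(0xdeadbeef, 7);

	/* hashlen_hash(hashlen) == 0xdeadbeef, hashlen_len(hashlen) == 7 */
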
index 6ff0b0b42d47d7d6f719a3c2f5f6b5f5f2e16167..08ed2b0a96e6aef0ada70da633322f36a8acfe7e 100644 (file)
@@ -24,6 +24,9 @@
 #define NULL_ADDR              ((block_t)0)    /* used as block_t addresses */
 #define NEW_ADDR               ((block_t)-1)   /* used as block_t addresses */
 
+/* 0, 1(node nid), 2(meta nid) are reserved node ids */
+#define F2FS_RESERVED_NODE_NUM         3
+
 #define F2FS_ROOT_INO(sbi)     (sbi->root_ino_num)
 #define F2FS_NODE_INO(sbi)     (sbi->node_ino_num)
 #define F2FS_META_INO(sbi)     (sbi->meta_ino_num)
@@ -87,6 +90,8 @@ struct f2fs_super_block {
 #define CP_ORPHAN_PRESENT_FLAG 0x00000002
 #define CP_UMOUNT_FLAG         0x00000001
 
+#define F2FS_CP_PACKS          2       /* # of checkpoint packs */
+
 struct f2fs_checkpoint {
        __le64 checkpoint_ver;          /* checkpoint block version number */
        __le64 user_block_count;        /* # of user blocks */
@@ -123,6 +128,9 @@ struct f2fs_checkpoint {
  */
 #define F2FS_ORPHANS_PER_BLOCK 1020
 
+#define GET_ORPHAN_BLOCKS(n)   ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+                                       F2FS_ORPHANS_PER_BLOCK)
+
 struct f2fs_orphan_block {
        __le32 ino[F2FS_ORPHANS_PER_BLOCK];     /* inode numbers */
        __le32 reserved;        /* reserved */
@@ -144,6 +152,7 @@ struct f2fs_extent {
 #define F2FS_NAME_LEN          255
 #define F2FS_INLINE_XATTR_ADDRS        50      /* 200 bytes for inline xattrs */
 #define DEF_ADDRS_PER_INODE    923     /* Address Pointers in an Inode */
+#define DEF_NIDS_PER_INODE     5       /* Node IDs in an Inode */
 #define ADDRS_PER_INODE(fi)    addrs_per_inode(fi)
 #define ADDRS_PER_BLOCK                1018    /* Address Pointers in a Direct Block */
 #define NIDS_PER_BLOCK         1018    /* Node IDs in an Indirect Block */
@@ -163,8 +172,9 @@ struct f2fs_extent {
 #define MAX_INLINE_DATA                (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
                                                F2FS_INLINE_XATTR_ADDRS - 1))
 
-#define INLINE_DATA_OFFSET     (PAGE_CACHE_SIZE - sizeof(struct node_footer) \
-                       - sizeof(__le32) * (DEF_ADDRS_PER_INODE + 5 - 1))
+#define INLINE_DATA_OFFSET     (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
+                               sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
+                               DEF_NIDS_PER_INODE - 1))
 
 struct f2fs_inode {
        __le16 i_mode;                  /* file mode */
@@ -194,7 +204,7 @@ struct f2fs_inode {
 
        __le32 i_addr[DEF_ADDRS_PER_INODE];     /* Pointers to data blocks */
 
-       __le32 i_nid[5];                /* direct(2), indirect(2),
+       __le32 i_nid[DEF_NIDS_PER_INODE];       /* direct(2), indirect(2),
                                                double_indirect(1) node id */
 } __packed;
 
index c7e17de732f3eb6b4f221bb16b6a9753a9d3a85d..12f146fa660434731df9ed2d8c2f6929dcc46217 100644 (file)
@@ -38,60 +38,32 @@ enum gpiod_flags {
 struct gpio_desc *__must_check __gpiod_get(struct device *dev,
                                         const char *con_id,
                                         enum gpiod_flags flags);
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
 struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
                                               const char *con_id,
                                               unsigned int idx,
                                               enum gpiod_flags flags);
-#define __gpiod_get_index(dev, con_id, index, flags, ...)              \
-       __gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
 struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
                                                  const char *con_id,
                                                  enum gpiod_flags flags);
-#define __gpiod_get_optional(dev, con_id, flags, ...)                  \
-       __gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
 struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
                                                        const char *con_id,
                                                        unsigned int index,
                                                        enum gpiod_flags flags);
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...)     \
-       __gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...)                           \
-       __gpiod_get_index_optional(varargs, 0)
-
 void gpiod_put(struct gpio_desc *desc);
 
 struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
                                              const char *con_id,
                                              enum gpiod_flags flags);
-#define __devm_gpiod_get(dev, con_id, flags, ...)                      \
-       __devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
 struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
                                                    const char *con_id,
                                                    unsigned int idx,
                                                    enum gpiod_flags flags);
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...)         \
-       __devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
 struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
                                                       const char *con_id,
                                                       enum gpiod_flags flags);
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...)             \
-       __devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...)                            \
-       __devm_gpiod_get_optional(varargs, 0)
 struct gpio_desc *__must_check
 __devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
                              unsigned int index, enum gpiod_flags flags);
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...)        \
-       __devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...)                      \
-       __devm_gpiod_get_index_optional(varargs, 0)
-
 void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
 
 int gpiod_get_direction(const struct gpio_desc *desc);
@@ -124,27 +96,31 @@ int desc_to_gpio(const struct gpio_desc *desc);
 
 #else /* CONFIG_GPIOLIB */
 
-static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
-                                                      const char *con_id)
+static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+                                               const char *con_id,
+                                               enum gpiod_flags flags)
 {
        return ERR_PTR(-ENOSYS);
 }
-static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
-                                                            const char *con_id,
-                                                            unsigned int idx)
+static inline struct gpio_desc *__must_check
+__gpiod_get_index(struct device *dev,
+                 const char *con_id,
+                 unsigned int idx,
+                 enum gpiod_flags flags)
 {
        return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-gpiod_get_optional(struct device *dev, const char *con_id)
+__gpiod_get_optional(struct device *dev, const char *con_id,
+                    enum gpiod_flags flags)
 {
        return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-gpiod_get_index_optional(struct device *dev, const char *con_id,
-                        unsigned int index)
+__gpiod_get_index_optional(struct device *dev, const char *con_id,
+                          unsigned int index, enum gpiod_flags flags)
 {
        return ERR_PTR(-ENOSYS);
 }
@@ -157,28 +133,33 @@ static inline void gpiod_put(struct gpio_desc *desc)
        WARN_ON(1);
 }
 
-static inline struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
-                                                           const char *con_id)
+static inline struct gpio_desc *__must_check
+__devm_gpiod_get(struct device *dev,
+                const char *con_id,
+                enum gpiod_flags flags)
 {
        return ERR_PTR(-ENOSYS);
 }
 static inline
-struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
-                                                   const char *con_id,
-                                                   unsigned int idx)
+struct gpio_desc *__must_check
+__devm_gpiod_get_index(struct device *dev,
+                      const char *con_id,
+                      unsigned int idx,
+                      enum gpiod_flags flags)
 {
        return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-devm_gpiod_get_optional(struct device *dev, const char *con_id)
+__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+                         enum gpiod_flags flags)
 {
        return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
-                             unsigned int index)
+__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+                               unsigned int index, enum gpiod_flags flags)
 {
        return ERR_PTR(-ENOSYS);
 }
@@ -303,9 +284,43 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
        return -EINVAL;
 }
 
-
 #endif /* CONFIG_GPIOLIB */
 
+/*
+ * Vararg hacks! This is done to transition the kernel to always pass
+ * the optional flags argument to the functions below. During the
+ * transition phase these vararg macros make both old- and new-style
+ * code compile, but when all calls to the older API are removed, they
+ * should go away and the __gpiod_get() etc. functions above should be
+ * renamed to just gpiod_get() etc.
+ */
+#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
+#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
+#define __gpiod_get_index(dev, con_id, index, flags, ...)              \
+       __gpiod_get_index(dev, con_id, index, flags)
+#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
+#define __gpiod_get_optional(dev, con_id, flags, ...)                  \
+       __gpiod_get_optional(dev, con_id, flags)
+#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
+#define __gpiod_get_index_optional(dev, con_id, index, flags, ...)     \
+       __gpiod_get_index_optional(dev, con_id, index, flags)
+#define gpiod_get_index_optional(varargs...)                           \
+       __gpiod_get_index_optional(varargs, 0)
+#define __devm_gpiod_get(dev, con_id, flags, ...)                      \
+       __devm_gpiod_get(dev, con_id, flags)
+#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
+#define __devm_gpiod_get_index(dev, con_id, index, flags, ...)         \
+       __devm_gpiod_get_index(dev, con_id, index, flags)
+#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
+#define __devm_gpiod_get_optional(dev, con_id, flags, ...)             \
+       __devm_gpiod_get_optional(dev, con_id, flags)
+#define devm_gpiod_get_optional(varargs...)                            \
+       __devm_gpiod_get_optional(varargs, 0)
+#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...)        \
+       __devm_gpiod_get_index_optional(dev, con_id, index, flags)
+#define devm_gpiod_get_index_optional(varargs...)                      \
+       __devm_gpiod_get_index_optional(varargs, 0)
+
 #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
 
 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
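For readers following the transition described in the hunk above, here is a minimal userspace sketch of the vararg trick. It is illustrative only: my_get()/__my_get() are made-up names, not kernel API, and the named-variadic-macro syntax (varargs...) is the same GNU extension the kernel header relies on. It shows how a legacy call without flags and a new-style call with flags both resolve to the same function:

#include <stdio.h>

static int __my_get(const char *con_id, int flags)
{
        printf("con_id=%s flags=%d\n", con_id, flags);
        return 0;
}

/* Swallow any extra trailing argument and forward only the fixed ones;
 * inside its own expansion the macro name refers back to the function. */
#define __my_get(con_id, flags, ...) __my_get(con_id, flags)
/* Legacy callers omit flags; the appended 0 becomes the default. */
#define my_get(varargs...) __my_get(varargs, 0)

int main(void)
{
        my_get("led");          /* old style: expands to __my_get("led", 0) */
        my_get("led", 4);       /* new style: the trailing default 0 is dropped */
        return 0;
}

Once every caller passes flags explicitly, the macros can be deleted and the double-underscore functions renamed back, exactly as the comment above says.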
index bd1754c7ecef49a121a74fea6efaac15234384a5..d0494c399392c801dfce96d8a141d935d750a22e 100644 (file)
@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
        u64 hash = val;
 
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+       hash = hash * GOLDEN_RATIO_PRIME_64;
+#else
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        u64 n = hash;
        n <<= 18;
@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
        hash += n;
        n <<= 2;
        hash += n;
+#endif
 
        /* High bits are more random, so use them. */
        return hash >> (64 - bits);
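The new branch replaces the long shift-and-add chain with a single 64-bit multiply when the architecture advertises a fast multiplier. Below is a standalone sketch of that fast path; the GOLDEN_RATIO_PRIME_64 value is quoted from linux/hash.h of this era, so verify it against your tree before relying on it:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

/* Fast-multiplier variant of hash_64(): one multiply, then keep the
 * high bits, which are the most thoroughly mixed. */
static uint64_t hash_64_fast(uint64_t val, unsigned int bits)
{
        return (val * GOLDEN_RATIO_PRIME_64) >> (64 - bits);
}

int main(void)
{
        printf("%llu\n", (unsigned long long)hash_64_fast(0x12345678ULL, 10));
        return 0;
}

The #else branch keeps the shift-and-add decomposition of the same multiplication for CPUs where a 64-bit multiply is slow.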
index 1f44466c1e9d7b056cbdcd4f7c043018f069fad4..c367cbdf73ab1a5b83f1af48c848be21b466167d 100644 (file)
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
 #define SEC_JIFFIE_SC (32 - SHIFT_HZ)
 #endif
 #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
 #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
                                 TICK_NSEC -1) / (u64)TICK_NSEC))
 
 #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
                                         TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION  \
-                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
-                                        TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion.  See there
- * for more details.  It is the scaled resolution rounding value.  Note
- * that it is a 64-bit value.  Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
 /*
  * The maximum jiffie value is (MAX_INT >> 1).  Here we translate that
  * into seconds.  The 64-bit case will overflow if we are not careful,
index 6a599dce7f9d4678e4f6f7f082514308f0636942..e436864721971c81383a323bf3951e19632475a0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
+#include <linux/timer.h>
 #include <linux/workqueue.h>
 
 struct device;
@@ -68,7 +69,7 @@ struct led_classdev {
        const char              *default_trigger;       /* Trigger to use */
 
        unsigned long            blink_delay_on, blink_delay_off;
-       struct delayed_work      blink_work;
+       struct timer_list        blink_timer;
        int                      blink_brightness;
 
        struct work_struct      set_brightness_work;
index 071f6b234604cf770ebfab55e1ec61392e364a83..511c6e0d21a9ab0045c9dd8b2dc7f9c488730279 100644 (file)
@@ -1196,6 +1196,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
                                  enum mlx4_net_trans_rule_id id);
 int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+                         int port, int qpn, u16 prio, u64 *reg_id);
+
 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
                          int i, int val);
 
index 3083c53e027049196f8f18561cd1d13f51b72a07..c300db3ae2852b88321c618daeb10bdc4eb5b21b 100644 (file)
@@ -949,7 +949,7 @@ static inline int jedec_feature(struct nand_chip *chip)
                : 0;
 }
 
-/**
+/*
  * struct nand_sdr_timings - SDR NAND chip timings
  *
  * This struct defines the timing requirements of a SDR NAND chip.
index 38377392d08261291ceb1fcd177718484e05581c..c8e388e5fcccfa604d087ce33ccee5ace528e598 100644 (file)
@@ -3176,7 +3176,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
 }
 
 /**
- *  __dev_uc_unsync - Remove synchonized addresses from device
+ *  __dev_uc_unsync - Remove synchronized addresses from device
  *  @dev:  device to sync
  *  @unsync: function to call if address should be removed
  *
@@ -3220,7 +3220,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
 }
 
 /**
- *  __dev_mc_unsync - Remove synchonized addresses from device
+ *  __dev_mc_unsync - Remove synchronized addresses from device
  *  @dev:  device to sync
  *  @unsync: function to call if address should be removed
  *
index 2077489f98873bcbe4ca083cd1f0eb1b99eeab5e..2517ece988209a611b324a0bb8ade2b566eeb645 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/in6.h>
 #include <linux/wait.h>
 #include <linux/list.h>
+#include <linux/static_key.h>
 #include <uapi/linux/netfilter.h>
 #ifdef CONFIG_NETFILTER
 static inline int NF_DROP_GETERR(int verdict)
@@ -99,9 +100,9 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
-#if defined(CONFIG_JUMP_LABEL)
-#include <linux/static_key.h>
+#ifdef HAVE_JUMP_LABEL
 extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+
 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 {
        if (__builtin_constant_p(pf) &&
index 7c1d252b20c08de0ec725976c67b6fc13326e2e0..ebc4c76ffb737bae3d9be1a6325752745254eaed 100644 (file)
@@ -60,7 +60,7 @@ struct generic_pm_domain {
        struct mutex lock;
        struct dev_power_governor *gov;
        struct work_struct power_off_work;
-       char *name;
+       const char *name;
        unsigned int in_progress;       /* Number of devices being suspended now */
        atomic_t sd_count;      /* Number of subdomains with power "on" */
        enum gpd_status status; /* Current state of the domain */
index bbe03a1924c04182be79d128854557ebb262b6e3..4efa1ed8a2b0b92ebdd7160948e45b79b0e19fdf 100644 (file)
@@ -218,6 +218,8 @@ enum regulator_type {
  * @linear_min_sel: Minimal selector for starting linear mapping
  * @fixed_uV: Fixed voltage of rails.
  * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @linear_ranges: A constant table of possible voltage ranges.
+ * @n_linear_ranges: Number of entries in the @linear_ranges table.
  * @volt_table: Voltage mapping table (if table based mapping)
  *
  * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
index 730e638c55899ba9bfff37a045386af5ee0f5cfe..0b08d05d470b56cacec467d5368810517d4a1752 100644 (file)
@@ -85,6 +85,7 @@ struct regulator_state {
  *           bootloader then it will be enabled when the constraints are
  *           applied.
  * @apply_uV: Apply the voltage constraint when initialising.
+ * @ramp_disable: Disable ramp delay when initialising or when setting voltage.
  *
  * @input_uV: Input voltage for regulator when supplied by another regulator.
  *
index 059052306831496d88de2f143930a32593b8ac79..9a82c7dc3fdd9f1fd35d62fb2ab0734516c9ad6c 100644 (file)
@@ -183,13 +183,8 @@ static inline bool tick_nohz_full_cpu(int cpu)
 
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
+extern void tick_nohz_full_kick(void);
 extern void tick_nohz_full_kick_cpu(int cpu);
-
-static inline void tick_nohz_full_kick(void)
-{
-       tick_nohz_full_kick_cpu(smp_processor_id());
-}
-
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
index b5d5af3aa469961af36b30b0aea2da1d4e959bac..6f884e6c731e41daccdf4119cb1f0f8a4ee9335f 100644 (file)
@@ -464,6 +464,8 @@ struct hci_conn_params {
                HCI_AUTO_CONN_ALWAYS,
                HCI_AUTO_CONN_LINK_LOSS,
        } auto_connect;
+
+       struct hci_conn *conn;
 };
 
 extern struct list_head hci_dev_list;
index e2070960bac009223c1c6caa1324d6323a478a7d..8170f8d7052b21f5b8e82d8cb7c1c52a56042378 100644 (file)
@@ -16,7 +16,6 @@ struct netns_sysctl_lowpan {
 struct netns_ieee802154_lowpan {
        struct netns_sysctl_lowpan sysctl;
        struct netns_frags      frags;
-       int                     max_dsize;
 };
 
 #endif
index 259992444e80ae0b88eaec4ff345d23fd8f81c75..dad7ab20a8cb204f08b2ecda724d5c1db5138ec8 100644 (file)
@@ -167,7 +167,7 @@ struct ieee80211_reg_rule {
 struct ieee80211_regdomain {
        struct rcu_head rcu_head;
        u32 n_reg_rules;
-       char alpha2[2];
+       char alpha2[3];
        enum nl80211_dfs_regions dfs_region;
        struct ieee80211_reg_rule reg_rules[];
 };
index f6e7397e799dbd9dd1007c8f4e494c4b3405d28e..9fbd856e67139f9525b2951b147ed6f9223f7540 100644 (file)
@@ -320,6 +320,19 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
        return asoc ? asoc->assoc_id : 0;
 }
 
+static inline enum sctp_sstat_state
+sctp_assoc_to_state(const struct sctp_association *asoc)
+{
+       /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we
+        * got rid of it in kernel space. Therefore SCTP_CLOSED et al
+        * start at 1 in user space but at 0 in kernel space. Since we
+        * cannot break user space and SCTP_EMPTY is exposed there, we
+        * need to fix this up with an ugly offset so we do not break
+        * applications. :(
+        */
+       return asoc->state + 1;
+}
+
 /* Look up the association by its id.  */
 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
 
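A tiny illustration of the off-by-one mapping sctp_assoc_to_state() performs above; the enum names here are shortened placeholders, not the full uapi or kernel state lists:

#include <stdio.h>

enum uapi_state { UAPI_SCTP_EMPTY, UAPI_SCTP_CLOSED, UAPI_SCTP_ESTABLISHED };
enum kern_state { KERN_SCTP_CLOSED, KERN_SCTP_ESTABLISHED };

/* Kernel-internal states start at 0; the uapi keeps the dummy
 * SCTP_EMPTY at 0, so exporting a state means adding 1. */
static enum uapi_state to_uapi(enum kern_state s)
{
        return (enum uapi_state)(s + 1);
}

int main(void)
{
        printf("%d\n", to_uapi(KERN_SCTP_ESTABLISHED) == UAPI_SCTP_ESTABLISHED);
        return 0;
}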
index 7f2ab72f321a4bd437800ab551a5aff3a9eab44a..b9a5bd0ed9f3a6b7bc0d087b2b2ae5563b4a0304 100644 (file)
@@ -2165,9 +2165,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
         */
        if (sock_flag(sk, SOCK_RCVTSTAMP) ||
            (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
-           (kt.tv64 &&
-            (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
-             skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
+           (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
            (hwtstamps->hwtstamp.tv64 &&
             (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
                __sock_recv_timestamp(msg, sk, skb);
index e52ef5357e088b568e958de0d8545953ab90cef5..c52b68577cb0edc0d00732cc67441a4ac30595be 100644 (file)
@@ -290,7 +290,7 @@ struct wimax_dev;
  *     This operation has to be synchronous, and return only when the
  *     reset is complete. In case of having had to resort to bus/cold
  *     reset implying a device disconnection, the call is allowed to
- *     return inmediately.
+ *     return immediately.
  *     NOTE: wimax_dev->mutex is NOT locked when this op is being
  *     called; however, wimax_dev->mutex_reset IS locked to ensure
  *     serialization of calls to wimax_reset().
index be6ecae247b0c608df5aa2554079d6bc4efca482..c83a334dd00fa9186404917183115fca3124670c 100644 (file)
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE | \
                  SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \
        .tlv.c = (snd_soc_bytes_tlv_callback), \
-       .info = snd_soc_info_bytes_ext, \
+       .info = snd_soc_bytes_info_ext, \
        .private_value = (unsigned long)&(struct soc_bytes_ext) \
                {.max = xcount, .get = xhandler_get, .put = xhandler_put, } }
 #define SOC_SINGLE_XR_SX(xname, xregbase, xregcount, xnbits, \
index 1c09820df58564f8d0430d996998edc6f8d893c0..3608bebd3d9c5e58a0349c6240a8670bd3d8bbfb 100644 (file)
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(softirq,
  * @vec_nr:  softirq vector number
  *
  * When used in combination with the softirq_exit tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
  */
 DEFINE_EVENT(softirq, softirq_entry,
 
@@ -121,7 +121,7 @@ DEFINE_EVENT(softirq, softirq_entry,
  * @vec_nr:  softirq vector number
  *
  * When used in combination with the softirq_entry tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
  */
 DEFINE_EVENT(softirq, softirq_exit,
 
index 24e9033f8b3f9f10fde3467b9122d59c9621edcd..be88166349a128ca80e521ee2cef80ca8617a702 100644 (file)
@@ -240,6 +240,7 @@ header-y += matroxfb.h
 header-y += mdio.h
 header-y += media.h
 header-y += mei.h
+header-y += memfd.h
 header-y += mempolicy.h
 header-y += meye.h
 header-y += mic_common.h
@@ -395,6 +396,7 @@ header-y += un.h
 header-y += unistd.h
 header-y += unix_diag.h
 header-y += usbdevice_fs.h
+header-y += usbip.h
 header-y += utime.h
 header-y += utsname.h
 header-y += uuid.h
index 19df18c9b8be371ec5d13fc09f69fc33f2ff7769..1874ebe9ac1e505b0215a90f03a9b2ce57f6b2ff 100644 (file)
@@ -165,6 +165,7 @@ struct input_keymap_entry {
 #define INPUT_PROP_BUTTONPAD           0x02    /* has button(s) under pad */
 #define INPUT_PROP_SEMI_MT             0x03    /* touch rectangle only */
 #define INPUT_PROP_TOPBUTTONPAD                0x04    /* softbuttons at top of pad */
+#define INPUT_PROP_POINTING_STICK      0x05    /* is a pointing stick */
 
 #define INPUT_PROP_MAX                 0x1f
 #define INPUT_PROP_CNT                 (INPUT_PROP_MAX + 1)
index 131a6ccdba25693e6899b7813d99a6934e846ce4..14334d0161d5db26746af589920d10757209a5fb 100644 (file)
@@ -53,6 +53,9 @@
 /* operation as Dom0 is supported */
 #define XENFEAT_dom0                      11
 
+/* Xen also maps grant references at pfn = mfn */
+#define XENFEAT_grant_map_identity        12
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
index 7dc8788cfd52dd222856057840a8d0a45a19816e..940aced4ed00ca5c1de330c6609c78f6a7c5b3c2 100644 (file)
@@ -1035,6 +1035,11 @@ static void cgroup_get(struct cgroup *cgrp)
        css_get(&cgrp->self);
 }
 
+static bool cgroup_tryget(struct cgroup *cgrp)
+{
+       return css_tryget(&cgrp->self);
+}
+
 static void cgroup_put(struct cgroup *cgrp)
 {
        css_put(&cgrp->self);
@@ -1147,7 +1152,8 @@ static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
         * protection against removal.  Ensure @cgrp stays accessible and
         * break the active_ref protection.
         */
-       cgroup_get(cgrp);
+       if (!cgroup_tryget(cgrp))
+               return NULL;
        kernfs_break_active_protection(kn);
 
        mutex_lock(&cgroup_mutex);
@@ -3271,8 +3277,17 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
        struct cftype *cft;
 
-       for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
-               cft->flags |= __CFTYPE_NOT_ON_DFL;
+       /*
+        * If cgroup_legacy_files_on_dfl, we want to show the legacy files
+        * on the default hierarchy, but only if the target subsystem hasn't
+        * been updated for the default hierarchy yet.
+        */
+       if (!cgroup_legacy_files_on_dfl ||
+           ss->dfl_cftypes != ss->legacy_cftypes) {
+               for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+                       cft->flags |= __CFTYPE_NOT_ON_DFL;
+       }
+
        return cgroup_add_cftypes(ss, cfts);
 }
 
@@ -4387,6 +4402,15 @@ static void css_release_work_fn(struct work_struct *work)
                /* cgroup release path */
                cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
                cgrp->id = -1;
+
+               /*
+                * There are two control paths which try to determine
+                * cgroup from dentry without going through kernfs -
+                * cgroupstats_build() and css_tryget_online_from_dir().
+                * Those are supported by RCU protecting clearing of
+                * cgrp->kn->priv backpointer.
+                */
+               RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
        }
 
        mutex_unlock(&cgroup_mutex);
@@ -4543,6 +4567,11 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
        struct cftype *base_files;
        int ssid, ret;
 
+       /* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
+        */
+       if (strchr(name, '\n'))
+               return -EINVAL;
+
        parent = cgroup_kn_lock_live(parent_kn);
        if (!parent)
                return -ENODEV;
@@ -4820,16 +4849,6 @@ static int cgroup_rmdir(struct kernfs_node *kn)
 
        cgroup_kn_unlock(kn);
 
-       /*
-        * There are two control paths which try to determine cgroup from
-        * dentry without going through kernfs - cgroupstats_build() and
-        * css_tryget_online_from_dir().  Those are supported by RCU
-        * protecting clearing of cgrp->kn->priv backpointer, which should
-        * happen after all files under it have been removed.
-        */
-       if (!ret)
-               RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL);
-
        cgroup_put(cgrp);
        return ret;
 }
@@ -5416,7 +5435,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
        /*
         * This path doesn't originate from kernfs and @kn could already
         * have been or be removed at any point.  @kn->priv is RCU
-        * protected for this access.  See cgroup_rmdir() for details.
+        * protected for this access.  See css_release_work_fn() for details.
         */
        cgrp = rcu_dereference(kn->priv);
        if (cgrp)
index 633394f442f8a3cf63bc30b160b5345589ea57b7..ebb3c369d03d594067bda50bdffedd32159026e3 100644 (file)
@@ -226,7 +226,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
        ret = hrtimer_nanosleep_restart(restart);
        set_fs(oldfs);
 
-       if (ret) {
+       if (ret == -ERESTART_RESTARTBLOCK) {
                rmtp = restart->nanosleep.compat_rmtp;
 
                if (rmtp && compat_put_timespec(&rmt, rmtp))
@@ -256,7 +256,26 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
                                HRTIMER_MODE_REL, CLOCK_MONOTONIC);
        set_fs(oldfs);
 
-       if (ret) {
+       /*
+        * hrtimer_nanosleep() can only return 0 or
+        * -ERESTART_RESTARTBLOCK here because:
+        *
+        * - we call it with HRTIMER_MODE_REL and therefore exclude the
+        *   -ERESTARTNOHAND return path.
+        *
+        * - we supply the rmtp argument from the task stack (due to
+        *   the necessary compat conversion). So the update cannot
+        *   fail, which excludes the -EFAULT return path as well. If
+        *   it fails nevertheless we have a bigger problem and won't
+        *   reach this place anymore.
+        *
+        * - if the return value is 0, we do not have to update rmtp
+        *   because there is no remaining time.
+        *
+        * We check for -ERESTART_RESTARTBLOCK nevertheless in case the
+        * core implementation decides to return random nonsense.
+        */
+       if (ret == -ERESTART_RESTARTBLOCK) {
                struct restart_block *restart
                        = &current_thread_info()->restart_block;
 
@@ -266,7 +285,6 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
                if (rmtp && compat_put_timespec(&rmt, rmtp))
                        return -EFAULT;
        }
-
        return ret;
 }
 
index d3a9d946d0b7f918e5622a7c6e2ef8c9fe88775c..815d7af2ffe8c6b195e729625ef5bfecb399accb 100644 (file)
@@ -2592,6 +2592,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
         * shared futexes. We need to compare the keys:
         */
        if (match_futex(&q.key, &key2)) {
+               queue_unlock(hb);
                ret = -EINVAL;
                goto out_put_keys;
        }
index a2b28a2fd7b16d270c45b3c40719a35d0034c58d..6223fab9a9d22b7bedd5e6b1f23ccd8a0347d6d1 100644 (file)
@@ -517,6 +517,7 @@ out:
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
 
 /**
  *     handle_edge_irq - edge type IRQ handler
index e30ac0fe61c3ded5533cb4db8e95b2d41dcf2ac8..0aa69ea1d8fdcfa68046aa75b03c4373783a02fa 100644 (file)
@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
  */
 static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
 {
-       long ret;
+       long t1, t2;
 
-       ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+       t1 = kptr_obfuscate((long)v1, type);
+       t2 = kptr_obfuscate((long)v2, type);
 
-       return (ret < 0) | ((ret > 0) << 1);
+       return (t1 < t2) | ((t1 > t2) << 1);
 }
 
 /* The caller must have pinned the task */
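The reason the subtraction in kcmp_ptr() had to go: when the two obfuscated values are far apart, the signed difference overflows, so its sign says nothing about the ordering. A small demonstration with plain longs (illustrative only, not the kernel helper; signed overflow is undefined behaviour and merely tends to wrap on common compilers):

#include <limits.h>
#include <stdio.h>

static int cmp_by_sub(long t1, long t2)
{
        long ret = t1 - t2;     /* overflows for distant values: undefined, typically wraps */
        return (ret < 0) | ((ret > 0) << 1);
}

static int cmp_direct(long t1, long t2)
{
        return (t1 < t2) | ((t1 > t2) << 1);    /* 0: equal, 1: less, 2: greater */
}

int main(void)
{
        long a = LONG_MAX, b = -1;
        /* a > b, so the correct answer is 2; the subtracting version
         * typically wraps negative and reports 1 ("less"). */
        printf("sub=%d direct=%d\n", cmp_by_sub(a, b), cmp_direct(a, b));
        return 0;
}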
index 5d49dcac2537045a6e23b4e1941164e2f59a0402..2df883a9d3cb26bcf35e2d2e2294226793a2563b 100644 (file)
@@ -179,6 +179,7 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
 
 #ifdef CONFIG_SUSPEND
 /* kernel/power/suspend.c */
+extern const char *pm_labels[];
 extern const char *pm_states[];
 
 extern int suspend_devices_and_enter(suspend_state_t state);
index 6dadb25cb0d8023f987c2c0961f1f81b7fedb0ce..18c62195660f6c6c458d74346ee7979ec4388db4 100644 (file)
@@ -31,7 +31,7 @@
 
 #include "power.h"
 
-static const char *pm_labels[] = { "mem", "standby", "freeze", };
+const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
 const char *pm_states[PM_SUSPEND_MAX];
 
 static const struct platform_suspend_ops *suspend_ops;
index 2f524928b6aac0a1e1cd108aef54d23c88a291a9..bd91bc177c93a65ca757c70467cd6e235af6a67c 100644 (file)
@@ -129,20 +129,20 @@ static int __init has_wakealarm(struct device *dev, const void *data)
  * at startup time.  They're normally disabled, for faster boot and because
  * we can't know which states really work on this particular system.
  */
-static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
+static const char *test_state_label __initdata;
 
 static char warn_bad_state[] __initdata =
        KERN_WARNING "PM: can't test '%s' suspend state\n";
 
 static int __init setup_test_suspend(char *value)
 {
-       suspend_state_t i;
+       int i;
 
        /* "=mem" ==> "mem" */
        value++;
-       for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
-               if (!strcmp(pm_states[i], value)) {
-                       test_state = i;
+       for (i = 0; pm_labels[i]; i++)
+               if (!strcmp(pm_labels[i], value)) {
+                       test_state_label = pm_labels[i];
                        return 0;
                }
 
@@ -158,13 +158,21 @@ static int __init test_suspend(void)
 
        struct rtc_device       *rtc = NULL;
        struct device           *dev;
+       suspend_state_t test_state;
 
        /* PM is initialized by now; is that state testable? */
-       if (test_state == PM_SUSPEND_ON)
-               goto done;
-       if (!pm_states[test_state]) {
-               printk(warn_bad_state, pm_states[test_state]);
-               goto done;
+       if (!test_state_label)
+               return 0;
+
+       for (test_state = PM_SUSPEND_MIN; test_state < PM_SUSPEND_MAX; test_state++) {
+               const char *state_label = pm_states[test_state];
+
+               if (state_label && !strcmp(test_state_label, state_label))
+                       break;
+       }
+       if (test_state == PM_SUSPEND_MAX) {
+               printk(warn_bad_state, test_state_label);
+               return 0;
        }
 
        /* RTCs have initialized by now too ... can we use one? */
@@ -173,13 +181,12 @@ static int __init test_suspend(void)
                rtc = rtc_class_open(dev_name(dev));
        if (!rtc) {
                printk(warn_no_rtc);
-               goto done;
+               return 0;
        }
 
        /* go for it */
        test_wakealarm(rtc, test_state);
        rtc_class_close(rtc);
-done:
        return 0;
 }
 late_initcall(test_suspend);
index e04c455a0e3860b2aa5784d4c2522a21300525e4..1ce770687ea83c6a05c562591fe475b5f2cc52d0 100644 (file)
@@ -1665,15 +1665,15 @@ asmlinkage int vprintk_emit(int facility, int level,
        raw_spin_lock(&logbuf_lock);
        logbuf_cpu = this_cpu;
 
-       if (recursion_bug) {
+       if (unlikely(recursion_bug)) {
                static const char recursion_msg[] =
                        "BUG: recent printk recursion!";
 
                recursion_bug = 0;
-               text_len = strlen(recursion_msg);
                /* emit KERN_CRIT message */
                printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
-                                        NULL, 0, recursion_msg, text_len);
+                                        NULL, 0, recursion_msg,
+                                        strlen(recursion_msg));
        }
 
        /*
index 71e64c718f75d3f4d419c31f4546cf3fab76a4f4..6a86eb7bac45e7b540f1ab9ead7d412cf6ef60fa 100644 (file)
@@ -358,7 +358,7 @@ struct rcu_data {
        struct rcu_head **nocb_gp_tail;
        long nocb_gp_count;
        long nocb_gp_count_lazy;
-       bool nocb_leader_wake;          /* Is the nocb leader thread awake? */
+       bool nocb_leader_sleep;         /* Is the nocb leader thread asleep? */
        struct rcu_data *nocb_next_follower;
                                        /* Next follower in wakeup chain. */
 
index 00dc411e9676f92db54e0bbb3489bea9b87e731a..a7997e272564916d8ec2232b32867add334b6c77 100644 (file)
@@ -2074,9 +2074,9 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 
        if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
                return;
-       if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) {
+       if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
                /* Prior xchg orders against prior callback enqueue. */
-               ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true;
+               ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
                wake_up(&rdp_leader->nocb_wq);
        }
 }
@@ -2253,7 +2253,7 @@ wait_again:
        if (!rcu_nocb_poll) {
                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
                wait_event_interruptible(my_rdp->nocb_wq,
-                                        ACCESS_ONCE(my_rdp->nocb_leader_wake));
+                               !ACCESS_ONCE(my_rdp->nocb_leader_sleep));
                /* Memory barrier handled by smp_mb() calls below and repoll. */
        } else if (firsttime) {
                firsttime = false; /* Don't drown trace log with "Poll"! */
@@ -2292,12 +2292,12 @@ wait_again:
                schedule_timeout_interruptible(1);
 
                /* Rescan in case we were a victim of memory ordering. */
-               my_rdp->nocb_leader_wake = false;
-               smp_mb();  /* Ensure _wake false before scan. */
+               my_rdp->nocb_leader_sleep = true;
+               smp_mb();  /* Ensure _sleep true before scan. */
                for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
                        if (ACCESS_ONCE(rdp->nocb_head)) {
                                /* Found CB, so short-circuit next wait. */
-                               my_rdp->nocb_leader_wake = true;
+                               my_rdp->nocb_leader_sleep = false;
                                break;
                        }
                goto wait_again;
@@ -2307,17 +2307,17 @@ wait_again:
        rcu_nocb_wait_gp(my_rdp);
 
        /*
-        * We left ->nocb_leader_wake set to reduce cache thrashing.
-        * We clear it now, but recheck for new callbacks while
+        * We left ->nocb_leader_sleep unset to reduce cache thrashing.
+        * We set it now, but recheck for new callbacks while
         * traversing our follower list.
         */
-       my_rdp->nocb_leader_wake = false;
-       smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */
+       my_rdp->nocb_leader_sleep = true;
+       smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */
 
        /* Each pass through the following loop wakes a follower, if needed. */
        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
                if (ACCESS_ONCE(rdp->nocb_head))
-                       my_rdp->nocb_leader_wake = true; /* No need to wait. */
+                       my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
                if (!rdp->nocb_gp_head)
                        continue; /* No CBs, so no need to wake follower. */
 
index 4aec4a457431ed50ab459e81ba622674410bff47..a7077d3ae52fe2e9bcd43bf076d748eb5bf6ef2a 100644 (file)
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
 static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
                                                        ktime_t now)
 {
+       unsigned long flags;
        struct k_itimer *ptr = container_of(alarm, struct k_itimer,
                                                it.alarm.alarmtimer);
-       if (posix_timer_event(ptr, 0) != 0)
-               ptr->it_overrun++;
+       enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+
+       spin_lock_irqsave(&ptr->it_lock, flags);
+       if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
+               if (posix_timer_event(ptr, 0) != 0)
+                       ptr->it_overrun++;
+       }
 
        /* Re-add periodic timers */
        if (ptr->it.alarm.interval.tv64) {
                ptr->it_overrun += alarm_forward(alarm, now,
                                                ptr->it.alarm.interval);
-               return ALARMTIMER_RESTART;
+               result = ALARMTIMER_RESTART;
        }
-       return ALARMTIMER_NORESTART;
+       spin_unlock_irqrestore(&ptr->it_lock, flags);
+
+       return result;
 }
 
 /**
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
  * @new_timer: k_itimer pointer
  * @cur_setting: itimerspec data to fill
  *
- * Copies the itimerspec data out from the k_itimer
+ * Copies out the current itimerspec data
  */
 static void alarm_timer_get(struct k_itimer *timr,
                                struct itimerspec *cur_setting)
 {
-       memset(cur_setting, 0, sizeof(struct itimerspec));
+       ktime_t relative_expiry_time =
+               alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
+
+       if (ktime_to_ns(relative_expiry_time) > 0) {
+               cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
+       } else {
+               cur_setting->it_value.tv_sec = 0;
+               cur_setting->it_value.tv_nsec = 0;
+       }
 
-       cur_setting->it_interval =
-                       ktime_to_timespec(timr->it.alarm.interval);
-       cur_setting->it_value =
-               ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
-       return;
+       cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
 }
 
 /**
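The rewritten alarm_timer_get() above reports the time left until expiry and clamps it at zero for timers that have already fired, instead of handing back the raw absolute expiry. A trivial sketch of that clamp, using plain nanosecond integers rather than ktime_t:

#include <stdint.h>
#include <stdio.h>

static int64_t remaining_ns(int64_t expires_ns, int64_t now_ns)
{
        int64_t rem = expires_ns - now_ns;

        return rem > 0 ? rem : 0;       /* already expired -> report zero, never negative */
}

int main(void)
{
        printf("%lld\n", (long long)remaining_ns(1500, 2000));  /* 0: already expired */
        printf("%lld\n", (long long)remaining_ns(2500, 2000));  /* 500 ns left */
        return 0;
}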
index 99aa6ee3908fbbff83923b2a8bb4c72b06e3f281..f654a8a298fad5cac36465bdcb23fddeb854f2ef 100644 (file)
@@ -224,6 +224,20 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
        .func = nohz_full_kick_work_func,
 };
 
+/*
+ * Kick this CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
+ * is NMI safe.
+ */
+void tick_nohz_full_kick(void)
+{
+       if (!tick_nohz_full_cpu(smp_processor_id()))
+               return;
+
+       irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
 /*
  * Kick the CPU if it's full dynticks in order to force it to
  * re-evaluate its dependency on the tick and restart it if necessary.
index f0294ba14634345b604ebf88ca5b5977fadfad17..a9ae20fb0b11ca879220b7c80467b7485fb3914f 100644 (file)
@@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
  * that a remainder subtract here would not do the right thing as the
  * resolution values don't fall on second boundries.  I.e. the line:
  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
  *
  * Rather, we just shift the bits off the right.
  *
  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
  * value to a scaled second value.
  */
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-       unsigned long sec = value->tv_sec;
-       long nsec = value->tv_nsec + TICK_NSEC - 1;
+       nsec = nsec + TICK_NSEC - 1;
 
        if (sec >= MAX_SEC_IN_JIFFIES){
                sec = MAX_SEC_IN_JIFFIES;
@@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value)
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+       return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
 EXPORT_SYMBOL(timespec_to_jiffies);
 
 void
@@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
 
-/* Same for "timeval"
- *
- * Well, almost.  The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
+ *
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
  */
 unsigned long
 timeval_to_jiffies(const struct timeval *value)
 {
-       unsigned long sec = value->tv_sec;
-       long usec = value->tv_usec;
-
-       if (sec >= MAX_SEC_IN_JIFFIES){
-               sec = MAX_SEC_IN_JIFFIES;
-               usec = 0;
-       }
-       return (((u64)sec * SEC_CONVERSION) +
-               (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
-                (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+       return __timespec_to_jiffies(value->tv_sec,
+                                    value->tv_usec * NSEC_PER_USEC);
 }
 EXPORT_SYMBOL(timeval_to_jiffies);
 
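To make the reuse concrete, here is a simplified userspace model of the new arrangement. It rounds with a straight division instead of the kernel's fixed-point scaling, and HZ is an assumed config value, so treat it as a sketch of the structure rather than of the exact arithmetic:

#include <stdint.h>
#include <stdio.h>

#define HZ            100U
#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL
#define TICK_NSEC     (NSEC_PER_SEC / HZ)

/* Simplified stand-in for __timespec_to_jiffies(): round nanoseconds up
 * to the next tick. The real code does this with fixed-point scaling to
 * avoid a 64-bit division on every call. */
static unsigned long my_timespec_to_jiffies(unsigned long sec, uint64_t nsec)
{
        return sec * HZ + (nsec + TICK_NSEC - 1) / TICK_NSEC;
}

/* After the patch, the timeval path just feeds usec * 1000 into the same
 * helper instead of keeping a separate, subtly mis-rounding usec path. */
static unsigned long my_timeval_to_jiffies(unsigned long sec, long usec)
{
        return my_timespec_to_jiffies(sec, (uint64_t)usec * NSEC_PER_USEC);
}

int main(void)
{
        printf("%lu\n", my_timeval_to_jiffies(1, 1));   /* 1s + 1us -> 101 ticks at HZ=100 */
        return 0;
}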
index fb4a9c2cf8d98db2256a6268fb8bbfb37673b2fe..ec1791fae96575ffe49c9aa1a2f42cbc6b21df2a 100644 (file)
@@ -442,11 +442,12 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
                tk->ntp_error = 0;
                ntp_clear();
        }
-       update_vsyscall(tk);
-       update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 
        tk_update_ktime_data(tk);
 
+       update_vsyscall(tk);
+       update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
+
        if (action & TK_MIRROR)
                memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                       sizeof(tk_core.timekeeper));
index a5ce0c7f6c302058da57fe19bda51b9286cdd883..54cf309a92a5ed7c8d4af783bd8811d0b706dfd6 100644 (file)
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
 config ARCH_USE_CMPXCHG_LOCKREF
        bool
 
+config ARCH_HAS_FAST_MULTIPLIER
+       bool
+
 config CRC_CCITT
        tristate "CRC-CCITT functions"
        help
index c0b1007011e188836616509deef5876c207d32d4..2404d03e251a64ae7634d30c46aa22a08f9b4538 100644 (file)
@@ -1723,11 +1723,13 @@ ascend_old_tree:
                shortcut = assoc_array_ptr_to_shortcut(ptr);
                slot = shortcut->parent_slot;
                cursor = shortcut->back_pointer;
+               if (!cursor)
+                       goto gc_complete;
        } else {
                slot = node->parent_slot;
                cursor = ptr;
        }
-       BUG_ON(!ptr);
+       BUG_ON(!cursor);
        node = assoc_array_ptr_to_node(cursor);
        slot++;
        goto continue_node;
@@ -1735,7 +1737,7 @@ ascend_old_tree:
 gc_complete:
        edit->set[0].to = new_root;
        assoc_array_apply_edit(edit);
-       edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+       array->nr_leaves_on_tree = nr_leaves_on_tree;
        return 0;
 
 enomem:
index b7d81ba143d13e01db3d6b92ea171c094bb7a1c6..9a5c1f2215585f35a8eea43eedd2566a6f459231 100644 (file)
@@ -11,7 +11,7 @@
 
 unsigned int __sw_hweight32(unsigned int w)
 {
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
        return __sw_hweight32((unsigned int)(w >> 32)) +
               __sw_hweight32((unsigned int)w);
 #elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x5555555555555555ul;
        w =  (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
        w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
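What the CONFIG_ARCH_HAS_FAST_MULTIPLIER branch buys: the per-byte popcounts can be summed with one multiply instead of further shifts and adds. A standalone 32-bit version of the same trick used in __sw_hweight32():

#include <stdint.h>
#include <stdio.h>

static unsigned int hweight32_mul(uint32_t w)
{
        w -= (w >> 1) & 0x55555555;                     /* pairwise counts */
        w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* nibble counts */
        w  = (w + (w >> 4)) & 0x0f0f0f0f;               /* byte counts */
        return (w * 0x01010101) >> 24;  /* one multiply sums the four byte counts */
}

int main(void)
{
        printf("%u\n", hweight32_mul(0xF0F00003));      /* prints 10 */
        return 0;
}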
index 992bf30af7595f2cbc9b385c24a06e6990dd2c49..f3c6ff596414e6f9bd1cf58071fadfd4c7a31b35 100644 (file)
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
                return check_bytes8(start, value, bytes);
 
        value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
 #else
index 70fad0c0dafb60c8f64d919b607bfb22e568afba..6ecb0d937fb5f9bde4015d20b620484f5c965f84 100644 (file)
@@ -816,6 +816,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;
 
+               /* skip hotpluggable memory regions if needed */
+               if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+                       continue;
+
                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
index ec4dcf1b9562b6299f215e754768da18a36b156e..085dc6d2f876374c07a518866ecb1d25a2e8c419 100644 (file)
@@ -2534,6 +2534,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
        unsigned long long size;
        int ret = 0;
 
+       if (mem_cgroup_is_root(memcg))
+               goto done;
 retry:
        if (consume_stock(memcg, nr_pages))
                goto done;
@@ -2611,9 +2613,7 @@ nomem:
        if (!(gfp_mask & __GFP_NOFAIL))
                return -ENOMEM;
 bypass:
-       memcg = root_mem_cgroup;
-       ret = -EINTR;
-       goto retry;
+       return -EINTR;
 
 done_restock:
        if (batch > nr_pages)
@@ -2626,6 +2626,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        unsigned long bytes = nr_pages * PAGE_SIZE;
 
+       if (mem_cgroup_is_root(memcg))
+               return;
+
        res_counter_uncharge(&memcg->res, bytes);
        if (do_swap_account)
                res_counter_uncharge(&memcg->memsw, bytes);
@@ -2640,6 +2643,9 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
 {
        unsigned long bytes = nr_pages * PAGE_SIZE;
 
+       if (mem_cgroup_is_root(memcg))
+               return;
+
        res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
        if (do_swap_account)
                res_counter_uncharge_until(&memcg->memsw,
@@ -4093,6 +4099,46 @@ out:
        return retval;
 }
 
+static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
+                                              enum mem_cgroup_stat_index idx)
+{
+       struct mem_cgroup *iter;
+       long val = 0;
+
+       /* Per-cpu values can be negative, use a signed accumulator */
+       for_each_mem_cgroup_tree(iter, memcg)
+               val += mem_cgroup_read_stat(iter, idx);
+
+       if (val < 0) /* race ? */
+               val = 0;
+       return val;
+}
+
+static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
+{
+       u64 val;
+
+       if (!mem_cgroup_is_root(memcg)) {
+               if (!swap)
+                       return res_counter_read_u64(&memcg->res, RES_USAGE);
+               else
+                       return res_counter_read_u64(&memcg->memsw, RES_USAGE);
+       }
+
+       /*
+        * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
+        * as well as in MEM_CGROUP_STAT_RSS_HUGE.
+        */
+       val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
+       val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
+
+       if (swap)
+               val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
+
+       return val << PAGE_SHIFT;
+}
+
+
 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
                               struct cftype *cft)
 {
@@ -4102,8 +4148,12 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 
        switch (type) {
        case _MEM:
+               if (name == RES_USAGE)
+                       return mem_cgroup_usage(memcg, false);
                return res_counter_read_u64(&memcg->res, name);
        case _MEMSWAP:
+               if (name == RES_USAGE)
+                       return mem_cgroup_usage(memcg, true);
                return res_counter_read_u64(&memcg->memsw, name);
        case _KMEM:
                return res_counter_read_u64(&memcg->kmem, name);
@@ -4572,10 +4622,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
        if (!t)
                goto unlock;
 
-       if (!swap)
-               usage = res_counter_read_u64(&memcg->res, RES_USAGE);
-       else
-               usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+       usage = mem_cgroup_usage(memcg, swap);
 
        /*
         * current_threshold points to threshold just below or equal to usage.
@@ -4673,10 +4720,10 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 
        if (type == _MEM) {
                thresholds = &memcg->thresholds;
-               usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+               usage = mem_cgroup_usage(memcg, false);
        } else if (type == _MEMSWAP) {
                thresholds = &memcg->memsw_thresholds;
-               usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+               usage = mem_cgroup_usage(memcg, true);
        } else
                BUG();
 
@@ -4762,10 +4809,10 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
 
        if (type == _MEM) {
                thresholds = &memcg->thresholds;
-               usage = res_counter_read_u64(&memcg->res, RES_USAGE);
+               usage = mem_cgroup_usage(memcg, false);
        } else if (type == _MEMSWAP) {
                thresholds = &memcg->memsw_thresholds;
-               usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+               usage = mem_cgroup_usage(memcg, true);
        } else
                BUG();
 
@@ -5525,9 +5572,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
                 * core guarantees its existence.
                 */
        } else {
-               res_counter_init(&memcg->res, &root_mem_cgroup->res);
-               res_counter_init(&memcg->memsw, &root_mem_cgroup->memsw);
-               res_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
+               res_counter_init(&memcg->res, NULL);
+               res_counter_init(&memcg->memsw, NULL);
+               res_counter_init(&memcg->kmem, NULL);
                /*
                 * Deeper hierachy with use_hierarchy == false doesn't make
                 * much sense so let cgroup subsystem know about this
@@ -5969,8 +6016,9 @@ static void __mem_cgroup_clear_mc(void)
        /* we must fixup refcnts and charges */
        if (mc.moved_swap) {
                /* uncharge swap account from the old cgroup */
-               res_counter_uncharge(&mc.from->memsw,
-                                    PAGE_SIZE * mc.moved_swap);
+               if (!mem_cgroup_is_root(mc.from))
+                       res_counter_uncharge(&mc.from->memsw,
+                                            PAGE_SIZE * mc.moved_swap);
 
                for (i = 0; i < mc.moved_swap; i++)
                        css_put(&mc.from->css);
@@ -5979,8 +6027,9 @@ static void __mem_cgroup_clear_mc(void)
                 * we charged both to->res and to->memsw, so we should
                 * uncharge to->res.
                 */
-               res_counter_uncharge(&mc.to->res,
-                                    PAGE_SIZE * mc.moved_swap);
+               if (!mem_cgroup_is_root(mc.to))
+                       res_counter_uncharge(&mc.to->res,
+                                            PAGE_SIZE * mc.moved_swap);
                /* we've already done css_get(mc.to) */
                mc.moved_swap = 0;
        }
@@ -6345,7 +6394,8 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
        rcu_read_lock();
        memcg = mem_cgroup_lookup(id);
        if (memcg) {
-               res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+               if (!mem_cgroup_is_root(memcg))
+                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                mem_cgroup_swap_statistics(memcg, false);
                css_put(&memcg->css);
        }
@@ -6509,12 +6559,15 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 {
        unsigned long flags;
 
-       if (nr_mem)
-               res_counter_uncharge(&memcg->res, nr_mem * PAGE_SIZE);
-       if (nr_memsw)
-               res_counter_uncharge(&memcg->memsw, nr_memsw * PAGE_SIZE);
-
-       memcg_oom_recover(memcg);
+       if (!mem_cgroup_is_root(memcg)) {
+               if (nr_mem)
+                       res_counter_uncharge(&memcg->res,
+                                            nr_mem * PAGE_SIZE);
+               if (nr_memsw)
+                       res_counter_uncharge(&memcg->memsw,
+                                            nr_memsw * PAGE_SIZE);
+               memcg_oom_recover(memcg);
+       }
 
        local_irq_save(flags);
        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
index c1f2ea4a0b9960d39940c82f4ba329dad2df74df..c0a3637cdb645b0eed66c18edcd10431bfd5417b 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -369,20 +369,20 @@ static int browse_rb(struct rb_root *root)
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                if (vma->vm_start < prev) {
-                       pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+                       pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
                        bug = 1;
                }
                if (vma->vm_start < pend) {
-                       pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+                       pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
                        bug = 1;
                }
                if (vma->vm_start > vma->vm_end) {
-                       pr_info("vm_end %lx < vm_start %lx\n",
+                       pr_emerg("vm_end %lx < vm_start %lx\n",
                                vma->vm_end, vma->vm_start);
                        bug = 1;
                }
                if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-                       pr_info("free gap %lx, correct %lx\n",
+                       pr_emerg("free gap %lx, correct %lx\n",
                               vma->rb_subtree_gap,
                               vma_compute_subtree_gap(vma));
                        bug = 1;
@@ -396,7 +396,7 @@ static int browse_rb(struct rb_root *root)
        for (nd = pn; nd; nd = rb_prev(nd))
                j++;
        if (i != j) {
-               pr_info("backwards %d, forwards %d\n", j, i);
+               pr_emerg("backwards %d, forwards %d\n", j, i);
                bug = 1;
        }
        return bug ? -1 : i;
@@ -431,17 +431,17 @@ static void validate_mm(struct mm_struct *mm)
                i++;
        }
        if (i != mm->map_count) {
-               pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+               pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
                bug = 1;
        }
        if (highest_address != mm->highest_vm_end) {
-               pr_info("mm->highest_vm_end %lx, found %lx\n",
+               pr_emerg("mm->highest_vm_end %lx, found %lx\n",
                       mm->highest_vm_end, highest_address);
                bug = 1;
        }
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count) {
-               pr_info("map_count %d rb %d\n", mm->map_count, i);
+               pr_emerg("map_count %d rb %d\n", mm->map_count, i);
                bug = 1;
        }
        BUG_ON(bug);
index 7ed58602e71b186c46db4bf8857558f6e08ac60a..7c7ab32ee5032dad07354f438b5832649aaa044b 100644 (file)
@@ -119,6 +119,8 @@ static unsigned long __init free_low_memory_core_early(void)
        phys_addr_t start, end;
        u64 i;
 
+       memblock_clear_hotplug(0, -1);
+
        for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
                count += __free_memory_core(start, end);
 
index 3707c71ae4cddbec027eac857291185c662c760a..51108165f829d777e4c69abe6109cb88ca4d7e14 100644 (file)
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            int page_start, int page_end)
 {
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-       unsigned int cpu;
+       unsigned int cpu, tcpu;
        int i;
 
        for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-                       if (!*pagep) {
-                               pcpu_free_pages(chunk, pages, populated,
-                                               page_start, page_end);
-                               return -ENOMEM;
-                       }
+                       if (!*pagep)
+                               goto err;
                }
        }
        return 0;
+
+err:
+       while (--i >= page_start)
+               __free_page(pages[pcpu_page_idx(cpu, i)]);
+
+       for_each_possible_cpu(tcpu) {
+               if (tcpu == cpu)
+                       break;
+               for (i = page_start; i < page_end; i++)
+                       __free_page(pages[pcpu_page_idx(tcpu, i)]);
+       }
+       return -ENOMEM;
 }
 
 /**
@@ -263,6 +272,7 @@ err:
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                   page_end - page_start);
        }
+       pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
        return err;
 }
 
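The new err: path in pcpu_alloc_pages() above frees exactly what was allocated: the partially filled page range of the failing CPU first, then the fully populated ranges of the CPUs handled before it. The old code called pcpu_free_pages() over the whole range, touching slots that were never filled. A generic userspace sketch of the same rollback shape (plain malloc/free and a hypothetical alloc_all() helper, not the per-cpu allocator):

#include <stdlib.h>

/* Fill pool[nouter][ninner]; on failure release only what was taken. */
static int alloc_all(void *pool[][4], int nouter, int ninner)
{
        int o, i, po;

        for (o = 0; o < nouter; o++)
                for (i = 0; i < ninner; i++) {
                        pool[o][i] = malloc(64);
                        if (!pool[o][i])
                                goto err;
                }
        return 0;

err:
        /* free the partially filled row first ... */
        while (--i >= 0)
                free(pool[o][i]);
        /* ... then every row completed before it, and nothing else */
        for (po = 0; po < o; po++)
                for (i = 0; i < ninner; i++)
                        free(pool[po][i]);
        return -1;
}

int main(void)
{
        void *pool[2][4];
        int o, i;

        if (alloc_all(pool, 2, 4))
                return 1;
        for (o = 0; o < 2; o++)
                for (i = 0; i < 4; i++)
                        free(pool[o][i]);
        return 0;
}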
index 2139e30a4b4490da90c2e2811d5ecc901ff07d61..da997f9800bdeab14b47b694b09593de89db4a17 100644 (file)
@@ -1932,6 +1932,8 @@ void __init setup_per_cpu_areas(void)
 
        if (pcpu_setup_first_chunk(ai, fc) < 0)
                panic("Failed to initialize percpu areas.");
+
+       pcpu_free_alloc_info(ai);
 }
 
 #endif /* CONFIG_SMP */
index b50dabb3f86ab49667cb939af29cef53a03b2ac2..faff6247ac8fb8769bf58f235fd92c0deab6aaf1 100644 (file)
@@ -589,6 +589,14 @@ EXPORT_SYMBOL(hci_get_route);
 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 {
        struct hci_dev *hdev = conn->hdev;
+       struct hci_conn_params *params;
+
+       params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
+                                          conn->dst_type);
+       if (params && params->conn) {
+               hci_conn_drop(params->conn);
+               params->conn = NULL;
+       }
 
        conn->state = BT_CLOSED;
 
index c32d361c0cf766478f80fb9ed8a4884f53451122..1d9c29a00568d9b6ac3eea93848ec6637e5a5a5d 100644 (file)
@@ -2536,8 +2536,13 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
 {
        struct hci_conn_params *p;
 
-       list_for_each_entry(p, &hdev->le_conn_params, list)
+       list_for_each_entry(p, &hdev->le_conn_params, list) {
+               if (p->conn) {
+                       hci_conn_drop(p->conn);
+                       p->conn = NULL;
+               }
                list_del_init(&p->action);
+       }
 
        BT_DBG("All LE pending actions cleared");
 }
@@ -2578,8 +2583,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
-       hci_conn_hash_flush(hdev);
        hci_pend_le_actions_clear(hdev);
+       hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);
 
        hci_notify(hdev, HCI_DEV_DOWN);
@@ -3727,6 +3732,9 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
        if (!params)
                return;
 
+       if (params->conn)
+               hci_conn_drop(params->conn);
+
        list_del(&params->action);
        list_del(&params->list);
        kfree(params);
@@ -3757,6 +3765,8 @@ void hci_conn_params_clear_all(struct hci_dev *hdev)
        struct hci_conn_params *params, *tmp;
 
        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
+               if (params->conn)
+                       hci_conn_drop(params->conn);
                list_del(&params->action);
                list_del(&params->list);
                kfree(params);
index be35598984d9b9b3120cd178f67f0418858b2774..a6000823f0ff34d938631bed2e39a6b17a26ac13 100644 (file)
@@ -4221,8 +4221,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        hci_proto_connect_cfm(conn, ev->status);
 
        params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
-       if (params)
+       if (params) {
                list_del_init(&params->action);
+               if (params->conn) {
+                       hci_conn_drop(params->conn);
+                       params->conn = NULL;
+               }
+       }
 
 unlock:
        hci_update_background_scan(hdev);
@@ -4304,8 +4309,16 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
 
        conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
                              HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
-       if (!IS_ERR(conn))
+       if (!IS_ERR(conn)) {
+               /* Store the pointer since we don't really have any
+                * other owner of the object besides the params that
+                * triggered it. This way we can abort the connection if
+                * the parameters get removed and keep the reference
+                * count consistent once the connection is established.
+                */
+               params->conn = conn;
                return;
+       }
 
        switch (PTR_ERR(conn)) {
        case -EBUSY:
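
Taken together, these Bluetooth hunks make the connection parameters the owner of the pending LE connection: check_pending_le_conn() stores the pointer, and every path that removes or clears the params (hci_le_conn_failed(), hci_pend_le_actions_clear(), hci_conn_params_del(), hci_conn_params_clear_all(), hci_le_conn_complete_evt()) drops that reference, which is presumably also why hci_pend_le_actions_clear() now runs before hci_conn_hash_flush(). A rough userspace sketch of the pattern, with an invented ref_put() standing in for hci_conn_drop():

#include <stdlib.h>

struct conn { int refcnt; };
struct params { struct conn *conn; };

static void ref_put(struct conn *c)
{
        if (c && --c->refcnt == 0)
                free(c);
}

/* Removing the params also releases the connection they triggered. */
static void params_del(struct params *p)
{
        if (p->conn) {
                ref_put(p->conn);
                p->conn = NULL;
        }
        free(p);
}
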
index 96238ba95f2b6954c598294243b8f65931a82284..de6662b14e1f5d7110ac1316c1362e4db6bffc5f 100644 (file)
@@ -13,8 +13,6 @@
 #include "auth_x.h"
 #include "auth_x_protocol.h"
 
-#define TEMP_TICKET_BUF_LEN    256
-
 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
 
 static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
 }
 
 static int ceph_x_decrypt(struct ceph_crypto_key *secret,
-                         void **p, void *end, void *obuf, size_t olen)
+                         void **p, void *end, void **obuf, size_t olen)
 {
        struct ceph_x_encrypt_header head;
        size_t head_len = sizeof(head);
@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
                return -EINVAL;
 
        dout("ceph_x_decrypt len %d\n", len);
-       ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
-                           *p, len);
+       if (*obuf == NULL) {
+               *obuf = kmalloc(len, GFP_NOFS);
+               if (!*obuf)
+                       return -ENOMEM;
+               olen = len;
+       }
+
+       ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
        if (ret)
                return ret;
        if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
        kfree(th);
 }
 
-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
-                                   struct ceph_crypto_key *secret,
-                                   void *buf, void *end)
+static int process_one_ticket(struct ceph_auth_client *ac,
+                             struct ceph_crypto_key *secret,
+                             void **p, void *end)
 {
        struct ceph_x_info *xi = ac->private;
-       int num;
-       void *p = buf;
+       int type;
+       u8 tkt_struct_v, blob_struct_v;
+       struct ceph_x_ticket_handler *th;
+       void *dbuf = NULL;
+       void *dp, *dend;
+       int dlen;
+       char is_enc;
+       struct timespec validity;
+       struct ceph_crypto_key old_key;
+       void *ticket_buf = NULL;
+       void *tp, *tpend;
+       struct ceph_timespec new_validity;
+       struct ceph_crypto_key new_session_key;
+       struct ceph_buffer *new_ticket_blob;
+       unsigned long new_expires, new_renew_after;
+       u64 new_secret_id;
        int ret;
-       char *dbuf;
-       char *ticket_buf;
-       u8 reply_struct_v;
 
-       dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-       if (!dbuf)
-               return -ENOMEM;
+       ceph_decode_need(p, end, sizeof(u32) + 1, bad);
 
-       ret = -ENOMEM;
-       ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-       if (!ticket_buf)
-               goto out_dbuf;
+       type = ceph_decode_32(p);
+       dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
 
-       ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
-       reply_struct_v = ceph_decode_8(&p);
-       if (reply_struct_v != 1)
+       tkt_struct_v = ceph_decode_8(p);
+       if (tkt_struct_v != 1)
                goto bad;
-       num = ceph_decode_32(&p);
-       dout("%d tickets\n", num);
-       while (num--) {
-               int type;
-               u8 tkt_struct_v, blob_struct_v;
-               struct ceph_x_ticket_handler *th;
-               void *dp, *dend;
-               int dlen;
-               char is_enc;
-               struct timespec validity;
-               struct ceph_crypto_key old_key;
-               void *tp, *tpend;
-               struct ceph_timespec new_validity;
-               struct ceph_crypto_key new_session_key;
-               struct ceph_buffer *new_ticket_blob;
-               unsigned long new_expires, new_renew_after;
-               u64 new_secret_id;
-
-               ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
-
-               type = ceph_decode_32(&p);
-               dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
-
-               tkt_struct_v = ceph_decode_8(&p);
-               if (tkt_struct_v != 1)
-                       goto bad;
-
-               th = get_ticket_handler(ac, type);
-               if (IS_ERR(th)) {
-                       ret = PTR_ERR(th);
-                       goto out;
-               }
 
-               /* blob for me */
-               dlen = ceph_x_decrypt(secret, &p, end, dbuf,
-                                     TEMP_TICKET_BUF_LEN);
-               if (dlen <= 0) {
-                       ret = dlen;
-                       goto out;
-               }
-               dout(" decrypted %d bytes\n", dlen);
-               dend = dbuf + dlen;
-               dp = dbuf;
+       th = get_ticket_handler(ac, type);
+       if (IS_ERR(th)) {
+               ret = PTR_ERR(th);
+               goto out;
+       }
 
-               tkt_struct_v = ceph_decode_8(&dp);
-               if (tkt_struct_v != 1)
-                       goto bad;
+       /* blob for me */
+       dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
+       if (dlen <= 0) {
+               ret = dlen;
+               goto out;
+       }
+       dout(" decrypted %d bytes\n", dlen);
+       dp = dbuf;
+       dend = dp + dlen;
 
-               memcpy(&old_key, &th->session_key, sizeof(old_key));
-               ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
-               if (ret)
-                       goto out;
+       tkt_struct_v = ceph_decode_8(&dp);
+       if (tkt_struct_v != 1)
+               goto bad;
 
-               ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
-               ceph_decode_timespec(&validity, &new_validity);
-               new_expires = get_seconds() + validity.tv_sec;
-               new_renew_after = new_expires - (validity.tv_sec / 4);
-               dout(" expires=%lu renew_after=%lu\n", new_expires,
-                    new_renew_after);
+       memcpy(&old_key, &th->session_key, sizeof(old_key));
+       ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+       if (ret)
+               goto out;
 
-               /* ticket blob for service */
-               ceph_decode_8_safe(&p, end, is_enc, bad);
-               tp = ticket_buf;
-               if (is_enc) {
-                       /* encrypted */
-                       dout(" encrypted ticket\n");
-                       dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
-                                             TEMP_TICKET_BUF_LEN);
-                       if (dlen < 0) {
-                               ret = dlen;
-                               goto out;
-                       }
-                       dlen = ceph_decode_32(&tp);
-               } else {
-                       /* unencrypted */
-                       ceph_decode_32_safe(&p, end, dlen, bad);
-                       ceph_decode_need(&p, end, dlen, bad);
-                       ceph_decode_copy(&p, ticket_buf, dlen);
+       ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+       ceph_decode_timespec(&validity, &new_validity);
+       new_expires = get_seconds() + validity.tv_sec;
+       new_renew_after = new_expires - (validity.tv_sec / 4);
+       dout(" expires=%lu renew_after=%lu\n", new_expires,
+            new_renew_after);
+
+       /* ticket blob for service */
+       ceph_decode_8_safe(p, end, is_enc, bad);
+       if (is_enc) {
+               /* encrypted */
+               dout(" encrypted ticket\n");
+               dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
+               if (dlen < 0) {
+                       ret = dlen;
+                       goto out;
                }
-               tpend = tp + dlen;
-               dout(" ticket blob is %d bytes\n", dlen);
-               ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-               blob_struct_v = ceph_decode_8(&tp);
-               new_secret_id = ceph_decode_64(&tp);
-               ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
-               if (ret)
+               tp = ticket_buf;
+               dlen = ceph_decode_32(&tp);
+       } else {
+               /* unencrypted */
+               ceph_decode_32_safe(p, end, dlen, bad);
+               ticket_buf = kmalloc(dlen, GFP_NOFS);
+               if (!ticket_buf) {
+                       ret = -ENOMEM;
                        goto out;
-
-               /* all is well, update our ticket */
-               ceph_crypto_key_destroy(&th->session_key);
-               if (th->ticket_blob)
-                       ceph_buffer_put(th->ticket_blob);
-               th->session_key = new_session_key;
-               th->ticket_blob = new_ticket_blob;
-               th->validity = new_validity;
-               th->secret_id = new_secret_id;
-               th->expires = new_expires;
-               th->renew_after = new_renew_after;
-               dout(" got ticket service %d (%s) secret_id %lld len %d\n",
-                    type, ceph_entity_type_name(type), th->secret_id,
-                    (int)th->ticket_blob->vec.iov_len);
-               xi->have_keys |= th->service;
+               }
+               tp = ticket_buf;
+               ceph_decode_need(p, end, dlen, bad);
+               ceph_decode_copy(p, ticket_buf, dlen);
        }
+       tpend = tp + dlen;
+       dout(" ticket blob is %d bytes\n", dlen);
+       ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+       blob_struct_v = ceph_decode_8(&tp);
+       new_secret_id = ceph_decode_64(&tp);
+       ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+       if (ret)
+               goto out;
+
+       /* all is well, update our ticket */
+       ceph_crypto_key_destroy(&th->session_key);
+       if (th->ticket_blob)
+               ceph_buffer_put(th->ticket_blob);
+       th->session_key = new_session_key;
+       th->ticket_blob = new_ticket_blob;
+       th->validity = new_validity;
+       th->secret_id = new_secret_id;
+       th->expires = new_expires;
+       th->renew_after = new_renew_after;
+       dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+            type, ceph_entity_type_name(type), th->secret_id,
+            (int)th->ticket_blob->vec.iov_len);
+       xi->have_keys |= th->service;
 
-       ret = 0;
 out:
        kfree(ticket_buf);
-out_dbuf:
        kfree(dbuf);
        return ret;
 
@@ -270,6 +255,34 @@ bad:
        goto out;
 }
 
+static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+                                   struct ceph_crypto_key *secret,
+                                   void *buf, void *end)
+{
+       void *p = buf;
+       u8 reply_struct_v;
+       u32 num;
+       int ret;
+
+       ceph_decode_8_safe(&p, end, reply_struct_v, bad);
+       if (reply_struct_v != 1)
+               return -EINVAL;
+
+       ceph_decode_32_safe(&p, end, num, bad);
+       dout("%d tickets\n", num);
+
+       while (num--) {
+               ret = process_one_ticket(ac, secret, &p, end);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+
+bad:
+       return -EINVAL;
+}
+
 static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
                                   struct ceph_x_ticket_handler *th,
                                   struct ceph_x_authorizer *au)
@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
        struct ceph_x_ticket_handler *th;
        int ret = 0;
        struct ceph_x_authorize_reply reply;
+       void *preply = &reply;
        void *p = au->reply_buf;
        void *end = p + sizeof(au->reply_buf);
 
        th = get_ticket_handler(ac, au->service);
        if (IS_ERR(th))
                return PTR_ERR(th);
-       ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
+       ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
        if (ret < 0)
                return ret;
        if (ret != sizeof(reply))
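
Splitting the per-ticket loop body out into process_one_ticket() is what lets ceph_x_decrypt() drop the fixed TEMP_TICKET_BUF_LEN scratch buffers: the function now takes a pointer-to-pointer and allocates an output buffer sized to the actual ciphertext when the caller passes NULL, while ceph_x_verify_authorizer_reply() keeps its stack buffer by passing it through a temporary void pointer. A hedged sketch of that calling convention (hypothetical decode_blob(), not the Ceph API):

#include <stdlib.h>
#include <string.h>

/* Decode len bytes into *obuf; allocate it when the caller passed NULL. */
static int decode_blob(const void *src, size_t len, void **obuf, size_t olen)
{
        if (*obuf == NULL) {
                *obuf = malloc(len);
                if (!*obuf)
                        return -1;
                olen = len;
        }
        if (olen < len)
                return -1;
        memcpy(*obuf, src, len);
        return (int)len;
}

/* Caller with a fixed-size buffer still works via a temporary pointer:
 *   char reply[32]; void *p = reply;
 *   decode_blob(src, n, &p, sizeof(reply));
 */
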
index 067d3af2eaf61be41d601dc4324fc1a1565ab27d..61fcfc304f6869959b2ff510026ccb2e33700f21 100644 (file)
@@ -1181,7 +1181,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
        if (!m) {
                pr_info("alloc_msg unknown type %d\n", type);
                *skip = 1;
+       } else if (front_len > m->front_alloc_len) {
+               pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
+                          front_len, m->front_alloc_len,
+                          (unsigned int)con->peer_name.type,
+                          le64_to_cpu(con->peer_name.num));
+               ceph_msg_put(m);
+               m = ceph_msg_new(type, front_len, GFP_NOFS, false);
        }
+
        return m;
 }
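
The mon_alloc_msg() hunk handles replies larger than the preallocated message: it warns, drops the too-small message and allocates a fresh one sized to front_len. The same grow-on-demand shape in plain C (hypothetical msg helpers, assumed sizes):

#include <stdio.h>
#include <stdlib.h>

struct msg { size_t cap; unsigned char *buf; };

static struct msg *msg_new(size_t cap)
{
        struct msg *m = malloc(sizeof(*m));

        if (!m || !(m->buf = malloc(cap))) {
                free(m);
                return NULL;
        }
        m->cap = cap;
        return m;
}

/* Keep the preallocated message unless the reply would not fit. */
static struct msg *msg_for_reply(struct msg *prealloc, size_t need)
{
        if (need <= prealloc->cap)
                return prealloc;
        fprintf(stderr, "reply %zu > prealloc %zu, reallocating\n",
                need, prealloc->cap);
        free(prealloc->buf);
        free(prealloc);
        return msg_new(need);
}
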
 
index 488dd1a825c05b704a26d8c88582454f1745be4c..fdbc9a81d4c2033565c36815d425d0a7d80372b7 100644 (file)
@@ -775,7 +775,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
 EXPORT_SYMBOL(__skb_checksum_complete);
 
 /**
- *     skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec.
+ *     skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
  *     @skb: skbuff
  *     @hlen: hardware length
  *     @iov: io vector
index b65a5051361f2dea31a0fac078b3dd656e126cc8..ab9a16530c3684b2e1ff39f6f0587fb6e4aa7f24 100644 (file)
@@ -2587,13 +2587,19 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
                return harmonize_features(skb, features);
        }
 
-       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
-                                              NETIF_F_HW_VLAN_STAG_TX);
+       features = netdev_intersect_features(features,
+                                            skb->dev->vlan_features |
+                                            NETIF_F_HW_VLAN_CTAG_TX |
+                                            NETIF_F_HW_VLAN_STAG_TX);
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
-               features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
-                               NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
-                               NETIF_F_HW_VLAN_STAG_TX;
+               features = netdev_intersect_features(features,
+                                                    NETIF_F_SG |
+                                                    NETIF_F_HIGHDMA |
+                                                    NETIF_F_FRAGLIST |
+                                                    NETIF_F_GEN_CSUM |
+                                                    NETIF_F_HW_VLAN_CTAG_TX |
+                                                    NETIF_F_HW_VLAN_STAG_TX);
 
        return harmonize_features(skb, features);
 }
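
Replacing the plain `&=` with netdev_intersect_features() matters because some feature bits imply others; a blind AND can clear a checksum flag that one side only expresses through its generic equivalent. Roughly, and with made-up flag names rather than the exact kernel helper:

typedef unsigned long features_t;

#define F_GEN_CSUM   (1UL << 0)   /* "can checksum anything" */
#define F_IP_CSUM    (1UL << 1)
#define F_IPV6_CSUM  (1UL << 2)
#define F_ALL_CSUM   (F_GEN_CSUM | F_IP_CSUM | F_IPV6_CSUM)

static features_t intersect_features(features_t f1, features_t f2)
{
        /* A generic checksum capability satisfies the specific ones. */
        if (f1 & F_GEN_CSUM)
                f1 |= F_ALL_CSUM;
        if (f2 & F_GEN_CSUM)
                f2 |= F_ALL_CSUM;
        return f1 & f2;
}
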
@@ -4889,7 +4895,8 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
        if (adj->master)
                sysfs_remove_link(&(dev->dev.kobj), "master");
 
-       if (netdev_adjacent_is_neigh_list(dev, dev_list))
+       if (netdev_adjacent_is_neigh_list(dev, dev_list) &&
+           net_eq(dev_net(dev),dev_net(adj_dev)))
                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
 
        list_del_rcu(&adj->list);
@@ -5159,11 +5166,65 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+void netdev_adjacent_add_links(struct net_device *dev)
+{
+       struct netdev_adjacent *iter;
+
+       struct net *net = dev_net(dev);
+
+       list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+                                         &iter->dev->adj_list.lower);
+               netdev_adjacent_sysfs_add(dev, iter->dev,
+                                         &dev->adj_list.upper);
+       }
+
+       list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+                                         &iter->dev->adj_list.upper);
+               netdev_adjacent_sysfs_add(dev, iter->dev,
+                                         &dev->adj_list.lower);
+       }
+}
+
+void netdev_adjacent_del_links(struct net_device *dev)
+{
+       struct netdev_adjacent *iter;
+
+       struct net *net = dev_net(dev);
+
+       list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_del(iter->dev, dev->name,
+                                         &iter->dev->adj_list.lower);
+               netdev_adjacent_sysfs_del(dev, iter->dev->name,
+                                         &dev->adj_list.upper);
+       }
+
+       list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_del(iter->dev, dev->name,
+                                         &iter->dev->adj_list.upper);
+               netdev_adjacent_sysfs_del(dev, iter->dev->name,
+                                         &dev->adj_list.lower);
+       }
+}
+
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
 {
        struct netdev_adjacent *iter;
 
+       struct net *net = dev_net(dev);
+
        list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
                netdev_adjacent_sysfs_del(iter->dev, oldname,
                                          &iter->dev->adj_list.lower);
                netdev_adjacent_sysfs_add(iter->dev, dev,
@@ -5171,6 +5232,8 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
        }
 
        list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net,dev_net(iter->dev)))
+                       continue;
                netdev_adjacent_sysfs_del(iter->dev, oldname,
                                          &iter->dev->adj_list.upper);
                netdev_adjacent_sysfs_add(iter->dev, dev,
@@ -6773,6 +6836,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 
        /* Send a netdev-removed uevent to the old namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
+       netdev_adjacent_del_links(dev);
 
        /* Actually switch the network namespace */
        dev_net_set(dev, net);
@@ -6787,6 +6851,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 
        /* Send a netdev-add uevent to the new namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
+       netdev_adjacent_add_links(dev);
 
        /* Fixup kobjects */
        err = device_rename(&dev->dev, dev->name);
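
The remaining net/core/dev.c hunks make the adjacency sysfs links namespace-aware: __netdev_adjacent_dev_remove() and the rename path skip peers living in a different netns, and dev_change_net_namespace() brackets the move with the new netdev_adjacent_del_links()/netdev_adjacent_add_links() helpers so links are torn down in the old namespace and recreated in the new one. A plain-C analogue of the filtering step (hypothetical peer list and del_link callback):

struct peer { int netns_id; const char *name; struct peer *next; };

/* Only touch links for peers that live in the same namespace. */
static void del_links(struct peer *list, int my_ns,
                      void (*del_link)(const char *name))
{
        struct peer *p;

        for (p = list; p; p = p->next) {
                if (p->netns_id != my_ns)
                        continue;
                del_link(p->name);
        }
}
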
index 6b5b6e7013cafec8a7b3767ed97a610bf35f1ae6..9d33dfffca19a992baf28e07ade0c7029c413ad0 100644 (file)
@@ -197,7 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  * as destination. A new timer with the interval specified in the
  * configuration TLV is created. Upon each interval, the latest statistics
  * will be read from &bstats and the estimated rate will be stored in
- * &rate_est with the statistics lock grabed during this period.
+ * &rate_est with the statistics lock grabbed during this period.
  *
  * Returns 0 on success or a negative error code.
  *
index 9d3d9e78397b0f90f379addab25c7fd78294b3bc..2ddbce4cce144f62e8fed9dd39209fb3429b82a7 100644 (file)
@@ -206,7 +206,7 @@ EXPORT_SYMBOL(gnet_stats_copy_queue);
  * @st: application specific statistics data
  * @len: length of data
  *
- * Appends the application sepecific statistics to the top level TLV created by
+ * Appends the application specific statistics to the top level TLV created by
  * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
  * handle is in backward compatibility mode.
  *
index 163b673f9e62d212230abd1c9b848c35ba923a0d..da1378a3e2c72e0f23e9e21581dfd2ece5491d09 100644 (file)
@@ -2647,7 +2647,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
  * skb_seq_read() will return the remaining part of the block.
  *
  * Note 1: The size of each block of data returned can be arbitrary,
- *       this limitation is the cost for zerocopy seqeuental
+ *       this limitation is the cost for zerocopy sequential
  *       reads of potentially non linear data.
  *
  * Note 2: Fragment lists within fragments are not implemented
@@ -2781,7 +2781,7 @@ EXPORT_SYMBOL(skb_find_text);
 /**
  * skb_append_datato_frags - append the user data to a skb
  * @sk: sock  structure
- * @skb: skb structure to be appened with user data.
+ * @skb: skb structure to be appended with user data.
  * @getfrag: call back function to be used for getting the user data
  * @from: pointer to user message iov
  * @length: length of the iov message
index 2714811afbd8bd3d35b5bd8619179eafaa71e19b..d372b4bd3f996dd3248e86748b7d16c912c9b5d7 100644 (file)
@@ -166,7 +166,7 @@ EXPORT_SYMBOL(sk_ns_capable);
 /**
  * sk_capable - Socket global capability test
  * @sk: Socket to use a capability on or through
- * @cap: The global capbility to use
+ * @cap: The global capability to use
  *
  * Test to see if the opener of the socket had when the socket was
  * created and the current process has the capability @cap in all user
@@ -183,7 +183,7 @@ EXPORT_SYMBOL(sk_capable);
  * @sk: Socket to use a capability on or through
  * @cap: The capability to use
  *
- * Test to see if the opener of the socket had when the socke was created
+ * Test to see if the opener of the socket had when the socket was created
  * and the current process has the capability @cap over the network namespace
  * the socket is a member of.
  */
@@ -1822,6 +1822,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                                           order);
                                        if (page)
                                                goto fill_page;
+                                       /* Do not retry other high order allocations */
+                                       order = 1;
+                                       max_page_order = 0;
                                }
                                order--;
                        }
@@ -1869,10 +1872,8 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
  * no guarantee that allocations succeed. Therefore, @sz MUST be
  * less or equal than PAGE_SIZE.
  */
-bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
 {
-       int order;
-
        if (pfrag->page) {
                if (atomic_read(&pfrag->page->_count) == 1) {
                        pfrag->offset = 0;
@@ -1883,20 +1884,21 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
                put_page(pfrag->page);
        }
 
-       order = SKB_FRAG_PAGE_ORDER;
-       do {
-               gfp_t gfp = prio;
-
-               if (order)
-                       gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
-               pfrag->page = alloc_pages(gfp, order);
+       pfrag->offset = 0;
+       if (SKB_FRAG_PAGE_ORDER) {
+               pfrag->page = alloc_pages(gfp | __GFP_COMP |
+                                         __GFP_NOWARN | __GFP_NORETRY,
+                                         SKB_FRAG_PAGE_ORDER);
                if (likely(pfrag->page)) {
-                       pfrag->offset = 0;
-                       pfrag->size = PAGE_SIZE << order;
+                       pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
                        return true;
                }
-       } while (--order >= 0);
-
+       }
+       pfrag->page = alloc_page(gfp);
+       if (likely(pfrag->page)) {
+               pfrag->size = PAGE_SIZE;
+               return true;
+       }
        return false;
 }
 EXPORT_SYMBOL(skb_page_frag_refill);
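
The rewritten skb_page_frag_refill() no longer walks allocation orders downward: it makes a single opportunistic high-order attempt with __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY and otherwise settles for one page. A userspace caricature of "try big once, then fall back small" (assumed sizes, not the page allocator):

#include <stdlib.h>

#define BIG_CHUNK   (32 * 1024)
#define SMALL_CHUNK (4 * 1024)

/* Try one large buffer; if that fails, fall back to the small size. */
static void *refill(size_t *size)
{
        void *p = malloc(BIG_CHUNK);

        if (p) {
                *size = BIG_CHUNK;
                return p;
        }
        p = malloc(SMALL_CHUNK);
        if (p)
                *size = SMALL_CHUNK;
        return p;
}
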
index 016b77ee88f0e2f1394c0456f67e9e01ec190d9f..6591d27e53a43413656a18a6eedb25d7bb5fe034 100644 (file)
@@ -246,7 +246,7 @@ lowpan_alloc_frag(struct sk_buff *skb, int size,
                        return ERR_PTR(-rc);
                }
        } else {
-               frag = ERR_PTR(ENOMEM);
+               frag = ERR_PTR(-ENOMEM);
        }
 
        return frag;
@@ -437,7 +437,7 @@ static void lowpan_setup(struct net_device *dev)
        /* Frame Control + Sequence Number + Address fields + Security Header */
        dev->hard_header_len    = 2 + 1 + 20 + 14;
        dev->needed_tailroom    = 2; /* FCS */
-       dev->mtu                = 1281;
+       dev->mtu                = IPV6_MIN_MTU;
        dev->tx_queue_len       = 0;
        dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;
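
The one-character fix at the top of this file is easy to miss: ERR_PTR() expects a negative errno, so ERR_PTR(ENOMEM) produces a small positive value that IS_ERR() does not recognise. A loose userspace re-implementation of the convention, only to show why the sign matters (not the kernel's actual definitions):

#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)        /* expects a negative errno */
{
        return (void *)error;
}

static inline int is_err(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* err_ptr(-ENOMEM) lands in the top 4095 addresses and is_err() catches it;
 * err_ptr(ENOMEM) looks like an ordinary small pointer and slips through. */
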
index ffec6ce510056960bb92d6ef0c3e44bc9493676f..32755cb7e64e3c02f77c85bdad6982d31311bcc5 100644 (file)
@@ -355,8 +355,6 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
        struct net *net = dev_net(skb->dev);
        struct lowpan_frag_info *frag_info = lowpan_cb(skb);
        struct ieee802154_addr source, dest;
-       struct netns_ieee802154_lowpan *ieee802154_lowpan =
-               net_ieee802154_lowpan(net);
        int err;
 
        source = mac_cb(skb)->source;
@@ -366,8 +364,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
        if (err < 0)
                goto err;
 
-       if (frag_info->d_size > ieee802154_lowpan->max_dsize)
+       if (frag_info->d_size > IPV6_MIN_MTU) {
+               net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
                goto err;
+       }
 
        fq = fq_find(net, frag_info, &source, &dest);
        if (fq != NULL) {
@@ -415,13 +415,6 @@ static struct ctl_table lowpan_frags_ns_ctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       {
-               .procname       = "6lowpanfrag_max_datagram_size",
-               .data           = &init_net.ieee802154_lowpan.max_dsize,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        { }
 };
 
@@ -458,7 +451,6 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
                table[1].data = &ieee802154_lowpan->frags.low_thresh;
                table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
                table[2].data = &ieee802154_lowpan->frags.timeout;
-               table[3].data = &ieee802154_lowpan->max_dsize;
 
                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
@@ -533,7 +525,6 @@ static int __net_init lowpan_frags_init_net(struct net *net)
        ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
-       ieee802154_lowpan->max_dsize = 0xFFFF;
 
        inet_frags_init_net(&ieee802154_lowpan->frags);
 
index fb173126f03dfb19ef5a215693a1985cc00450a2..7cbcaf4f0194f22b797f04b1fca1029a0a4e159e 100644 (file)
@@ -82,6 +82,52 @@ config NF_TABLES_ARP
        help
          This option enables the ARP support for nf_tables.
 
+config NF_NAT_IPV4
+       tristate "IPv4 NAT"
+       depends on NF_CONNTRACK_IPV4
+       default m if NETFILTER_ADVANCED=n
+       select NF_NAT
+       help
+         The IPv4 NAT option allows masquerading, port forwarding and other
+         forms of full Network Address Port Translation. This can be
+         controlled by iptables or nft.
+
+if NF_NAT_IPV4
+
+config NF_NAT_SNMP_BASIC
+       tristate "Basic SNMP-ALG support"
+       depends on NF_CONNTRACK_SNMP
+       depends on NETFILTER_ADVANCED
+       default NF_NAT && NF_CONNTRACK_SNMP
+       ---help---
+
+         This module implements an Application Layer Gateway (ALG) for
+         SNMP payloads.  In conjunction with NAT, it allows a network
+         management system to access multiple private networks with
+         conflicting addresses.  It works by modifying IP addresses
+         inside SNMP payloads to match IP-layer NAT mapping.
+
+         This is the "basic" form of SNMP-ALG, as described in RFC 2962
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_NAT_PROTO_GRE
+       tristate
+       depends on NF_CT_PROTO_GRE
+
+config NF_NAT_PPTP
+       tristate
+       depends on NF_CONNTRACK
+       default NF_CONNTRACK_PPTP
+       select NF_NAT_PROTO_GRE
+
+config NF_NAT_H323
+       tristate
+       depends on NF_CONNTRACK
+       default NF_CONNTRACK_H323
+
+endif # NF_NAT_IPV4
+
 config IP_NF_IPTABLES
        tristate "IP tables support (required for filtering/masq/NAT)"
        default m if NETFILTER_ADVANCED=n
@@ -170,19 +216,21 @@ config IP_NF_TARGET_SYNPROXY
          To compile it as a module, choose M here. If unsure, say N.
 
 # NAT + specific targets: nf_conntrack
-config NF_NAT_IPV4
-       tristate "IPv4 NAT"
+config IP_NF_NAT
+       tristate "iptables NAT support"
        depends on NF_CONNTRACK_IPV4
        default m if NETFILTER_ADVANCED=n
        select NF_NAT
+       select NF_NAT_IPV4
+       select NETFILTER_XT_NAT
        help
-         The IPv4 NAT option allows masquerading, port forwarding and other
-         forms of full Network Address Port Translation.  It is controlled by
-         the `nat' table in iptables: see the man page for iptables(8).
+         This enables the `nat' table in iptables. This allows masquerading,
+         port forwarding and other forms of full Network Address Port
+         Translation.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-if NF_NAT_IPV4
+if IP_NF_NAT
 
 config IP_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
@@ -214,47 +262,7 @@ config IP_NF_TARGET_REDIRECT
        (e.g. when running oldconfig). It selects
        CONFIG_NETFILTER_XT_TARGET_REDIRECT.
 
-endif
-
-config NF_NAT_SNMP_BASIC
-       tristate "Basic SNMP-ALG support"
-       depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4
-       depends on NETFILTER_ADVANCED
-       default NF_NAT && NF_CONNTRACK_SNMP
-       ---help---
-
-         This module implements an Application Layer Gateway (ALG) for
-         SNMP payloads.  In conjunction with NAT, it allows a network
-         management system to access multiple private networks with
-         conflicting addresses.  It works by modifying IP addresses
-         inside SNMP payloads to match IP-layer NAT mapping.
-
-         This is the "basic" form of SNMP-ALG, as described in RFC 2962
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
-# If they want FTP, set to $CONFIG_IP_NF_NAT (m or y),
-# or $CONFIG_IP_NF_FTP (m or y), whichever is weaker.
-# From kconfig-language.txt:
-#
-#           <expr> '&&' <expr>                   (6)
-#
-# (6) Returns the result of min(/expr/, /expr/).
-
-config NF_NAT_PROTO_GRE
-       tristate
-       depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE
-
-config NF_NAT_PPTP
-       tristate
-       depends on NF_CONNTRACK && NF_NAT_IPV4
-       default NF_NAT_IPV4 && NF_CONNTRACK_PPTP
-       select NF_NAT_PROTO_GRE
-
-config NF_NAT_H323
-       tristate
-       depends on NF_CONNTRACK && NF_NAT_IPV4
-       default NF_NAT_IPV4 && NF_CONNTRACK_H323
+endif # IP_NF_NAT
 
 # mangle + specific targets
 config IP_NF_MANGLE
index 33001621465b8d131e59cfce8920f87f3e46c7fc..edf4af32e9f28fcdb3a391abbb0d651b4a16af38 100644 (file)
@@ -43,7 +43,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 # the three instances of ip_tables
 obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
 obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
-obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o
+obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
 
index 0b239fc1816ed828862f19cdde0f08720a0b43ca..fc1fac2a052856e5c1b2476dc469d261a6558e5e 100644 (file)
@@ -1690,14 +1690,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
        addrconf_mod_dad_work(ifp, 0);
 }
 
-/* Join to solicited addr multicast group. */
-
+/* Join to solicited addr multicast group.
+ * caller must hold RTNL */
 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
 {
        struct in6_addr maddr;
 
-       ASSERT_RTNL();
-
        if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                return;
 
@@ -1705,12 +1703,11 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
        ipv6_dev_mc_inc(dev, &maddr);
 }
 
+/* caller must hold RTNL */
 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
 {
        struct in6_addr maddr;
 
-       ASSERT_RTNL();
-
        if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                return;
 
@@ -1718,12 +1715,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
        __ipv6_dev_mc_dec(idev, &maddr);
 }
 
+/* caller must hold RTNL */
 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 {
        struct in6_addr addr;
 
-       ASSERT_RTNL();
-
        if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -1732,12 +1728,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
        ipv6_dev_ac_inc(ifp->idev->dev, &addr);
 }
 
+/* caller must hold RTNL */
 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
 {
        struct in6_addr addr;
 
-       ASSERT_RTNL();
-
        if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -4773,15 +4768,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                addrconf_leave_solict(ifp->idev, &ifp->addr);
                if (!ipv6_addr_any(&ifp->peer_addr)) {
                        struct rt6_info *rt;
-                       struct net_device *dev = ifp->idev->dev;
-
-                       rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
-                                       dev->ifindex, 1);
-                       if (rt) {
-                               dst_hold(&rt->dst);
-                               if (ip6_del_rt(rt))
-                                       dst_free(&rt->dst);
-                       }
+
+                       rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
+                                                      ifp->idev->dev, 0, 0);
+                       if (rt && ip6_del_rt(rt))
+                               dst_free(&rt->dst);
                }
                dst_hold(&ifp->rt->dst);
 
index 2101832446896245e54b608bb24f27d83e42a005..ff2de7d9d8e6553de407b7aa5a490fc5c9a40461 100644 (file)
@@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        pac->acl_next = NULL;
        pac->acl_addr = *addr;
 
+       rtnl_lock();
        rcu_read_lock();
        if (ifindex == 0) {
                struct rt6_info *rt;
@@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
 error:
        rcu_read_unlock();
+       rtnl_unlock();
        if (pac)
                sock_kfree_s(sk, pac, sizeof(*pac));
        return err;
@@ -171,11 +173,13 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        spin_unlock_bh(&ipv6_sk_ac_lock);
 
+       rtnl_lock();
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
        if (dev)
                ipv6_dev_ac_dec(dev, &pac->acl_addr);
        rcu_read_unlock();
+       rtnl_unlock();
 
        sock_kfree_s(sk, pac, sizeof(*pac));
        return 0;
@@ -198,6 +202,7 @@ void ipv6_sock_ac_close(struct sock *sk)
        spin_unlock_bh(&ipv6_sk_ac_lock);
 
        prev_index = 0;
+       rtnl_lock();
        rcu_read_lock();
        while (pac) {
                struct ipv6_ac_socklist *next = pac->acl_next;
@@ -212,6 +217,7 @@ void ipv6_sock_ac_close(struct sock *sk)
                pac = next;
        }
        rcu_read_unlock();
+       rtnl_unlock();
 }
 
 static void aca_put(struct ifacaddr6 *ac)
@@ -233,6 +239,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
        struct rt6_info *rt;
        int err;
 
+       ASSERT_RTNL();
+
        idev = in6_dev_get(dev);
 
        if (idev == NULL)
@@ -302,6 +310,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
 {
        struct ifacaddr6 *aca, *prev_aca;
 
+       ASSERT_RTNL();
+
        write_lock_bh(&idev->lock);
        prev_aca = NULL;
        for (aca = idev->ac_list; aca; aca = aca->aca_next) {
index 617f0958e164e7893ca70e80e09d1ed933c95b69..a23b655a7627a69046d956139946c00dda8825f4 100644 (file)
@@ -172,6 +172,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        mc_lst->next = NULL;
        mc_lst->addr = *addr;
 
+       rtnl_lock();
        rcu_read_lock();
        if (ifindex == 0) {
                struct rt6_info *rt;
@@ -185,6 +186,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        if (dev == NULL) {
                rcu_read_unlock();
+               rtnl_unlock();
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return -ENODEV;
        }
@@ -202,6 +204,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        if (err) {
                rcu_read_unlock();
+               rtnl_unlock();
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return err;
        }
@@ -212,6 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        spin_unlock(&ipv6_sk_mc_lock);
 
        rcu_read_unlock();
+       rtnl_unlock();
 
        return 0;
 }
@@ -229,6 +233,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
        if (!ipv6_addr_is_multicast(addr))
                return -EINVAL;
 
+       rtnl_lock();
        spin_lock(&ipv6_sk_mc_lock);
        for (lnk = &np->ipv6_mc_list;
             (mc_lst = rcu_dereference_protected(*lnk,
@@ -252,12 +257,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
                        } else
                                (void) ip6_mc_leave_src(sk, mc_lst, NULL);
                        rcu_read_unlock();
+                       rtnl_unlock();
+
                        atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
                        kfree_rcu(mc_lst, rcu);
                        return 0;
                }
        }
        spin_unlock(&ipv6_sk_mc_lock);
+       rtnl_unlock();
 
        return -EADDRNOTAVAIL;
 }
@@ -302,6 +310,7 @@ void ipv6_sock_mc_close(struct sock *sk)
        if (!rcu_access_pointer(np->ipv6_mc_list))
                return;
 
+       rtnl_lock();
        spin_lock(&ipv6_sk_mc_lock);
        while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
                                lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
@@ -328,6 +337,7 @@ void ipv6_sock_mc_close(struct sock *sk)
                spin_lock(&ipv6_sk_mc_lock);
        }
        spin_unlock(&ipv6_sk_mc_lock);
+       rtnl_unlock();
 }
 
 int ip6_mc_source(int add, int omode, struct sock *sk,
@@ -845,6 +855,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
        struct ifmcaddr6 *mc;
        struct inet6_dev *idev;
 
+       ASSERT_RTNL();
+
        /* we need to take a reference on idev */
        idev = in6_dev_get(dev);
 
@@ -916,6 +928,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
 {
        struct ifmcaddr6 *ma, **map;
 
+       ASSERT_RTNL();
+
        write_lock_bh(&idev->lock);
        for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
                if (ipv6_addr_equal(&ma->mca_addr, addr)) {
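
Across addrconf.c, anycast.c and mcast.c the locking moves up one level: the socket-facing entry points (ipv6_sock_ac_join/drop/close, ipv6_sock_mc_join/drop/close) now take the RTNL themselves, the device-level ipv6_dev_{ac,mc}_inc and __ipv6_dev_{ac,mc}_dec assert it, and the addrconf helpers that used to assert it are merely documented as requiring it. A compact sketch of that discipline, with a pthread mutex and a crude flag standing in for the RTNL and ASSERT_RTNL():

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int cfg_lock_held;               /* crude stand-in for ASSERT_RTNL() */

/* Device-level helper: caller must hold cfg_lock. */
static void dev_add_addr(void)
{
        assert(cfg_lock_held);
        /* ... mutate the per-device address list ... */
}

/* Socket-level entry point: takes the lock and calls the helper. */
static void sock_join(void)
{
        pthread_mutex_lock(&cfg_lock);
        cfg_lock_held = 1;
        dev_add_addr();
        cfg_lock_held = 0;
        pthread_mutex_unlock(&cfg_lock);
}
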
index ac93df16f5af678f8724346343517ef241a415f3..2812816aabdc0a2918b183094ac4a119b4c0f1fa 100644 (file)
@@ -57,9 +57,19 @@ config NFT_REJECT_IPV6
 
 config NF_LOG_IPV6
        tristate "IPv6 packet logging"
-       depends on NETFILTER_ADVANCED
+       default m if NETFILTER_ADVANCED=n
        select NF_LOG_COMMON
 
+config NF_NAT_IPV6
+       tristate "IPv6 NAT"
+       depends on NF_CONNTRACK_IPV6
+       depends on NETFILTER_ADVANCED
+       select NF_NAT
+       help
+         The IPv6 NAT option allows masquerading, port forwarding and other
+         forms of full Network Address Port Translation. This can be
+         controlled by iptables or nft.
+
 config IP6_NF_IPTABLES
        tristate "IP6 tables support (required for filtering)"
        depends on INET && IPV6
@@ -232,19 +242,21 @@ config IP6_NF_SECURITY
 
          If unsure, say N.
 
-config NF_NAT_IPV6
-       tristate "IPv6 NAT"
+config IP6_NF_NAT
+       tristate "ip6tables NAT support"
        depends on NF_CONNTRACK_IPV6
        depends on NETFILTER_ADVANCED
        select NF_NAT
+       select NF_NAT_IPV6
+       select NETFILTER_XT_NAT
        help
-         The IPv6 NAT option allows masquerading, port forwarding and other
-         forms of full Network Address Port Translation. It is controlled by
-         the `nat' table in ip6tables, see the man page for ip6tables(8).
+         This enables the `nat' table in ip6tables. This allows masquerading,
+         port forwarding and other forms of full Network Address Port
+         Translation.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-if NF_NAT_IPV6
+if IP6_NF_NAT
 
 config IP6_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
@@ -265,7 +277,7 @@ config IP6_NF_TARGET_NPT
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-endif # NF_NAT_IPV6
+endif # IP6_NF_NAT
 
 endif # IP6_NF_IPTABLES
 
index c0b263104ed23170e15b3278408fe95f36c77da9..c3d3286db4bb804546ee8b0488ecdad1441bb06b 100644 (file)
@@ -8,7 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
 obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
 obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
 obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
-obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
+obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o
 
 # objects for l3 independent conntrack
 nf_conntrack_ipv6-y  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
index 13752d96275e8b9142539a201ea1ac6f45f883ba..b704a9356208f5cc3064085b9d82eab52542dd5a 100644 (file)
@@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        /* If PMTU discovery was enabled, use the MTU that was discovered */
        dst = sk_dst_get(tunnel->sock);
        if (dst != NULL) {
-               u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
+               u32 pmtu = dst_mtu(dst);
+
                if (pmtu != 0)
                        session->mtu = session->mru = pmtu -
                                PPPOL2TP_HEADER_OVERHEAD;
index 0375009ddc0db39fd5a365db8cb5e4122219d864..399ad82c997f99c33833c03c67e6acddf7019b6f 100644 (file)
@@ -541,6 +541,8 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
                        continue;
                if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
                        continue;
+               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                       continue;
 
                if (!compat)
                        compat = &sdata->vif.bss_conf.chandef;
index 3db96648b45a02c0e0235b724210a15ba32747b7..86173c0de40e91d658e883514232c720a4c51e0e 100644 (file)
@@ -167,7 +167,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
        p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
                        sta->ampdu_mlme.dialog_token_allocator + 1);
        p += scnprintf(p, sizeof(buf) + buf - p,
-                      "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
+                      "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
 
        for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
                tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
index 01eede7406a55610e771c7cd906cbedcfff1fa6a..f75e5f132c5ad4a816fc933defedda38f0695e66 100644 (file)
@@ -1175,8 +1175,8 @@ static void ieee80211_iface_work(struct work_struct *work)
                        if (sta) {
                                u16 last_seq;
 
-                               last_seq = le16_to_cpu(
-                                       sta->last_seq_ctrl[rx_agg->tid]);
+                               last_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(
+                                       sta->last_seq_ctrl[rx_agg->tid]));
 
                                __ieee80211_start_rx_ba_session(sta,
                                                0, 0,
index 63b874101b2763d5997dc561073e96807596c11a..c47194d2714933bd8e1dc3658ffd165cd50a5a4a 100644 (file)
@@ -959,7 +959,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                if (!matches_local)
                        event = CNF_RJCT;
                if (!mesh_plink_free_count(sdata) ||
-                   (sta->llid != llid || sta->plid != plid))
+                   sta->llid != llid ||
+                   (sta->plid && sta->plid != plid))
                        event = CNF_IGNR;
                else
                        event = CNF_ACPT;
@@ -1080,6 +1081,10 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
                goto unlock_rcu;
        }
 
+       /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+       if (!sta->plid && event == CNF_ACPT)
+               sta->plid = plid;
+
        changed |= mesh_plink_fsm(sdata, sta, event);
 
 unlock_rcu:
index 31a8afaf73323bc09fb99f8e960b17c3fa2e31fd..b82a12a9f0f110779756027944dce6293d1387fb 100644 (file)
@@ -4376,8 +4376,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        rcu_read_unlock();
 
        if (bss->wmm_used && bss->uapsd_supported &&
-           (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
-           sdata->wmm_acm != 0xff) {
+           (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
                assoc_data->uapsd = true;
                ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
        } else {
index c6ee2139fbc579bab35e8cc5bfb6f3b4994930d2..441875f03750ca5a41255faaf9e062c8af1efc32 100644 (file)
@@ -1094,8 +1094,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        unsigned long flags;
        struct ps_data *ps;
 
-       if (sdata->vif.type == NL80211_IFTYPE_AP ||
-           sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+               sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+                                    u.ap);
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP)
                ps = &sdata->bss->ps;
        else if (ieee80211_vif_is_mesh(&sdata->vif))
                ps = &sdata->u.mesh.ps;
index 3c3069fd69718277fc719e82d7bf2c51bb9747db..547838822d5e9c0d7064d72e2dc3be1ff2c14e14 100644 (file)
@@ -462,7 +462,10 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
                        skb->pkt_type = PACKET_OTHERHOST;
                break;
        default:
-               break;
+               spin_unlock_bh(&sdata->mib_lock);
+               pr_debug("invalid dest mode\n");
+               kfree_skb(skb);
+               return NET_RX_DROP;
        }
 
        spin_unlock_bh(&sdata->mib_lock);
@@ -573,6 +576,7 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
        ret = mac802154_parse_frame_start(skb, &hdr);
        if (ret) {
                pr_debug("got invalid frame\n");
+               kfree_skb(skb);
                return;
        }
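
Both mac802154 hunks close buffer leaks on receive: an invalid destination mode now unlocks, frees the skb and reports NET_RX_DROP instead of falling through, and a failed frame parse frees the skb before returning. The underlying rule, sketched in plain C with hypothetical parse()/deliver() helpers: whoever owns the buffer must release it on every early exit.

#include <stdlib.h>

struct buf { size_t len; unsigned char *data; };

static int parse(struct buf *b) { return b->len ? 0 : -1; }
static void deliver(struct buf *b) { free(b->data); free(b); }

/* The rx path owns 'b': every error return must free it. */
static int rx(struct buf *b)
{
        if (parse(b) < 0) {
                free(b->data);
                free(b);
                return -1;      /* dropped, but not leaked */
        }
        deliver(b);             /* consumed on success */
        return 0;
}
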
 
index ad751fe2e82b8ca017bc83f07d3dd7e9f04229f5..b5c1d3aadb41569d95728d9eca5faf6831caa9dc 100644 (file)
@@ -499,7 +499,7 @@ config NFT_LIMIT
 config NFT_NAT
        depends on NF_TABLES
        depends on NF_CONNTRACK
-       depends on NF_NAT
+       select NF_NAT
        tristate "Netfilter nf_tables nat module"
        help
          This option adds the "nat" expression that you can use to perform
@@ -747,7 +747,9 @@ config NETFILTER_XT_TARGET_LED
 
 config NETFILTER_XT_TARGET_LOG
        tristate "LOG target support"
-       depends on NF_LOG_IPV4 && NF_LOG_IPV6
+       select NF_LOG_COMMON
+       select NF_LOG_IPV4
+       select NF_LOG_IPV6 if IPV6
        default m if NETFILTER_ADVANCED=n
        help
          This option adds a `LOG' target, which allows you to create rules in
@@ -764,6 +766,14 @@ config NETFILTER_XT_TARGET_MARK
        (e.g. when running oldconfig). It selects
        CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
 
+config NETFILTER_XT_NAT
+       tristate '"SNAT and DNAT" targets support'
+       depends on NF_NAT
+       ---help---
+       This option enables the SNAT and DNAT targets.
+
+       To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_NETMAP
        tristate '"NETMAP" target support'
        depends on NF_NAT
index 8308624a406ac0d891338e73c44abe6a5fa4c8b0..fad5fdba34e5873c0535037bf709c343ebfd55e3 100644 (file)
@@ -95,7 +95,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
 obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
-obj-$(CONFIG_NF_NAT) += xt_nat.o
+obj-$(CONFIG_NETFILTER_XT_NAT) += xt_nat.o
 
 # targets
 obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
index a93c97f106d4a5022cd0e1196ee70ecdfd8c2873..024a2e25c8a49448afbeed4f780912b24a6318a3 100644 (file)
@@ -54,7 +54,7 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
 struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
 
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
 struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
 #endif
@@ -72,7 +72,7 @@ int nf_register_hook(struct nf_hook_ops *reg)
        }
        list_add_rcu(&reg->list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
        static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
        return 0;
@@ -84,7 +84,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
        mutex_lock(&nf_hook_mutex);
        list_del_rcu(&reg->list);
        mutex_unlock(&nf_hook_mutex);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
        static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
        synchronize_net();
index e6836755c45d4168bb6ccc9ab6f12708e9e17f3d..5c34e8d42e0190a14ec31c1238bde5929fdf0539 100644 (file)
@@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        {
                .hook           = ip_vs_local_reply6,
                .owner          = THIS_MODULE,
-               .pf             = NFPROTO_IPV4,
+               .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP6_PRI_NAT_DST + 1,
        },
index 6f70bdd3a90ad85c72cb100de997d2f5b5bc40a1..56896a412bcec7ae01726734115d19bc04079274 100644 (file)
@@ -38,6 +38,7 @@
 #include <net/route.h>                  /* for ip_route_output */
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
 #include <net/addrconf.h>
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
@@ -862,11 +863,15 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                old_iph = ip_hdr(skb);
        }
 
-       skb->transport_header = skb->network_header;
-
        /* fix old IP header checksum */
        ip_send_check(old_iph);
 
+       skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+       if (IS_ERR(skb))
+               goto tx_error;
+
+       skb->transport_header = skb->network_header;
+
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -900,7 +905,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        return NF_STOLEN;
 
   tx_error:
-       kfree_skb(skb);
+       if (!IS_ERR(skb))
+               kfree_skb(skb);
        rcu_read_unlock();
        LeaveFunction(10);
        return NF_STOLEN;
@@ -953,6 +959,11 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                old_iph = ipv6_hdr(skb);
        }
 
+       /* GSO: we need to provide proper SKB_GSO_ value for IPv6 */
+       skb = iptunnel_handle_offloads(skb, false, 0); /* SKB_GSO_SIT/IPV6 */
+       if (IS_ERR(skb))
+               goto tx_error;
+
        skb->transport_header = skb->network_header;
 
        skb_push(skb, sizeof(struct ipv6hdr));
@@ -988,7 +999,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        return NF_STOLEN;
 
 tx_error:
-       kfree_skb(skb);
+       if (!IS_ERR(skb))
+               kfree_skb(skb);
        rcu_read_unlock();
        LeaveFunction(10);
        return NF_STOLEN;
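
The tx_error change is the subtle part of these two hunks: iptunnel_handle_offloads() can hand back an error-encoded pointer instead of the skb, so the unwind path must check IS_ERR() before calling kfree_skb(). A small caller-side sketch using the same err_ptr()/is_err() convention shown earlier, repeated here so the snippet stands alone (hypothetical transform(), not the kernel helper):

#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
static inline void *err_ptr(long e) { return (void *)e; }
static inline int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

/* transform() may consume its input and return an error-encoded pointer. */
static void *transform(void *pkt, int fail)
{
        if (fail) {
                free(pkt);
                return err_ptr(-12);    /* e.g. -ENOMEM */
        }
        return pkt;
}

static int xmit(void *pkt)
{
        pkt = transform(pkt, 0);
        if (is_err(pkt))
                goto tx_error;
        /* ... encapsulate and send ... */
        free(pkt);
        return 0;

tx_error:
        if (!is_err(pkt))               /* never free an error pointer */
                free(pkt);
        return -1;
}
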
index f4e833005320fa7da4c0deb41bba272e8c54bd8e..7198d660b4dea1e9e79c6f9a13f4e6669bca569d 100644 (file)
@@ -31,7 +31,7 @@ static int cgroup_mt_check(const struct xt_mtchk_param *par)
        if (info->invert & ~1)
                return -EINVAL;
 
-       return info->id ? 0 : -EINVAL;
+       return 0;
 }
 
 static bool
index 7228ec3faf19cdc02a685caa04ccbf04ddf8005a..91d66b7e64ac71f31dcc78208857a20ce7a97f79 100644 (file)
@@ -265,8 +265,11 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
                upcall.key = &key;
                upcall.userdata = NULL;
                upcall.portid = ovs_vport_find_upcall_portid(p, skb);
-               ovs_dp_upcall(dp, skb, &upcall);
-               consume_skb(skb);
+               error = ovs_dp_upcall(dp, skb, &upcall);
+               if (unlikely(error))
+                       kfree_skb(skb);
+               else
+                       consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }
@@ -404,7 +407,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 {
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
-       struct sk_buff *user_skb; /* to be queued to userspace */
+       struct sk_buff *user_skb = NULL; /* to be queued to userspace */
        struct nlattr *nla;
        struct genl_info info = {
                .dst_sk = ovs_dp_get_net(dp)->genl_sock,
@@ -494,9 +497,11 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
 
        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
+       user_skb = NULL;
 out:
        if (err)
                skb_tx_error(skb);
+       kfree_skb(user_skb);
        kfree_skb(nskb);
        return err;
 }
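
The user_skb = NULL assignment above follows a common single-exit cleanup idiom: once genlmsg_unicast() has taken ownership of the buffer, the local pointer is cleared so the shared out: label can free unconditionally (kfree_skb(NULL) is a no-op). A small self-contained sketch of the same idiom, using hypothetical names and plain malloc/free in place of skbs:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct msg { char payload[64]; };

/* Stand-in for genlmsg_unicast(): takes ownership of the message. */
static int send_and_consume(struct msg *m)
{
        free(m);
        return 0;
}

static int build_and_send(const char *text)
{
        struct msg *m;
        int err;

        m = calloc(1, sizeof(*m));
        if (!m) {
                err = -ENOMEM;
                goto out;
        }
        if (strlen(text) >= sizeof(m->payload)) {
                err = -EMSGSIZE;
                goto out;
        }
        strcpy(m->payload, text);

        err = send_and_consume(m);
        m = NULL;               /* ownership transferred; don't free below */
out:
        free(m);                /* free(NULL) is a no-op, like kfree_skb(NULL) */
        return err;
}

int main(void)
{
        return build_and_send("hello") ? EXIT_FAILURE : EXIT_SUCCESS;
}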
index 14c98e48f261ee49656eabb198d62648890b63b3..02a86a27fd846602dd0c9aafd0a045f196e246b4 100644 (file)
@@ -158,6 +158,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
        { "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
+       { "BCM2E64", RFKILL_TYPE_BLUETOOTH },
        { "BCM4752", RFKILL_TYPE_GPS },
        { "LNV4752", RFKILL_TYPE_GPS },
        { },
index eb71d49e76537f17d48e3eeaa43661fb09788f41..634a2abb5f3a25f15ca99c4737190c1e5e17b27a 100644 (file)
@@ -4243,7 +4243,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
        transport = asoc->peer.primary_path;
 
        status.sstat_assoc_id = sctp_assoc2id(asoc);
-       status.sstat_state = asoc->state;
+       status.sstat_state = sctp_assoc_to_state(asoc);
        status.sstat_rwnd =  asoc->peer.rwnd;
        status.sstat_unackdata = asoc->unack_data;
 
index 95ee7d8682e7447f45f5ddb0ddbde5fff196c84e..2e2586e2dee19184e07e01c528563c7000cf912e 100644 (file)
@@ -734,8 +734,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
        }
 
        memset(&tss, 0, sizeof(tss));
-       if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
-            skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP) &&
+       if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
            ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
                empty = 0;
        if (shhwtstamps &&
@@ -2602,7 +2601,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
  *
  *     This function is called by a protocol handler that wants to
  *     advertise its address family, and have it linked into the
- *     socket interface. The value ops->family coresponds to the
+ *     socket interface. The value ops->family corresponds to the
  *     socket system call protocol family.
  */
 int sock_register(const struct net_proto_family *ops)
index b385bcbbf2f58e03d21ec155a304cf3baed7cdd8..4d08b398411fdec9b6707b4cd0db2fd73f1b13fa 100755 (executable)
@@ -2133,7 +2133,10 @@ sub process {
 # Check for improperly formed commit descriptions
                if ($in_commit_log &&
                    $line =~ /\bcommit\s+[0-9a-f]{5,}/i &&
-                   $line !~ /\b[Cc]ommit [0-9a-f]{12,40} \("/) {
+                   !($line =~ /\b[Cc]ommit [0-9a-f]{12,40} \("/ ||
+                     ($line =~ /\b[Cc]ommit [0-9a-f]{12,40}\s*$/ &&
+                      defined $rawlines[$linenr] &&
+                      $rawlines[$linenr] =~ /^\s*\("/))) {
                        $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i;
                        my $init_char = $1;
                        my $orig_commit = lc($2);
index b90a68c4e2c4277035b21f873af64efcd95bf782..6d0cad16f00265a1dc31d8a0804984040fb47995 100644 (file)
@@ -27,8 +27,8 @@ DEFINE_SPINLOCK(key_serial_lock);
 struct rb_root key_user_tree; /* tree of quota records indexed by UID */
 DEFINE_SPINLOCK(key_user_lock);
 
-unsigned int key_quota_root_maxkeys = 200;     /* root's key count quota */
-unsigned int key_quota_root_maxbytes = 20000;  /* root's key space quota */
+unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */
+unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
 unsigned int key_quota_maxkeys = 200;          /* general key count quota */
 unsigned int key_quota_maxbytes = 20000;       /* general key space quota */
 
index f96bf4c7c23236a773f13a0c318ddb5b6f275977..95fc2eaf11dc0f91bcb235a2929b300c39448052 100644 (file)
@@ -507,7 +507,16 @@ static void amdtp_pull_midi(struct amdtp_stream *s,
 static void update_pcm_pointers(struct amdtp_stream *s,
                                struct snd_pcm_substream *pcm,
                                unsigned int frames)
-{      unsigned int ptr;
+{
+       unsigned int ptr;
+
+       /*
+        * In IEC 61883-6, one data block represents one event. In ALSA, one
+        * event equals to one PCM frame. But Dice has a quirk to transfer
+        * two PCM frames in one data block.
+        */
+       if (s->double_pcm_frames)
+               frames *= 2;
 
        ptr = s->pcm_buffer_pointer + frames;
        if (ptr >= pcm->runtime->buffer_size)
index d8ee7b0e938629a828571fcd45ce5da5edc02469..4823c08196ac775f150656b712dd7839b5428a29 100644 (file)
@@ -125,6 +125,7 @@ struct amdtp_stream {
        unsigned int pcm_buffer_pointer;
        unsigned int pcm_period_pointer;
        bool pointer_flush;
+       bool double_pcm_frames;
 
        struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8];
 
index a9a30c0161f17be87e9c79403d1c913f85ab4c86..e3a04d69c85363dfffdc6bebf5400b95cff401fa 100644 (file)
@@ -567,10 +567,14 @@ static int dice_hw_params(struct snd_pcm_substream *substream,
                return err;
 
        /*
-        * At rates above 96 kHz, pretend that the stream runs at half the
-        * actual sample rate with twice the number of channels; two samples
-        * of a channel are stored consecutively in the packet. Requires
-        * blocking mode and PCM buffer size should be aligned to SYT_INTERVAL.
+        * At 176.4/192.0 kHz, Dice has a quirk to transfer two PCM frames in
+        * one data block of AMDTP packet. Thus sampling transfer frequency is
+        * a half of PCM sampling frequency, i.e. PCM frames at 192.0 kHz are
+        * transferred on AMDTP packets at 96 kHz. Two successive samples of a
+        * channel are stored consecutively in the packet. This quirk is called
+        * as 'Dual Wire'.
+        * For this quirk, blocking mode is required and PCM buffer size should
+        * be aligned to SYT_INTERVAL.
         */
        channels = params_channels(hw_params);
        if (rate_index > 4) {
@@ -579,18 +583,25 @@ static int dice_hw_params(struct snd_pcm_substream *substream,
                        return err;
                }
 
-               for (i = 0; i < channels; i++) {
-                       dice->stream.pcm_positions[i * 2] = i;
-                       dice->stream.pcm_positions[i * 2 + 1] = i + channels;
-               }
-
                rate /= 2;
                channels *= 2;
+               dice->stream.double_pcm_frames = true;
+       } else {
+               dice->stream.double_pcm_frames = false;
        }
 
        mode = rate_index_to_mode(rate_index);
        amdtp_stream_set_parameters(&dice->stream, rate, channels,
                                    dice->rx_midi_ports[mode]);
+       if (rate_index > 4) {
+               channels /= 2;
+
+               for (i = 0; i < channels; i++) {
+                       dice->stream.pcm_positions[i] = i * 2;
+                       dice->stream.pcm_positions[i + channels] = i * 2 + 1;
+               }
+       }
+
        amdtp_stream_set_pcm_format(&dice->stream,
                                    params_format(hw_params));
 
index 6f2fa838b63532852ab535ab9ff189b99c52edea..6e5d0cb4e3d737ff43f0eb63e298de817552d224 100644 (file)
@@ -217,6 +217,7 @@ enum {
        CXT_FIXUP_HEADPHONE_MIC_PIN,
        CXT_FIXUP_HEADPHONE_MIC,
        CXT_FIXUP_GPIO1,
+       CXT_FIXUP_ASPIRE_DMIC,
        CXT_FIXUP_THINKPAD_ACPI,
        CXT_FIXUP_OLPC_XO,
        CXT_FIXUP_CAP_MIX_AMP,
@@ -664,6 +665,12 @@ static const struct hda_fixup cxt_fixups[] = {
                        { }
                },
        },
+       [CXT_FIXUP_ASPIRE_DMIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_stereo_dmic,
+               .chained = true,
+               .chain_id = CXT_FIXUP_GPIO1,
+       },
        [CXT_FIXUP_THINKPAD_ACPI] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = hda_fixup_thinkpad_acpi,
@@ -744,7 +751,7 @@ static const struct hda_model_fixup cxt5051_fixup_models[] = {
 
 static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
-       SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
+       SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
        SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
        SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
index d446ac3137b37ac6f3512f8fa7830b537632bca8..1ba22fb527c288b751d3064e041759e881013605 100644 (file)
@@ -328,6 +328,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
                case 0x10ec0885:
                case 0x10ec0887:
                /*case 0x10ec0889:*/ /* this causes an SPDIF problem */
+               case 0x10ec0900:
                        alc889_coef_init(codec);
                        break;
                case 0x10ec0888:
@@ -2350,6 +2351,7 @@ static int patch_alc882(struct hda_codec *codec)
        switch (codec->vendor_id) {
        case 0x10ec0882:
        case 0x10ec0885:
+       case 0x10ec0900:
                break;
        default:
                /* ALC883 and variants */
index a20b30ca52c09dc13507d55afef7854a23f29e40..98523209f7391852cb0e517fb373c6635e2ded99 100644 (file)
@@ -282,10 +282,10 @@ static const struct cs4265_clk_para clk_map_table[] = {
 
        /*64k*/
        {8192000, 64000, 1, 0},
-       {1228800, 64000, 1, 1},
-       {1693440, 64000, 1, 2},
-       {2457600, 64000, 1, 3},
-       {3276800, 64000, 1, 4},
+       {12288000, 64000, 1, 1},
+       {16934400, 64000, 1, 2},
+       {24576000, 64000, 1, 3},
+       {32768000, 64000, 1, 4},
 
        /* 88.2k */
        {11289600, 88200, 1, 0},
@@ -435,10 +435,10 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
        index = cs4265_get_clk_index(cs4265->sysclk, params_rate(params));
        if (index >= 0) {
                snd_soc_update_bits(codec, CS4265_ADC_CTL,
-                       CS4265_ADC_FM, clk_map_table[index].fm_mode);
+                       CS4265_ADC_FM, clk_map_table[index].fm_mode << 6);
                snd_soc_update_bits(codec, CS4265_MCLK_FREQ,
                        CS4265_MCLK_FREQ_MASK,
-                       clk_map_table[index].mclkdiv);
+                       clk_map_table[index].mclkdiv << 4);
 
        } else {
                dev_err(codec->dev, "can't get correct mclk\n");
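
The << 6 and << 4 shifts above move the raw table values into the bit fields that snd_soc_update_bits() masks off in the ADC control and MCLK frequency registers. A generic read-modify-write sketch of that kind of masked field update; the mask and shift used here are illustrative, not taken from the CS4265 datasheet.

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for snd_soc_update_bits(): rewrite only the field bits. */
static uint8_t update_bits(uint8_t reg, uint8_t mask, uint8_t val)
{
        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        uint8_t adc_ctl = 0x0f;         /* pretend current register value */
        uint8_t fm_mode = 0x1;          /* example table entry */

        /* The field sits in the upper bits, so the raw table value must be
         * shifted into place before the masked update is applied. */
        adc_ctl = update_bits(adc_ctl, 0x3 << 6, fm_mode << 6);

        printf("ADC_CTL = 0x%02x\n", (unsigned int)adc_ctl);
        return 0;
}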
index 1dceafeec4151b4ae85b629aaa61e0737de73c34..f586cbd30b776ead146eb7f0edb4d11f7c74f013 100644 (file)
@@ -11,7 +11,7 @@
  */
 
 #ifndef __DA732X_H_
-#define __DA732X_H
+#define __DA732X_H_
 
 #include <sound/soc.h>
 
index 6bc6efdec550db684e4f8bfa32bd88107902d9cd..f1ec6e6bd08aced4be7c38d868ccba94a655c5f7 100644 (file)
@@ -2059,6 +2059,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
 static const struct regmap_config rt5640_regmap = {
        .reg_bits = 8,
        .val_bits = 16,
+       .use_single_rw = true,
 
        .max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
                                               RT5640_PR_SPACING),
index 67f14556462ff35bd6601062fc55a40ec2a96310..5337c448b5e365de204a95e3a945291dd228844b 100644 (file)
@@ -2135,10 +2135,10 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
        { "BST2", NULL, "IN2P" },
        { "BST2", NULL, "IN2N" },
 
-       { "IN1P", NULL, "micbias1" },
-       { "IN1N", NULL, "micbias1" },
-       { "IN2P", NULL, "micbias1" },
-       { "IN2N", NULL, "micbias1" },
+       { "IN1P", NULL, "MICBIAS1" },
+       { "IN1N", NULL, "MICBIAS1" },
+       { "IN2P", NULL, "MICBIAS1" },
+       { "IN2N", NULL, "MICBIAS1" },
 
        { "ADC 1", NULL, "BST1" },
        { "ADC 1", NULL, "ADC 1 power" },
index 159e517fa09aa6ee5d061e658cafcd9d0f8f12a5..cef7776b712cff43f37f81648a6fe2a59256ddd9 100644 (file)
@@ -481,12 +481,19 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
        snd_soc_card_set_drvdata(&priv->snd_card, priv);
 
        ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card);
+       if (ret >= 0)
+               return ret;
 
 err:
        asoc_simple_card_unref(pdev);
        return ret;
 }
 
+static int asoc_simple_card_remove(struct platform_device *pdev)
+{
+       return asoc_simple_card_unref(pdev);
+}
+
 static const struct of_device_id asoc_simple_of_match[] = {
        { .compatible = "simple-audio-card", },
        {},
@@ -500,6 +507,7 @@ static struct platform_driver asoc_simple_card = {
                .of_match_table = asoc_simple_of_match,
        },
        .probe = asoc_simple_card_probe,
+       .remove = asoc_simple_card_remove,
 };
 
 module_platform_driver(asoc_simple_card);
index f8a6adc2d81ca8cecec98a3b768cbc1ccef9b0c0..4336d1831485f77787f384d77ad4785fce6ad02a 100644 (file)
@@ -260,7 +260,7 @@ static struct snd_soc_dai_link omap_twl4030_dai_links[] = {
                .stream_name = "TWL4030 Voice",
                .cpu_dai_name = "omap-mcbsp.3",
                .codec_dai_name = "twl4030-voice",
-               .platform_name = "omap-mcbsp.2",
+               .platform_name = "omap-mcbsp.3",
                .codec_name = "twl4030-codec",
                .dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
                           SND_SOC_DAIFMT_CBM_CFM,
index 3fdf3be7b99ab558e41fd26080b3bb80c5d53b59..f95e7ab135e83266f958c49801284a6e780744a3 100644 (file)
@@ -247,7 +247,7 @@ rsnd_gen2_dma_addr(struct rsnd_priv *priv,
        };
 
        /* it shouldn't happen */
-       if (use_dvc & !use_src)
+       if (use_dvc && !use_src)
                dev_err(dev, "DVC is selected without SRC\n");
 
        /* use SSIU or SSI ? */
index d4bfd4a9076fe02fd50495e0fee6f97471500bb5..889f4e3d35dc53548f59f159666fbc668f7bd39b 100644 (file)
@@ -1325,7 +1325,7 @@ static int soc_post_component_init(struct snd_soc_pcm_runtime *rtd,
        device_initialize(rtd->dev);
        rtd->dev->parent = rtd->card->dev;
        rtd->dev->release = rtd_release;
-       rtd->dev->init_name = name;
+       dev_set_name(rtd->dev, "%s", name);
        dev_set_drvdata(rtd->dev, rtd);
        mutex_init(&rtd->pcm_mutex);
        INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
index 9577121ce971247407b6d04957de548a5b3fd67d..ca8037634100b0a814927f52da9d77fca28fd657 100644 (file)
@@ -21,7 +21,7 @@
  */
 
 #ifndef __TEGRA_ASOC_UTILS_H__
-#define __TEGRA_ASOC_UTILS_H_
+#define __TEGRA_ASOC_UTILS_H__
 
 struct clk;
 struct device;
index 5a0e95edf4dfed55288acc59fb9ea88f49f0dcd6..15fe792e1e9655bf35e016f0a4de1a1f72737c7f 100644 (file)
@@ -15,7 +15,7 @@
 #include <syslog.h>
 #include <unistd.h>
 #include <linux/usb/ch9.h>
-#include "../../uapi/usbip.h"
+#include <linux/usbip.h>
 
 #ifndef USBIDS_FILE
 #define USBIDS_FILE "/usr/share/hwdata/usb.ids"