Merge branch 'for_3.8-rc1' into v4l_for_linus
author Mauro Carvalho Chehab <mchehab@redhat.com>
Tue, 11 Dec 2012 13:28:37 +0000 (11:28 -0200)
committer Mauro Carvalho Chehab <mchehab@redhat.com>
Tue, 11 Dec 2012 13:28:37 +0000 (11:28 -0200)
* for_3.8-rc1: (243 commits)
  [media] omap3isp: Replace cpu_is_omap3630() with ISP revision check
  [media] omap3isp: Prepare/unprepare clocks before/after enable/disable
  [media] omap3isp: preview: Add support for 8-bit formats at the sink pad
  [media] omap3isp: Replace printk with dev_*
  [media] omap3isp: Find source pad from external entity
  [media] omap3isp: Configure CSI-2 phy based on platform data
  [media] omap3isp: Add PHY routing configuration
  [media] omap3isp: Add CSI configuration registers from control block to ISP resources
  [media] omap3isp: Remove unneeded module memory address definitions
  [media] omap3isp: Use monotonic timestamps for statistics buffers
  [media] uvcvideo: Fix control value clamping for unsigned integer controls
  [media] uvcvideo: Mark first output terminal as default video node
  [media] uvcvideo: Add VIDIOC_[GS]_PRIORITY support
  [media] uvcvideo: Return -ENOTTY for unsupported ioctls
  [media] uvcvideo: Set device_caps in VIDIOC_QUERYCAP
  [media] uvcvideo: Don't fail when an unsupported format is requested
  [media] uvcvideo: Return -EACCES when trying to access a read/write-only control
  [media] uvcvideo: Set error_idx properly for extended controls API failures
  [media] rtl28xxu: add NOXON DAB/DAB+ USB dongle rev 2
  [media] fc2580: write some registers conditionally
  ...

162 files changed:
Makefile
arch/arm/Kconfig
arch/arm/common/timer-sp.c
arch/arm/mach-dove/include/mach/pm.h
arch/arm/mach-dove/irq.c
arch/arm/mach-ixp4xx/common-pci.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-ixp4xx/goramo_mlr.c
arch/arm/mach-ixp4xx/include/mach/debug-macro.S
arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
arch/arm/mach-ixp4xx/include/mach/qmgr.h
arch/arm/mach-ixp4xx/ixp4xx_npe.c
arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
arch/arm/mach-kirkwood/pcie.c
arch/arm/plat-s3c24xx/dma.c
arch/arm64/include/asm/unistd32.h
arch/c6x/include/asm/setup.h [new file with mode: 0644]
arch/c6x/include/uapi/asm/Kbuild
arch/c6x/include/uapi/asm/kvm_para.h [deleted file]
arch/c6x/include/uapi/asm/setup.h
arch/c6x/kernel/entry.S
arch/microblaze/kernel/signal.c
arch/mips/include/asm/hugetlb.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/entry.S
arch/mips/kernel/scall64-n32.S
arch/mips/mm/tlb-r4k.c
arch/openrisc/kernel/signal.c
arch/parisc/kernel/syscall_table.S
arch/s390/kernel/compat_wrapper.S
arch/score/kernel/signal.c
arch/sh/kernel/signal_64.c
arch/sparc/boot/piggyback.c
arch/sparc/kernel/sys32.S
arch/sparc/kernel/syscalls.S
arch/sparc/kernel/systbls_64.S
arch/um/kernel/exec.c
arch/x86/include/asm/Kbuild
arch/x86/include/asm/fpu-internal.h
arch/x86/kernel/head_32.S
arch/x86/kernel/ptrace.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/emulate.c
drivers/atm/ambassador.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/ixp4xx-rng.c
drivers/char/raw.c
drivers/crypto/Kconfig
drivers/crypto/ixp4xx_crypto.c
drivers/edac/edac_mc.c
drivers/edac/i7300_edac.c
drivers/edac/i7core_edac.c
drivers/edac/i82975x_edac.c
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/input/matrix-keymap.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/media/platform/exynos-gsc/gsc-m2m.c
drivers/media/platform/exynos-gsc/gsc-regs.h
drivers/media/platform/s5p-fimc/fimc-capture.c
drivers/media/platform/s5p-fimc/fimc-lite.c
drivers/media/platform/s5p-fimc/fimc-m2m.c
drivers/media/platform/s5p-fimc/fimc-mdevice.c
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sh_mmcif.c
drivers/mtd/ubi/wl.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/team/team.c
drivers/net/usb/qmi_wwan.c
drivers/net/wan/ixp4xx_hss.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/remoteproc/remoteproc_virtio.c
drivers/rtc/rtc-tps65910.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/target/target_core_transport.c
drivers/vhost/vhost.c
fs/block_dev.c
fs/buffer.c
fs/cifs/file.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/direct-io.c
fs/file.c
fs/namei.c
fs/nfs/dir.c
include/linux/fs.h
include/linux/hw_breakpoint.h
include/linux/kernel.h
include/linux/mempolicy.h
include/linux/netdevice.h
include/linux/percpu-rwsem.h
include/net/tcp.h
include/uapi/linux/Kbuild
include/uapi/linux/hw_breakpoint.h [new file with mode: 0644]
kernel/events/hw_breakpoint.c
kernel/modsign_pubkey.c
kernel/module_signing.c
kernel/sched/auto_group.c
kernel/sched/auto_group.h
kernel/watchdog.c
kernel/workqueue.c
lib/Makefile
lib/asn1_decoder.c
mm/compaction.c
mm/memory-failure.c
mm/mempolicy.c
mm/page_alloc.c
mm/shmem.c
mm/sparse.c
mm/vmscan.c
net/can/bcm.c
net/core/dev.c
net/core/skbuff.c
net/ipv4/icmp.c
net/ipv4/inet_diag.c
net/ipv4/ip_fragment.c
net/ipv4/ipmr.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/irda/irttp.c
net/mac80211/offchannel.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/openvswitch/flow.c
net/openvswitch/vport-netdev.c
net/sctp/chunk.c
net/sctp/socket.c
net/sctp/transport.c
tools/Makefile
tools/perf/Makefile
tools/perf/arch/x86/include/perf_regs.h
tools/perf/builtin-kvm.c
tools/perf/builtin-test.c
tools/perf/perf.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/parse-events-test.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/pmu.h
tools/perf/util/session.h
tools/perf/util/strbuf.c
tools/scripts/Makefile.include

index 3d2fc460b22f38b6d37ba31016a96c6926bdc0b1..540f7b240c77d7332f7c51746de660a709e6c431 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*
@@ -1321,10 +1321,12 @@ kernelversion:
 
 # Clear a bunch of variables before executing the submake
 tools/: FORCE
-       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/
+       $(Q)mkdir -p $(objtree)/tools
+       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/
 
 tools/%: FORCE
-       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/ $*
+       $(Q)mkdir -p $(objtree)/tools
+       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/ $*
 
 # Single targets
 # ---------------------------------------------------------------------------
index ade7e924bef5faaf00de76c0354e025ed4371b35..9759fec0b704559454ce8e054eaaa5b6d7c25335 100644 (file)
@@ -547,6 +547,7 @@ config ARCH_KIRKWOOD
        select CPU_FEROCEON
        select GENERIC_CLOCKEVENTS
        select PCI
+       select PCI_QUIRKS
        select PLAT_ORION_LEGACY
        help
          Support for the following Marvell Kirkwood series SoCs:
index df13a3ffff3514b703584483d1b43a9cb12d3953..9d2d3ba339ff9e79593b233245804f36f6e2e7a2 100644 (file)
@@ -162,7 +162,6 @@ static struct clock_event_device sp804_clockevent = {
        .set_mode       = sp804_set_mode,
        .set_next_event = sp804_set_next_event,
        .rating         = 300,
-       .cpumask        = cpu_all_mask,
 };
 
 static struct irqaction sp804_timer_irq = {
@@ -185,6 +184,7 @@ void __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
        clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
        evt->name = name;
        evt->irq = irq;
+       evt->cpumask = cpu_possible_mask;
 
        setup_irq(irq, &sp804_timer_irq);
        clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
index 7bcd0dfce4b1ce51b8b0e1bfc96ebcf9a0167d12..b47f750386863a820119eb0d66e0a7da84fc5c99 100644 (file)
@@ -63,7 +63,7 @@ static inline int pmu_to_irq(int pin)
 
 static inline int irq_to_pmu(int irq)
 {
-       if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
+       if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
                return irq - IRQ_DOVE_PMU_START;
 
        return -EINVAL;
index 087711524e8a29d1e0d2de7c919bd705976ddbd7..bc4344aa10092dc8568ed8fe3890073e22147b53 100644 (file)
@@ -46,8 +46,20 @@ static void pmu_irq_ack(struct irq_data *d)
        int pin = irq_to_pmu(d->irq);
        u32 u;
 
+       /*
+        * The PMU mask register is not RW0C: it is RW.  This means that
+        * the bits take whatever value is written to them; if you write
+        * a '1', you will set the interrupt.
+        *
+        * Unfortunately this means there is NO race free way to clear
+        * these interrupts.
+        *
+        * So, let's structure the code so that the window is as small as
+        * possible.
+        */
        u = ~(1 << (pin & 31));
-       writel(u, PMU_INTERRUPT_CAUSE);
+       u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
+       writel_relaxed(u, PMU_INTERRUPT_CAUSE);
 }
 
 static struct irq_chip pmu_irq_chip = {
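The comment added above notes that the Dove PMU interrupt cause register is plain read/write rather than write-to-clear, so the ack has to read the current cause and write it back with only the target bit cleared, keeping the race window as small as possible. A minimal user-space sketch of that read-modify-write pattern (a volatile variable stands in for the real register; the names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a cause register that is plain RW, not write-to-clear. */
static volatile uint32_t fake_cause = 0xffffffff;

/* Clear only the bit for 'pin', mirroring the fixed pmu_irq_ack() flow. */
static void ack_pin(int pin)
{
	uint32_t u = ~(1u << (pin & 31));	/* mask with the target bit low */
	u &= fake_cause;			/* read the current cause */
	fake_cause = u;				/* write back, clearing one bit */
}

int main(void)
{
	ack_pin(3);
	printf("cause = 0x%08x\n", (unsigned)fake_cause);	/* bit 3 cleared */
	return 0;
}

Any bit that becomes set between the read and the write is still lost, which is exactly the race the kernel comment says cannot be closed, only narrowed.
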
index 1694f01ce2b6782deeb09c02b0ba1a5b4ade0918..6d6bde3e15fad4e4cf094a22c6ae6b8837a1580f 100644 (file)
@@ -410,6 +410,7 @@ void __init ixp4xx_pci_preinit(void)
                 * Enable the IO window to be way up high, at 0xfffffc00
                 */
                local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01);
+               local_write_config(0x40, 4, 0x000080FF); /* No TRDY time limit */
        } else {
                printk("PCI: IXP4xx is target - No bus scan performed\n");
        }
index fdf91a160884407816d2ceb4fa577e1a9de97054..8c0c0e2d0727317078a93f2241763818acf1ac15 100644 (file)
@@ -67,15 +67,12 @@ static struct map_desc ixp4xx_io_desc[] __initdata = {
                .pfn            = __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
                .length         = IXP4XX_PCI_CFG_REGION_SIZE,
                .type           = MT_DEVICE
-       },
-#ifdef CONFIG_DEBUG_LL
-       {       /* Debug UART mapping */
-               .virtual        = (unsigned long)IXP4XX_DEBUG_UART_BASE_VIRT,
-               .pfn            = __phys_to_pfn(IXP4XX_DEBUG_UART_BASE_PHYS),
-               .length         = IXP4XX_DEBUG_UART_REGION_SIZE,
+       }, {    /* Queue Manager */
+               .virtual        = (unsigned long)IXP4XX_QMGR_BASE_VIRT,
+               .pfn            = __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS),
+               .length         = IXP4XX_QMGR_REGION_SIZE,
                .type           = MT_DEVICE
-       }
-#endif
+       },
 };
 
 void __init ixp4xx_map_io(void)
index b800a031207c9db0eaaff139fb367b68374420f8..53b8348dfcc279c19fed814ac4f1d19afdafe5b6 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
 #include <asm/mach/pci.h>
+#include <asm/system_info.h>
 
 #define SLOT_ETHA              0x0B    /* IDSEL = AD21 */
 #define SLOT_ETHB              0x0C    /* IDSEL = AD20 */
@@ -329,7 +330,7 @@ static struct platform_device device_hss_tab[] = {
 };
 
 
-static struct platform_device *device_tab[6] __initdata = {
+static struct platform_device *device_tab[7] __initdata = {
        &device_flash,          /* index 0 */
 };
 
index 8c9f8d56449231c26f6c24ff74df7268063f6c77..ff686cbc5df44d54547b4816588fe808a5bdd424 100644 (file)
@@ -17,8 +17,8 @@
 #else
                mov     \rp, #0
 #endif
-                orr     \rv, \rp, #0xff000000  @ virtual
-               orr     \rv, \rv, #0x00b00000
+               orr     \rv, \rp, #0xfe000000   @ virtual
+               orr     \rv, \rv, #0x00f00000
                 orr     \rp, \rp, #0xc8000000  @ physical
                 .endm
 
index eb68b61ce975cdff4e96e0a411edec96832b8b2f..c5bae9c035d533e1bd1bba3246bf766e8a53c447 100644 (file)
  *
  * 0x50000000  0x10000000      ioremap'd       EXP BUS
  *
- * 0x6000000   0x00004000      ioremap'd       QMgr
+ * 0xC8000000  0x00013000      0xFEF00000      On-Chip Peripherals
  *
- * 0xC0000000  0x00001000      0xffbff000      PCI CFG
+ * 0xC0000000  0x00001000      0xFEF13000      PCI CFG
  *
- * 0xC4000000  0x00001000      0xffbfe000      EXP CFG
+ * 0xC4000000  0x00001000      0xFEF14000      EXP CFG
  *
- * 0xC8000000  0x00013000      0xffbeb000      On-Chip Peripherals
+ * 0x60000000  0x00004000      0xFEF15000      QMgr
  */
 
 /*
  * Queue Manager
  */
-#define IXP4XX_QMGR_BASE_PHYS          (0x60000000)
-#define IXP4XX_QMGR_REGION_SIZE                (0x00004000)
+#define IXP4XX_QMGR_BASE_PHYS          0x60000000
+#define IXP4XX_QMGR_BASE_VIRT          IOMEM(0xFEF15000)
+#define IXP4XX_QMGR_REGION_SIZE                0x00004000
 
 /*
- * Expansion BUS Configuration registers
+ * Peripheral space, including debug UART. Must be section-aligned so that
+ * it can be used with the low-level debug code.
  */
-#define IXP4XX_EXP_CFG_BASE_PHYS       (0xC4000000)
-#define IXP4XX_EXP_CFG_BASE_VIRT       IOMEM(0xFFBFE000)
-#define IXP4XX_EXP_CFG_REGION_SIZE     (0x00001000)
+#define IXP4XX_PERIPHERAL_BASE_PHYS    0xC8000000
+#define IXP4XX_PERIPHERAL_BASE_VIRT    IOMEM(0xFEF00000)
+#define IXP4XX_PERIPHERAL_REGION_SIZE  0x00013000
 
 /*
  * PCI Config registers
  */
-#define IXP4XX_PCI_CFG_BASE_PHYS       (0xC0000000)
-#define        IXP4XX_PCI_CFG_BASE_VIRT        IOMEM(0xFFBFF000)
-#define IXP4XX_PCI_CFG_REGION_SIZE     (0x00001000)
-
-/*
- * Peripheral space
- */
-#define IXP4XX_PERIPHERAL_BASE_PHYS    (0xC8000000)
-#define IXP4XX_PERIPHERAL_BASE_VIRT    IOMEM(0xFFBEB000)
-#define IXP4XX_PERIPHERAL_REGION_SIZE  (0x00013000)
+#define IXP4XX_PCI_CFG_BASE_PHYS       0xC0000000
+#define IXP4XX_PCI_CFG_BASE_VIRT       IOMEM(0xFEF13000)
+#define IXP4XX_PCI_CFG_REGION_SIZE     0x00001000
 
 /*
- * Debug UART
- *
- * This is basically a remap of UART1 into a region that is section
- * aligned so that it * can be used with the low-level debug code.
+ * Expansion BUS Configuration registers
  */
-#define        IXP4XX_DEBUG_UART_BASE_PHYS     (0xC8000000)
-#define        IXP4XX_DEBUG_UART_BASE_VIRT     IOMEM(0xffb00000)
-#define        IXP4XX_DEBUG_UART_REGION_SIZE   (0x00001000)
+#define IXP4XX_EXP_CFG_BASE_PHYS       0xC4000000
+#define IXP4XX_EXP_CFG_BASE_VIRT       0xFEF14000
+#define IXP4XX_EXP_CFG_REGION_SIZE     0x00001000
 
 #define IXP4XX_EXP_CS0_OFFSET  0x00
 #define IXP4XX_EXP_CS1_OFFSET   0x04
index 9e7cad2d54cb9e9add09c676378e7956b6d70b71..4de8da536dbb2aa11cc0e0ad03d00090fe31e7bb 100644 (file)
@@ -86,7 +86,7 @@ void qmgr_release_queue(unsigned int queue);
 
 static inline void qmgr_put_entry(unsigned int queue, u32 val)
 {
-       extern struct qmgr_regs __iomem *qmgr_regs;
+       struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 #if DEBUG_QMGR
        BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
 
@@ -99,7 +99,7 @@ static inline void qmgr_put_entry(unsigned int queue, u32 val)
 static inline u32 qmgr_get_entry(unsigned int queue)
 {
        u32 val;
-       extern struct qmgr_regs __iomem *qmgr_regs;
+       const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
        val = __raw_readl(&qmgr_regs->acc[queue][0]);
 #if DEBUG_QMGR
        BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
@@ -112,14 +112,14 @@ static inline u32 qmgr_get_entry(unsigned int queue)
 
 static inline int __qmgr_get_stat1(unsigned int queue)
 {
-       extern struct qmgr_regs __iomem *qmgr_regs;
+       const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
        return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
                >> ((queue & 7) << 2)) & 0xF;
 }
 
 static inline int __qmgr_get_stat2(unsigned int queue)
 {
-       extern struct qmgr_regs __iomem *qmgr_regs;
+       const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
        BUG_ON(queue >= HALF_QUEUES);
        return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
                >> ((queue & 0xF) << 1)) & 0x3;
@@ -145,7 +145,7 @@ static inline int qmgr_stat_empty(unsigned int queue)
  */
 static inline int qmgr_stat_below_low_watermark(unsigned int queue)
 {
-       extern struct qmgr_regs __iomem *qmgr_regs;
+       const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
        if (queue >= HALF_QUEUES)
                return (__raw_readl(&qmgr_regs->statne_h) >>
                        (queue - HALF_QUEUES)) & 0x01;
@@ -172,7 +172,7 @@ static inline int qmgr_stat_above_high_watermark(unsigned int queue)
  */
 static inline int qmgr_stat_full(unsigned int queue)
 {
-       extern struct qmgr_regs __iomem *qmgr_regs;
+       const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
        if (queue >= HALF_QUEUES)
                return (__raw_readl(&qmgr_regs->statf_h) >>
                        (queue - HALF_QUEUES)) & 0x01;
index a17ed79207a4fce30f67b405f758cd07b23f85ad..d4eb09a62863639ebb8205e2d203d4c9d089462f 100644 (file)
 /* NPE mailbox_status value for reset */
 #define RESET_MBOX_STAT                        0x0000F0F0
 
-const char *npe_names[] = { "NPE-A", "NPE-B", "NPE-C" };
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
 
 #define print_npe(pri, npe, fmt, ...)                                  \
        printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)
@@ -724,6 +728,9 @@ module_exit(npe_cleanup_module);
 
 MODULE_AUTHOR("Krzysztof Halasa");
 MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
 
 EXPORT_SYMBOL(npe_names);
 EXPORT_SYMBOL(npe_running);
index 852f7c9f87d06ff69c6defde901732a3926aea7d..9d1b6b7c394cfb5eb5ef2968065f97b5f2b875a7 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/module.h>
 #include <mach/qmgr.h>
 
-struct qmgr_regs __iomem *qmgr_regs;
+static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
 static struct resource *mem_res;
 static spinlock_t qmgr_lock;
 static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
@@ -293,12 +293,6 @@ static int qmgr_init(void)
        if (mem_res == NULL)
                return -EBUSY;
 
-       qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
-       if (qmgr_regs == NULL) {
-               err = -ENOMEM;
-               goto error_map;
-       }
-
        /* reset qmgr registers */
        for (i = 0; i < 4; i++) {
                __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
@@ -347,8 +341,6 @@ static int qmgr_init(void)
 error_irq2:
        free_irq(IRQ_IXP4XX_QM1, NULL);
 error_irq:
-       iounmap(qmgr_regs);
-error_map:
        release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
        return err;
 }
@@ -359,7 +351,6 @@ static void qmgr_remove(void)
        free_irq(IRQ_IXP4XX_QM2, NULL);
        synchronize_irq(IRQ_IXP4XX_QM1);
        synchronize_irq(IRQ_IXP4XX_QM2);
-       iounmap(qmgr_regs);
        release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
 }
 
@@ -369,7 +360,6 @@ module_exit(qmgr_remove);
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Krzysztof Halasa");
 
-EXPORT_SYMBOL(qmgr_regs);
 EXPORT_SYMBOL(qmgr_set_irq);
 EXPORT_SYMBOL(qmgr_enable_irq);
 EXPORT_SYMBOL(qmgr_disable_irq);
index ec544918b12c057b56109c1e688a217bf9ac5750..74fc5a074fc453e8ba78d686ef8ecf9409f14c1b 100644 (file)
@@ -207,14 +207,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
        return 1;
 }
 
+/*
+ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
+ * is operating as a root complex this needs to be switched to
+ * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
+ * the device. Decoding setup is handled by the orion code.
+ */
 static void __devinit rc_pci_fixup(struct pci_dev *dev)
 {
-       /*
-        * Prevent enumeration of root complex.
-        */
        if (dev->bus->parent == NULL && dev->devfn == 0) {
                int i;
 
+               dev->class &= 0xff;
+               dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
                for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                        dev->resource[i].start = 0;
                        dev->resource[i].end   = 0;
index db98e7021f0daf7507fe55f91f1e73ef761a6dc3..0abd1c4698875f4edbb5be3cd506206c676019b6 100644 (file)
@@ -473,12 +473,13 @@ int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
                         chan->number, __func__, buf);
 
-               if (chan->end == NULL)
+               if (chan->end == NULL) {
                        pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
                                 chan->number, __func__, chan);
-
-               chan->end->next = buf;
-               chan->end = buf;
+               } else {
+                       chan->end->next = buf;
+                       chan->end = buf;
+               }
        }
 
        /* if necessary, update the next buffer field */
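The brace change above fixes a classic dangling-statement bug: without braces the NULL check guarded only the pr_debug() call, and chan->end was dereferenced anyway when it was NULL. A small standalone illustration of the before/after control flow (simplified structures, not the s3c24xx ones):

#include <stdio.h>

struct buf  { struct buf *next; };
struct chan { struct buf *end; };

/* Broken shape: the if only covers the diagnostic, so the NULL pointer
 * is still dereferenced on the next line. */
static void enqueue_broken(struct chan *chan, struct buf *buf)
{
	if (chan->end == NULL)
		fprintf(stderr, "queue not empty but end == NULL?\n");

	chan->end->next = buf;	/* crashes when chan->end is NULL */
	chan->end = buf;
}

/* Fixed shape: only link the buffer when there is a valid tail. */
static void enqueue_fixed(struct chan *chan, struct buf *buf)
{
	if (chan->end == NULL) {
		fprintf(stderr, "queue not empty but end == NULL?\n");
	} else {
		chan->end->next = buf;
		chan->end = buf;
	}
}

int main(void)
{
	struct chan c = { .end = NULL };
	struct buf  b = { .next = NULL };

	enqueue_fixed(&c, &b);		/* safe: logs and skips the link */
	(void)enqueue_broken;		/* calling this here would dereference NULL */
	return 0;
}
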
index 6d909faebf28d155020ef96e17654d5ae74bed6f..656a6f291a35195bc697a1f5e5112af7f1eab356 100644 (file)
@@ -392,7 +392,7 @@ __SYSCALL(367, sys_fanotify_init)
 __SYSCALL(368, compat_sys_fanotify_mark_wrapper)
 __SYSCALL(369, sys_prlimit64)
 __SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, sys_open_by_handle_at)
+__SYSCALL(371, compat_sys_open_by_handle_at)
 __SYSCALL(372, sys_clock_adjtime)
 __SYSCALL(373, sys_syncfs)
 
diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h
new file mode 100644 (file)
index 0000000..ecead15
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SETUP_H
+#define _ASM_C6X_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#ifndef __ASSEMBLY__
+extern char c6x_command_line[COMMAND_LINE_SIZE];
+
+extern int c6x_add_memory(phys_addr_t start, unsigned long size);
+
+extern unsigned long ram_start;
+extern unsigned long ram_end;
+
+extern int c6x_num_cores;
+extern unsigned int c6x_silicon_rev;
+extern unsigned int c6x_devstat;
+extern unsigned char c6x_fuse_mac[6];
+
+extern void machine_init(unsigned long dt_ptr);
+extern void time_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_C6X_SETUP_H */
index c312b424c4331b58aed335e43a3044e6088fce2c..e9bc2b2b814740d82e9a4fb9eae800a3bbfe5aad 100644 (file)
@@ -1,6 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += kvm_para.h
+
 header-y += byteorder.h
 header-y += kvm_para.h
 header-y += ptrace.h
diff --git a/arch/c6x/include/uapi/asm/kvm_para.h b/arch/c6x/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index 14fab8f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
index a01e31896fa90937dec74fa746e6745c72a94891..ad9ac97a8dad3be119ab72754ab24f871c6ff880 100644 (file)
@@ -1,33 +1,6 @@
-/*
- *  Port on Texas Instruments TMS320C6x architecture
- *
- *  Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated
- *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_SETUP_H
-#define _ASM_C6X_SETUP_H
+#ifndef _UAPI_ASM_C6X_SETUP_H
+#define _UAPI_ASM_C6X_SETUP_H
 
 #define COMMAND_LINE_SIZE   1024
 
-#ifndef __ASSEMBLY__
-extern char c6x_command_line[COMMAND_LINE_SIZE];
-
-extern int c6x_add_memory(phys_addr_t start, unsigned long size);
-
-extern unsigned long ram_start;
-extern unsigned long ram_end;
-
-extern int c6x_num_cores;
-extern unsigned int c6x_silicon_rev;
-extern unsigned int c6x_devstat;
-extern unsigned char c6x_fuse_mac[6];
-
-extern void machine_init(unsigned long dt_ptr);
-extern void time_init(void);
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_C6X_SETUP_H */
+#endif /* _UAPI_ASM_C6X_SETUP_H */
index 5449c36018fe2d269b2daaa162a25a39bfa53c9b..0ed6157dd256a81aeefc9e8de971f834f313a11d 100644 (file)
@@ -277,6 +277,8 @@ work_rescheduled:
  [A1]  BNOP    .S1     work_resched,5
 
 work_notifysig:
+       ;; enable interrupts for do_notify_resume()
+       UNMASK_INT B2
        B       .S2     do_notify_resume
        LDW     .D2T1   *+SP(REGS__END+8),A6 ; syscall flag
        ADDKPC  .S2     resume_userspace,B3,1
@@ -427,8 +429,7 @@ ENTRY(ret_from_kernel_execve)
 ENDPROC(ret_from_kernel_execve)
 
        ;;
-       ;; These are the interrupt handlers, responsible for calling __do_IRQ()
-       ;; int6 is used for syscalls (see _system_call entry)
+       ;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
        ;;
        .macro SAVE_ALL_INT
        SAVE_ALL IRP,ITSR
index 3847e5b9c601f68e189ca21749300b6b7e416415..3903e3d11f5a557d673fbcfcb051e1fd17e49392 100644 (file)
@@ -111,7 +111,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
 
        /* It is more difficult to avoid calling this function than to
         call it and ignore errors. */
-       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1))
+       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1) == -EFAULT)
                goto badframe;
 
        return rval;
index bd94946a18f343da04b48aba1a13b83f6e724c18..ef99db994c2f9738dae4cc80a320aebcb55930f3 100644 (file)
@@ -95,7 +95,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             pte_t *ptep, pte_t pte,
                                             int dirty)
 {
-       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+       int changed = !pte_same(*ptep, pte);
+
+       if (changed) {
+               set_pte_at(vma->vm_mm, addr, ptep, pte);
+               /*
+                * There could be some standard sized pages in there,
+                * get them all.
+                */
+               flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+       }
+       return changed;
 }
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
index b1fb7af3c35058f2e739b4ec41fb28d62d00c2d2..cce3782c96c9b792e412abc32fed55547dcde7c9 100644 (file)
@@ -510,7 +510,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                                c->cputype = CPU_R3000A;
                                __cpu_name[cpu] = "R3000A";
                        }
-                       break;
                } else {
                        c->cputype = CPU_R3000;
                        __cpu_name[cpu] = "R3000";
index a6c133212003ed96537e6f950ec71c59d98559fb..9b00362f32f6d95d4577817bbd39757f880d0639 100644 (file)
@@ -36,6 +36,11 @@ FEXPORT(ret_from_exception)
 FEXPORT(ret_from_irq)
        LONG_S  s0, TI_REGS($28)
 FEXPORT(__ret_from_irq)
+/*
+ * We can be coming here from a syscall done in the kernel space,
+ * e.g. a failed kernel_execve().
+ */
+resume_userspace_check:
        LONG_L  t0, PT_STATUS(sp)               # returning to kernel mode?
        andi    t0, t0, KU_USER
        beqz    t0, resume_kernel
@@ -162,7 +167,7 @@ work_notifysig:                             # deal with pending signals and
        move    a0, sp
        li      a1, 0
        jal     do_notify_resume        # a2 already loaded
-       j       resume_userspace
+       j       resume_userspace_check
 
 FEXPORT(syscall_exit_partial)
        local_irq_disable               # make sure need_resched doesn't
index f6ba8381ee0186c5dfc1f19879ae4c39d15d52e9..86ec03f0e00c48f966e428eb1a7844cfcf3eff1a 100644 (file)
@@ -397,14 +397,14 @@ EXPORT(sysn32_call_table)
        PTR     sys_timerfd_create
        PTR     compat_sys_timerfd_gettime      /* 6285 */
        PTR     compat_sys_timerfd_settime
-       PTR     sys_signalfd4
+       PTR     compat_sys_signalfd4
        PTR     sys_eventfd2
        PTR     sys_epoll_create1
        PTR     sys_dup3                        /* 6290 */
        PTR     sys_pipe2
        PTR     sys_inotify_init1
-       PTR     sys_preadv
-       PTR     sys_pwritev
+       PTR     compat_sys_preadv
+       PTR     compat_sys_pwritev
        PTR     compat_sys_rt_tgsigqueueinfo    /* 6295 */
        PTR     sys_perf_event_open
        PTR     sys_accept4
index 4b9b935a070e0c4d0160f2c259c3dc553c40b35e..88e79ad6f811e79d07fc45cd51cf2a0580cb20bc 100644 (file)
@@ -120,18 +120,11 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;
-               int huge = is_vm_hugetlb_page(vma);
 
                ENTER_CRITICAL(flags);
-               if (huge) {
-                       start = round_down(start, HPAGE_SIZE);
-                       end = round_up(end, HPAGE_SIZE);
-                       size = (end - start) >> HPAGE_SHIFT;
-               } else {
-                       start = round_down(start, PAGE_SIZE << 1);
-                       end = round_up(end, PAGE_SIZE << 1);
-                       size = (end - start) >> (PAGE_SHIFT + 1);
-               }
+               start = round_down(start, PAGE_SIZE << 1);
+               end = round_up(end, PAGE_SIZE << 1);
+               size = (end - start) >> (PAGE_SHIFT + 1);
                if (size <= current_cpu_data.tlbsize/2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);
@@ -140,10 +133,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                int idx;
 
                                write_c0_entryhi(start | newpid);
-                               if (huge)
-                                       start += HPAGE_SIZE;
-                               else
-                                       start += (PAGE_SIZE << 1);
+                               start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
index 30110297f4f9509d6437c8f5c3e80220add6e2a9..ddedc8a77861ac4372832be8e59ce60d54d616f7 100644 (file)
@@ -84,7 +84,6 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
 {
        struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
        sigset_t set;
-       stack_t st;
 
        /*
         * Since we stacked the signal on a dword boundary,
@@ -104,11 +103,10 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
                goto badframe;
 
-       if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
-               goto badframe;
        /* It is more difficult to avoid calling this function than to
           call it and ignore errors.  */
-       do_sigaltstack(&st, NULL, regs->sp);
+       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
+               goto badframe;
 
        return regs->gpr[11];
 
index 3735abd7f8f6f067488b20de9b44ab487ac2dff2..cbf5d59d5d6a8943edd6c74a0f1e6013aa640c19 100644 (file)
@@ -60,7 +60,7 @@
        ENTRY_SAME(fork_wrapper)
        ENTRY_SAME(read)
        ENTRY_SAME(write)
-       ENTRY_SAME(open)                /* 5 */
+       ENTRY_COMP(open)                /* 5 */
        ENTRY_SAME(close)
        ENTRY_SAME(waitpid)
        ENTRY_SAME(creat)
index ad79b846535c4d9345e884341f40481e9d38c492..827e094a2f494d09bdf1936cde25df431ec7c012 100644 (file)
@@ -28,7 +28,7 @@ ENTRY(sys32_open_wrapper)
        llgtr   %r2,%r2                 # const char *
        lgfr    %r3,%r3                 # int
        lgfr    %r4,%r4                 # int
-       jg      sys_open                # branch to system call
+       jg      compat_sys_open         # branch to system call
 
 ENTRY(sys32_close_wrapper)
        llgfr   %r2,%r2                 # unsigned int
index c268bbf8b41047b2717a5a41a7ed86178a7df24e..02353bde92d883c3aeb0557ba423e0e3c16edfad 100644 (file)
@@ -148,7 +148,6 @@ score_rt_sigreturn(struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
        sigset_t set;
-       stack_t st;
        int sig;
 
        /* Always make any pending restarted system calls return -EINTR */
@@ -168,12 +167,10 @@ score_rt_sigreturn(struct pt_regs *regs)
        else if (sig)
                force_sig(sig, current);
 
-       if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
-               goto badframe;
-
        /* It is more difficult to avoid calling this function than to
           call it and ignore errors.  */
-       do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]);
+       if (do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs->regs[0]) == -EFAULT)
+               goto badframe;
        regs->is_syscall = 0;
 
        __asm__ __volatile__(
index 23853814bd174284a3a75c62b88154ac8fb890d3..d867cd95a62261c7ae25a91b4ce5d36b179b8f0f 100644 (file)
@@ -347,7 +347,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
 {
        struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
        sigset_t set;
-       stack_t __user st;
        long long ret;
 
        /* Always make any pending restarted system calls return -EINTR */
@@ -365,11 +364,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
                goto badframe;
        regs->pc -= 4;
 
-       if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
-               goto badframe;
        /* It is more difficult to avoid calling this function than to
           call it and ignore errors.  */
-       do_sigaltstack(&st, NULL, REF_REG_SP);
+       if (do_sigaltstack(&frame->uc.uc_stack, NULL, REF_REG_SP) == -EFAULT)
+               goto badframe;
 
        return (int) ret;
 
index c0a798fcf030ab7903d0508d0202d1a813cb52ca..bb7c95161d7188f5defd780ddec934b0395b48b0 100644 (file)
@@ -81,18 +81,18 @@ static void usage(void)
 
 static int start_line(const char *line)
 {
-       if (strcmp(line + 8, " T _start\n") == 0)
+       if (strcmp(line + 10, " _start\n") == 0)
                return 1;
-       else if (strcmp(line + 16, " T _start\n") == 0)
+       else if (strcmp(line + 18, " _start\n") == 0)
                return 1;
        return 0;
 }
 
 static int end_line(const char *line)
 {
-       if (strcmp(line + 8, " A _end\n") == 0)
+       if (strcmp(line + 10, " _end\n") == 0)
                return 1;
-       else if (strcmp (line + 16, " A _end\n") == 0)
+       else if (strcmp (line + 18, " _end\n") == 0)
                return 1;
        return 0;
 }
@@ -100,8 +100,8 @@ static int end_line(const char *line)
 /*
  * Find address for start and end in System.map.
  * The file looks like this:
- * f0004000 T _start
- * f0379f79 A _end
+ * f0004000 ... _start
+ * f0379f79 ... _end
  * 1234567890123456
  * ^coloumn 1
  * There is support for 64 bit addresses too.
index 44025f4ba41f883d61647f180575651ee5113645..8475a474273ae1c615ce4277dd3bf74ba1ce9f0a 100644 (file)
@@ -47,7 +47,7 @@ STUB: sra     REG1, 0, REG1; \
        sra     REG4, 0, REG4
 
 SIGN1(sys32_exit, sparc_exit, %o0)
-SIGN1(sys32_exit_group, sys_exit_group, %o0)
+SIGN1(sys32_exit_group, sparc_exit_group, %o0)
 SIGN1(sys32_wait4, compat_sys_wait4, %o2)
 SIGN1(sys32_creat, sys_creat, %o1)
 SIGN1(sys32_mknod, sys_mknod, %o1)
index 7f5f65d0b3fde92235c838bd976916bc863d0315..bf2347794e3323c3220c92e4a96849d8c89d5fbf 100644 (file)
@@ -118,10 +118,20 @@ ret_from_syscall:
        ba,pt   %xcc, ret_sys_call
         ldx    [%sp + PTREGS_OFF + PT_V9_I0], %o0
 
+       .globl  sparc_exit_group
+       .type   sparc_exit_group,#function
+sparc_exit_group:
+       sethi   %hi(sys_exit_group), %g7
+       ba,pt   %xcc, 1f
+        or     %g7, %lo(sys_exit_group), %g7
+       .size   sparc_exit_group,.-sparc_exit_group
+
        .globl  sparc_exit
        .type   sparc_exit,#function
 sparc_exit:
-       rdpr    %pstate, %g2
+       sethi   %hi(sys_exit), %g7
+       or      %g7, %lo(sys_exit), %g7
+1:     rdpr    %pstate, %g2
        wrpr    %g2, PSTATE_IE, %pstate
        rdpr    %otherwin, %g1
        rdpr    %cansave, %g3
@@ -129,7 +139,7 @@ sparc_exit:
        wrpr    %g3, 0x0, %cansave
        wrpr    %g0, 0x0, %otherwin
        wrpr    %g2, 0x0, %pstate
-       ba,pt   %xcc, sys_exit
+       jmpl    %g7, %g0
         stb    %g0, [%g6 + TI_WSAVED]
        .size   sparc_exit,.-sparc_exit
 
index 1c9af9fa38e9b0b489cb38899685e4c4c76b6584..017b74a63dcb995dd9c18049bd1a1604d329fea5 100644 (file)
@@ -133,7 +133,7 @@ sys_call_table:
 /*170*/        .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
        .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
 /*180*/        .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
-       .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
+       .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname
 /*190*/        .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
        .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask
 /*200*/        .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
index 3a8ece7d09ca2015b3e2c218a3e7a84e951d9bee..0d7103c9eff3d21c29f15f2f54d5aeabcb0a8449 100644 (file)
@@ -32,13 +32,14 @@ void flush_thread(void)
                       "err = %d\n", ret);
                force_sig(SIGKILL, current);
        }
+       get_safe_registers(current_pt_regs()->regs.gp,
+                          current_pt_regs()->regs.fp);
 
        __switch_mm(&current->mm->context.id);
 }
 
 void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 {
-       get_safe_registers(regs->regs.gp, regs->regs.fp);
        PT_REGS_IP(regs) = eip;
        PT_REGS_SP(regs) = esp;
        current->ptrace &= ~PT_DTRACE;
index 66e5f0ef0523ce2961a950c4657719e31c3e8db1..79fd8a3418f9d5cca461bef7fa0fb874c0d69aee 100644 (file)
@@ -12,6 +12,7 @@ header-y += mce.h
 header-y += msr-index.h
 header-y += msr.h
 header-y += mtrr.h
+header-y += perf_regs.h
 header-y += posix_types_32.h
 header-y += posix_types_64.h
 header-y += posix_types_x32.h
@@ -19,8 +20,10 @@ header-y += prctl.h
 header-y += processor-flags.h
 header-y += ptrace-abi.h
 header-y += sigcontext32.h
+header-y += svm.h
 header-y += ucontext.h
 header-y += vm86.h
+header-y += vmx.h
 header-y += vsyscall.h
 
 genhdr-y += unistd_32.h
index 831dbb9c6c028537402bd65d46e94320a70ee7de..41ab26ea65648b43a910f97cd7903c8bbbc2516c 100644 (file)
@@ -399,14 +399,17 @@ static inline void drop_init_fpu(struct task_struct *tsk)
 typedef struct { int preload; } fpu_switch_t;
 
 /*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
+ * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * on this CPU.
  *
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, it will still be saved by.
  */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+       per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
        return new == this_cpu_read_stable(fpu_owner_task) &&
index 957a47aec64e12d1833af368c337e4b4e03f56bd..4dac2f68ed4aaf2bf35e10687dbdf44234829347 100644 (file)
@@ -292,8 +292,8 @@ default_entry:
  *     be using the global pages. 
  *
  *     NOTE! If we are on a 486 we may have no cr4 at all!
- *     Specifically, cr4 exists if and only if CPUID exists,
- *     which in turn exists if and only if EFLAGS.ID exists.
+ *     Specifically, cr4 exists if and only if CPUID exists
+ *     and has flags other than the FPU flag set.
  */
        movl $X86_EFLAGS_ID,%ecx
        pushl %ecx
@@ -308,6 +308,11 @@ default_entry:
        testl %ecx,%eax
        jz 6f                   # No ID flag = no CPUID = no CR4
 
+       movl $1,%eax
+       cpuid
+       andl $~1,%edx           # Ignore CPUID.FPU
+       jz 6f                   # No flags or only CPUID.FPU = no CR4
+
        movl pa(mmu_cr4_features),%eax
        movl %eax,%cr4
 
index 5e0596b0632e244676c686bb0b338ea1e255473f..974b67e46dd0edeb38bbffcc77442d4f57e514d5 100644 (file)
@@ -1541,6 +1541,13 @@ void syscall_trace_leave(struct pt_regs *regs)
 {
        bool step;
 
+       /*
+        * We may come here right after calling schedule_user()
+        * or do_notify_resume(), in which case we can be in RCU
+        * user mode.
+        */
+       rcu_user_exit();
+
        audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
index c80a33bc528b8e33ec6a1b9b4e6ca02964c21d7e..f3e2ec878b8c3ecad60901a2818d0ad30eeb29ca 100644 (file)
@@ -68,6 +68,8 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
@@ -818,6 +820,9 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
+       /* the FPU context is blank, nobody can own it */
+       __cpu_disable_lazy_restore(cpu);
+
        err = do_boot_cpu(apicid, cpu, tidle);
        if (err) {
                pr_debug("do_boot_cpu failed %d\n", err);
index 39171cb307ea05d6687bfc083ddd87935cf4ca4d..bba39bfa1c4b03806cf1f42773cd8041f18b8759 100644 (file)
@@ -426,8 +426,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "=m" ((ctxt)->eflags), "=&r" (_tmp),          \
                          "+a" (*rax), "+d" (*rdx), "+qm"(_ex)          \
-                       : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),     \
-                         "a" (*rax), "d" (*rdx));                      \
+                       : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val));    \
        } while (0)
 
 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
index 89b30f32ba68b4f1e5cd58fc5911a95213abf163..ff7bb8a42ed62b042bff5f786b94775f8bfb87bc 100644 (file)
@@ -1961,6 +1961,7 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
     res = loader_verify(lb, dev, rec);
     if (res)
       break;
+    rec = ihex_next_binrec(rec);
   }
   release_firmware(fw);
   if (!res)
index fbd9b2b850ef1de0a84a57182ff79bdd38064896..c58ea9b80b1a3b63e65a52236d71c8db4cfa02c4 100644 (file)
@@ -127,12 +127,12 @@ config HW_RANDOM_VIA
          If unsure, say Y.
 
 config HW_RANDOM_IXP4XX
-       tristate "Intel IXP4xx NPU HW Random Number Generator support"
+       tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
        depends on HW_RANDOM && ARCH_IXP4XX
        default HW_RANDOM
        ---help---
-         This driver provides kernel-side support for the Random
-         Number Generator hardware found on the Intel IXP4xx NPU.
+         This driver provides kernel-side support for the Pseudo-Random
+         Number Generator hardware found on the Intel IXP45x/46x NPU.
 
          To compile this driver as a module, choose M here: the
          module will be called ixp4xx-rng.
index 263567f5f3923fb12ae13d6401aa84f1b636aa07..beec1627db3c1b147fb4be577c61697ef2e1005f 100644 (file)
@@ -45,6 +45,9 @@ static int __init ixp4xx_rng_init(void)
        void __iomem * rng_base;
        int err;
 
+       if (!cpu_is_ixp46x()) /* includes IXP455 */
+               return -ENOSYS;
+
        rng_base = ioremap(0x70002100, 4);
        if (!rng_base)
                return -ENOMEM;
@@ -68,5 +71,5 @@ module_init(ixp4xx_rng_init);
 module_exit(ixp4xx_rng_exit);
 
 MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
+MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
 MODULE_LICENSE("GPL");
index 0bb207eaef2ff65e854866fddfdb11fce27cc066..54a3a6d09819922486f4de420f4ed0f96a6e7bb0 100644 (file)
@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 
 static const struct file_operations raw_fops = {
        .read           = do_sync_read,
-       .aio_read       = blkdev_aio_read,
+       .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = blkdev_aio_write,
        .fsync          = blkdev_fsync,
index 308c7fb92a60b25a27d92ef11a9acbe80f23b921..f6644f59fd9da1042cec58487791f689d7ed127a 100644 (file)
@@ -224,7 +224,7 @@ config CRYPTO_DEV_TALITOS
 
 config CRYPTO_DEV_IXP4XX
        tristate "Driver for IXP4xx crypto hardware acceleration"
-       depends on ARCH_IXP4XX
+       depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
        select CRYPTO_DES
        select CRYPTO_ALGAPI
        select CRYPTO_AUTHENC
index 8f3f74ce8c7fd7ac95e241c2c4504f06a52da38c..21180d6cad6e27f2f316a04b1e98fb2e61ac90d3 100644 (file)
@@ -750,12 +750,12 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
        }
        if (cipher_cfg & MOD_AES) {
                switch (key_len) {
-                       case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
-                       case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
-                       case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
-                       default:
-                               *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-                               return -EINVAL;
+               case 16: keylen_cfg = MOD_AES128; break;
+               case 24: keylen_cfg = MOD_AES192; break;
+               case 32: keylen_cfg = MOD_AES256; break;
+               default:
+                       *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+                       return -EINVAL;
                }
                cipher_cfg |= keylen_cfg;
        } else if (cipher_cfg & MOD_3DES) {
index 90f0b730e9bb1ee4a23a2941ea60021a12798d1f..75c0a1a85fc3ce553cb38f7099806874e26a2645 100644 (file)
@@ -416,10 +416,18 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
                dimm->cschannel = chn;
 
                /* Increment csrow location */
-               row++;
-               if (row == tot_csrows) {
-                       row = 0;
+               if (layers[0].is_virt_csrow) {
                        chn++;
+                       if (chn == tot_channels) {
+                               chn = 0;
+                               row++;
+                       }
+               } else {
+                       row++;
+                       if (row == tot_csrows) {
+                               row = 0;
+                               chn++;
+                       }
                }
 
                /* Increment dimm location */
index a09d0667f72acb4aca4dda012b0d172a927182eb..9d669cd43618b605d7db354fde86df8c90c64f08 100644 (file)
@@ -197,8 +197,8 @@ static const char *ferr_fat_fbd_name[] = {
        [0]  = "Memory Write error on non-redundant retry or "
               "FBD configuration Write error on retry",
 };
-#define GET_FBD_FAT_IDX(fbderr)        (fbderr & (3 << 28))
-#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
+#define GET_FBD_FAT_IDX(fbderr)        (((fbderr) >> 28) & 3)
+#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
 
 #define FERR_NF_FBD    0xa0
 static const char *ferr_nf_fbd_name[] = {
@@ -225,7 +225,7 @@ static const char *ferr_nf_fbd_name[] = {
        [1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
        [0]  = "Uncorrectable Data ECC on Replay",
 };
-#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
+#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
 #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
                              (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
                              (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
@@ -464,7 +464,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
                errnum = find_first_bit(&errors,
                                        ARRAY_SIZE(ferr_nf_fbd_name));
                specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
-               branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
+               branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
 
                pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
                        REDMEMA, &syndrome);
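The macro change above matters because the old GET_FBD_FAT_IDX()/GET_FBD_NF_IDX() only masked the field, leaving the two index bits at positions 29:28, so a later comparison such as == 2 could never be true; the fixed form shifts the field down to a 0-3 value first (and the non-fatal path is also switched from the _FAT_ to the _NF_ macro). A quick standalone check with an illustrative error-register value:

#include <stdint.h>
#include <stdio.h>

/* Old form: mask only -- the index bits stay at bit positions 29:28. */
#define GET_IDX_OLD(err)	((err) & (3u << 28))
/* Fixed form: shift down first, yielding 0..3 for direct comparison. */
#define GET_IDX_NEW(err)	(((err) >> 28) & 3u)

int main(void)
{
	uint32_t err = 2u << 28;	/* index field set to 2 */

	printf("old: 0x%08x (== 2 ? %s)\n",
	       (unsigned)GET_IDX_OLD(err), GET_IDX_OLD(err) == 2 ? "yes" : "no");
	printf("new: %u (== 2 ? %s)\n",
	       (unsigned)GET_IDX_NEW(err), GET_IDX_NEW(err) == 2 ? "yes" : "no");
	return 0;
}
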
index 3672101023bd8d44c7fb136601a72a0290a9e936..10c8c00d6469398a211dc23b413d4d6495851b5a 100644 (file)
@@ -816,7 +816,7 @@ static ssize_t i7core_inject_store_##param(                 \
        struct device_attribute *mattr,                         \
        const char *data, size_t count)                         \
 {                                                              \
-       struct mem_ctl_info *mci = to_mci(dev);                 \
+       struct mem_ctl_info *mci = dev_get_drvdata(dev);        \
        struct i7core_pvt *pvt;                                 \
        long value;                                             \
        int rc;                                                 \
@@ -845,7 +845,7 @@ static ssize_t i7core_inject_show_##param(                  \
        struct device_attribute *mattr,                         \
        char *data)                                             \
 {                                                              \
-       struct mem_ctl_info *mci = to_mci(dev);                 \
+       struct mem_ctl_info *mci = dev_get_drvdata(dev);        \
        struct i7core_pvt *pvt;                                 \
                                                                \
        pvt = mci->pvt_info;                                    \
@@ -1052,7 +1052,7 @@ static ssize_t i7core_show_counter_##param(                       \
        struct device_attribute *mattr,                         \
        char *data)                                             \
 {                                                              \
-       struct mem_ctl_info *mci = to_mci(dev);                 \
+       struct mem_ctl_info *mci = dev_get_drvdata(dev);        \
        struct i7core_pvt *pvt = mci->pvt_info;                 \
                                                                \
        edac_dbg(1, "\n");                                      \
index 069e26c11c4f761997bbf2afb6b9bf1bf2d6b039..a98020409fa9933181fe93a550fdb2abaf572ebd 100644 (file)
@@ -370,10 +370,6 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
 static void i82975x_init_csrows(struct mem_ctl_info *mci,
                struct pci_dev *pdev, void __iomem *mch_window)
 {
-       static const char *labels[4] = {
-                                                       "DIMM A1", "DIMM A2",
-                                                       "DIMM B1", "DIMM B2"
-                                               };
        struct csrow_info *csrow;
        unsigned long last_cumul_size;
        u8 value;
@@ -423,9 +419,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
                        dimm = mci->csrows[index]->channels[chan]->dimm;
 
                        dimm->nr_pages = nr_pages / csrow->nr_channels;
-                       strncpy(csrow->channels[chan]->dimm->label,
-                                       labels[(index >> 1) + (chan * 2)],
-                                       EDAC_MC_LABEL_LEN);
+
+                       snprintf(csrow->channels[chan]->dimm->label, EDAC_MC_LABEL_LEN, "DIMM %c%d",
+                                (chan == 0) ? 'A' : 'B',
+                                index);
                        dimm->grain = 1 << 7;   /* 128Byte cache-line resolution */
                        dimm->dtype = i82975x_dram_type(mch_window, index);
                        dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
index 241ad1eeec64d2097a4416bd76026723d384a215..f2df06c603f72a522527226aa9622cfd7e4c09fa 100644 (file)
@@ -226,6 +226,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
         * already updated or not by exynos_drm_encoder_dpms function.
         */
        exynos_encoder->updated = true;
+
+       /*
+        * In case of setcrtc, there is no way to update encoder's dpms
+        * so update it here.
+        */
+       exynos_encoder->dpms = DRM_MODE_DPMS_ON;
 }
 
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
@@ -507,6 +513,6 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
         * because the setting for disabling the overlay will be updated
         * at vsync.
         */
-       if (overlay_ops->wait_for_vblank)
+       if (overlay_ops && overlay_ops->wait_for_vblank)
                overlay_ops->wait_for_vblank(manager->dev);
 }
index 67eb6ba56edf4b1bffe805a3bd5c06973c532726..e7466c4414cb8f04e8b462ce98a28dbe6a539b5e 100644 (file)
@@ -87,7 +87,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
        dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
        fbi->screen_base = buffer->kvaddr + offset;
-       fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+       fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
+                               offset);
        fbi->screen_size = size;
        fbi->fix.smem_len = size;
 
index 130a2b510d4aebe1c9852b96457aea52e9857a15..e08478f19f1a0fd74f78136cde6ca3be00c69165 100644 (file)
@@ -61,11 +61,11 @@ struct fimd_driver_data {
        unsigned int timing_base;
 };
 
-struct fimd_driver_data exynos4_fimd_driver_data = {
+static struct fimd_driver_data exynos4_fimd_driver_data = {
        .timing_base = 0x0,
 };
 
-struct fimd_driver_data exynos5_fimd_driver_data = {
+static struct fimd_driver_data exynos5_fimd_driver_data = {
        .timing_base = 0x20000,
 };
 
index 60b877a388c280997d6042302c11fb2a7e8a0553..862ca1eb21020fdc2faf5ebc5c65b5b3a2899731 100644 (file)
@@ -204,7 +204,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                return ret;
 
        plane->crtc = crtc;
-       plane->fb = crtc->fb;
 
        exynos_plane_commit(plane);
        exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
index 107f09befe929540e9ad801ff2bbdfe97a6b651d..9b285da4449b005c3c38a2ac7001c3c222b2deed 100644 (file)
@@ -1796,7 +1796,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         */
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        gfp = mapping_gfp_mask(mapping);
-       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
        gfp &= ~(__GFP_IO | __GFP_WAIT);
        for_each_sg(st->sgl, sg, page_count, i) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfp);
@@ -1809,7 +1809,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         */
-                       gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+                       gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
                        gfp |= __GFP_IO | __GFP_WAIT;
 
                        i915_gem_shrink_all(dev_priv);
@@ -1817,7 +1817,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                        if (IS_ERR(page))
                                goto err_pages;
 
-                       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+                       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
                        gfp &= ~(__GFP_IO | __GFP_WAIT);
                }
 
index 0ed6baff4b0c2146d133006c52542b48076c5cc4..56846ed5ee55b1b4686afb4a2fb56a359f80c9bf 100644 (file)
@@ -499,12 +499,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
        edp = find_section(bdb, BDB_EDP);
        if (!edp) {
-               if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
-                       DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
-                                     "supported, assume %dbpp panel color "
-                                     "depth.\n",
-                                     dev_priv->edp.bpp);
-               }
+               if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+                       DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
                return;
        }
 
@@ -657,9 +653,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
        dev_priv->lvds_use_ssc = 1;
        dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
        DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
-
-       /* eDP data */
-       dev_priv->edp.bpp = 18;
 }
 
 static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
index 4154bcd7a0708b957d4b176607662933ac49fc0d..b426d44a2b055d5a8afb31a0ba4bc9fd40ea0554 100644 (file)
@@ -3845,7 +3845,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                        /* Use VBT settings if we have an eDP panel */
                        unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
-                       if (edp_bpc < display_bpc) {
+                       if (edp_bpc && edp_bpc < display_bpc) {
                                DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
                                display_bpc = edp_bpc;
                        }
index 72f41aaa71ff330637bdc82d4ee8e0c04091bdf2..442968f8b20151e16d4b8bbd6e15093fb4d7ed85 100644 (file)
@@ -2373,15 +2373,9 @@ int intel_enable_rc6(const struct drm_device *dev)
        if (i915_enable_rc6 >= 0)
                return i915_enable_rc6;
 
-       if (INTEL_INFO(dev)->gen == 5) {
-#ifdef CONFIG_INTEL_IOMMU
-               /* Disable rc6 on ilk if VT-d is on. */
-               if (intel_iommu_gfx_mapped)
-                       return false;
-#endif
-               DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
-               return INTEL_RC6_ENABLE;
-       }
+       /* Disable RC6 on Ironlake */
+       if (INTEL_INFO(dev)->gen == 5)
+               return 0;
 
        if (IS_HASWELL(dev)) {
                DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
index c600fb06e25e6a4798cd7f7f5841593025d3f484..a6ac0b416964c3751475522654cac0ccb6ccc574 100644 (file)
@@ -2201,7 +2201,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
                connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
                intel_sdvo->is_hdmi = true;
        }
-       intel_sdvo->base.cloneable = true;
 
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
        if (intel_sdvo->is_hdmi)
@@ -2232,7 +2231,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 
        intel_sdvo->is_tv = true;
        intel_sdvo->base.needs_tv_clock = true;
-       intel_sdvo->base.cloneable = false;
 
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
@@ -2275,8 +2273,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
        }
 
-       intel_sdvo->base.cloneable = true;
-
        intel_sdvo_connector_init(intel_sdvo_connector,
                                  intel_sdvo);
        return true;
@@ -2307,9 +2303,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
        }
 
-       /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
-       intel_sdvo->base.cloneable = false;
-
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
                goto err;
@@ -2721,6 +2714,16 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
                goto err_output;
        }
 
+       /*
+        * Cloning SDVO with anything is often impossible, since the SDVO
+        * encoder can request a special input timing mode. And even if that's
+        * not the case, we have evidence that cloning a plain unscaled mode with
+        * VGA doesn't really work. Furthermore, the cloning flags are way too
+        * simplistic anyway to express such constraints, so just give up on
+        * cloning for SDVO encoders.
+        */
+       intel_sdvo->base.cloneable = false;
+
        /* Only enable the hotplug irq if we need it, to work around noisy
         * hotplug lines.
         */
index 3bce0299f64a664f9d1de78a6f452c907221694f..24d932f5320324fcdb726185a08481b4738e9ccf 100644 (file)
@@ -1696,42 +1696,22 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                        return ATOM_PPLL2;
                DRM_ERROR("unable to allocate a PPLL\n");
                return ATOM_PPLL_INVALID;
-       } else if (ASIC_IS_AVIVO(rdev)) {
-               /* in DP mode, the DP ref clock can come from either PPLL
-                * depending on the asic:
-                * DCE3: PPLL1 or PPLL2
-                */
-               if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
-                       /* use the same PPLL for all DP monitors */
-                       pll = radeon_get_shared_dp_ppll(crtc);
-                       if (pll != ATOM_PPLL_INVALID)
-                               return pll;
-               } else {
-                       /* use the same PPLL for all monitors with the same clock */
-                       pll = radeon_get_shared_nondp_ppll(crtc);
-                       if (pll != ATOM_PPLL_INVALID)
-                               return pll;
-               }
-               /* all other cases */
-               pll_in_use = radeon_get_pll_use_mask(crtc);
-               /* the order shouldn't matter here, but we probably
-                * need this until we have atomic modeset
-                */
-               if (rdev->flags & RADEON_IS_IGP) {
-                       if (!(pll_in_use & (1 << ATOM_PPLL1)))
-                               return ATOM_PPLL1;
-                       if (!(pll_in_use & (1 << ATOM_PPLL2)))
-                               return ATOM_PPLL2;
-               } else {
-                       if (!(pll_in_use & (1 << ATOM_PPLL2)))
-                               return ATOM_PPLL2;
-                       if (!(pll_in_use & (1 << ATOM_PPLL1)))
-                               return ATOM_PPLL1;
-               }
-               DRM_ERROR("unable to allocate a PPLL\n");
-               return ATOM_PPLL_INVALID;
        } else {
                /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+               /* Some atombios code (observed on some DCE2/DCE3 parts) has a
+                * bug: the matching between pll and crtc is done through
+                * PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the atombios code uses
+                * the pll id (1 or 2) to select which register to write, i.e.
+                * with pll1 it writes PCLK_CRTC1_CNTL (0x480) and with pll2 it
+                * writes PCLK_CRTC2_CNTL (0x484), and then uses the crtc id to
+                * choose which value to write.  That is the reverse of the
+                * register logic, so the only cases that work are when the pll
+                * id matches the crtc id, or when both plls and both crtcs are
+                * enabled and use the same clock.
+                *
+                * So just return the crtc id as if crtc and pll were hard
+                * linked together, even if they aren't.
+                */
                return radeon_crtc->crtc_id;
        }
 }
index 443ad64b7f2a2344ff6e8801dfe2c7f0a1e0a0a1..d88d9be1d1b7881b5fe5acfd674bf1ca12c17793 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/input.h>
 #include <linux/of.h>
 #include <linux/export.h>
+#include <linux/module.h>
 #include <linux/input/matrix_keypad.h>
 
 static bool matrix_keypad_map_key(struct input_dev *input_dev,
@@ -161,3 +162,5 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
        return 0;
 }
 EXPORT_SYMBOL(matrix_keypad_build_keymap);
+
+MODULE_LICENSE("GPL");
index 636bae0405e8167edcec328e5759b3f949dd0876..a0f73092176e06cd736885496e02a5fab601dffe 100644 (file)
@@ -963,7 +963,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
        struct r1conf *conf = mddev->private;
        struct bio *bio;
 
-       if (from_schedule) {
+       if (from_schedule || current->bio_list) {
                spin_lock_irq(&conf->device_lock);
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
index 0d5d0ff2c0f7beb47a02bd4692273b31369b2bc5..c9acbd717131ba8e28f4362d3e1aca967922ede8 100644 (file)
@@ -1069,7 +1069,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
        struct r10conf *conf = mddev->private;
        struct bio *bio;
 
-       if (from_schedule) {
+       if (from_schedule || current->bio_list) {
                spin_lock_irq(&conf->device_lock);
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
index 047f0f0434ecbdebeeb88e00801659193b49c11d..c267c57c76fdeec23c6fb51c0609929f630c0e91 100644 (file)
@@ -657,8 +657,7 @@ static int gsc_m2m_release(struct file *file)
        pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
                task_pid_nr(current), gsc->state, gsc->m2m.refcnt);
 
-       if (mutex_lock_interruptible(&gsc->lock))
-               return -ERESTARTSYS;
+       mutex_lock(&gsc->lock);
 
        v4l2_m2m_ctx_release(ctx->m2m_ctx);
        gsc_ctrls_delete(ctx);
@@ -732,6 +731,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
        gsc->vdev.ioctl_ops     = &gsc_m2m_ioctl_ops;
        gsc->vdev.release       = video_device_release_empty;
        gsc->vdev.lock          = &gsc->lock;
+       gsc->vdev.vfl_dir       = VFL_DIR_M2M;
        snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
                                        GSC_MODULE_NAME, gsc->id);
 
index 533e9947a925db5a2eec166fbcd25dca0a1392b8..4678f9a6a4fd0e6f6d2c9cb7bb57e69698295956 100644 (file)
 #define GSC_IN_ROT_YFLIP               (2 << 16)
 #define GSC_IN_ROT_XFLIP               (1 << 16)
 #define GSC_IN_RGB_TYPE_MASK           (3 << 14)
-#define GSC_IN_RGB_HD_WIDE             (3 << 14)
-#define GSC_IN_RGB_HD_NARROW           (2 << 14)
-#define GSC_IN_RGB_SD_WIDE             (1 << 14)
-#define GSC_IN_RGB_SD_NARROW           (0 << 14)
+#define GSC_IN_RGB_HD_NARROW           (3 << 14)
+#define GSC_IN_RGB_HD_WIDE             (2 << 14)
+#define GSC_IN_RGB_SD_NARROW           (1 << 14)
+#define GSC_IN_RGB_SD_WIDE             (0 << 14)
 #define GSC_IN_YUV422_1P_ORDER_MASK    (1 << 13)
 #define GSC_IN_YUV422_1P_ORDER_LSB_Y   (0 << 13)
 #define GSC_IN_YUV422_1P_OEDER_LSB_C   (1 << 13)
 #define GSC_OUT_GLOBAL_ALPHA_MASK      (0xff << 24)
 #define GSC_OUT_GLOBAL_ALPHA(x)                ((x) << 24)
 #define GSC_OUT_RGB_TYPE_MASK          (3 << 10)
-#define GSC_OUT_RGB_HD_NARROW          (3 << 10)
-#define GSC_OUT_RGB_HD_WIDE            (2 << 10)
-#define GSC_OUT_RGB_SD_NARROW          (1 << 10)
-#define GSC_OUT_RGB_SD_WIDE            (0 << 10)
+#define GSC_OUT_RGB_HD_WIDE            (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW          (2 << 10)
+#define GSC_OUT_RGB_SD_WIDE            (1 << 10)
+#define GSC_OUT_RGB_SD_NARROW          (0 << 10)
 #define GSC_OUT_YUV422_1P_ORDER_MASK   (1 << 9)
 #define GSC_OUT_YUV422_1P_ORDER_LSB_Y  (0 << 9)
 #define GSC_OUT_YUV422_1P_OEDER_LSB_C  (1 << 9)
index 0d0aca5008764048d262bd86bc584894289dbb1c..fdb6740248a73f58bd2c7ab42c72f03991d907f0 100644 (file)
@@ -556,8 +556,7 @@ static int fimc_capture_close(struct file *file)
 
        dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
 
-       if (mutex_lock_interruptible(&fimc->lock))
-               return -ERESTARTSYS;
+       mutex_lock(&fimc->lock);
 
        if (--fimc->vid_cap.refcnt == 0) {
                clear_bit(ST_CAPT_BUSY, &fimc->state);
@@ -1783,9 +1782,13 @@ static int fimc_capture_subdev_registered(struct v4l2_subdev *sd)
        if (ret)
                return ret;
 
+       fimc->pipeline_ops = v4l2_get_subdev_hostdata(sd);
+
        ret = fimc_register_capture_device(fimc, sd->v4l2_dev);
-       if (ret)
+       if (ret) {
                fimc_unregister_m2m_device(fimc);
+               fimc->pipeline_ops = NULL;
+       }
 
        return ret;
 }
@@ -1802,6 +1805,7 @@ static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd)
        if (video_is_registered(&fimc->vid_cap.vfd)) {
                video_unregister_device(&fimc->vid_cap.vfd);
                media_entity_cleanup(&fimc->vid_cap.vfd.entity);
+               fimc->pipeline_ops = NULL;
        }
        kfree(fimc->vid_cap.ctx);
        fimc->vid_cap.ctx = NULL;
index 9db246bed841abc4622433d82f668e801e0cf3d7..1b309a72f09fbe847c1b4ac2178c1717a68f5a04 100644 (file)
@@ -491,8 +491,7 @@ static int fimc_lite_close(struct file *file)
        struct fimc_lite *fimc = video_drvdata(file);
        int ret;
 
-       if (mutex_lock_interruptible(&fimc->lock))
-               return -ERESTARTSYS;
+       mutex_lock(&fimc->lock);
 
        if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
                clear_bit(ST_FLITE_IN_USE, &fimc->state);
@@ -1263,10 +1262,12 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
                return ret;
 
        video_set_drvdata(vfd, fimc);
+       fimc->pipeline_ops = v4l2_get_subdev_hostdata(sd);
 
        ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
        if (ret < 0) {
                media_entity_cleanup(&vfd->entity);
+               fimc->pipeline_ops = NULL;
                return ret;
        }
 
@@ -1285,6 +1286,7 @@ static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd)
        if (video_is_registered(&fimc->vfd)) {
                video_unregister_device(&fimc->vfd);
                media_entity_cleanup(&fimc->vfd.entity);
+               fimc->pipeline_ops = NULL;
        }
 }
 
index 26bcf4bc4209b3fe2349e5fdfebb4ebb72512f6f..1d21da4bd24be130dbcdd2b15032ff198390804a 100644 (file)
@@ -728,8 +728,7 @@ static int fimc_m2m_release(struct file *file)
        dbg("pid: %d, state: 0x%lx, refcnt= %d",
                task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
 
-       if (mutex_lock_interruptible(&fimc->lock))
-               return -ERESTARTSYS;
+       mutex_lock(&fimc->lock);
 
        v4l2_m2m_ctx_release(ctx->m2m_ctx);
        fimc_ctrls_delete(ctx);
index 9bd5dd4162206b261b94660d2e498404ef3c7072..1bd5678cfeb9d13f4bfd4b1dc0f7ffccac5ce33b 100644 (file)
@@ -352,6 +352,7 @@ static int fimc_register_callback(struct device *dev, void *p)
 
        sd = &fimc->vid_cap.subdev;
        sd->grp_id = FIMC_GROUP_ID;
+       v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops);
 
        ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
        if (ret) {
@@ -360,7 +361,6 @@ static int fimc_register_callback(struct device *dev, void *p)
                return ret;
        }
 
-       fimc->pipeline_ops = &fimc_pipeline_ops;
        fmd->fimc[fimc->id] = fimc;
        return 0;
 }
@@ -375,6 +375,7 @@ static int fimc_lite_register_callback(struct device *dev, void *p)
                return 0;
 
        fimc->subdev.grp_id = FLITE_GROUP_ID;
+       v4l2_set_subdev_hostdata(&fimc->subdev, (void *)&fimc_pipeline_ops);
 
        ret = v4l2_device_register_subdev(&fmd->v4l2_dev, &fimc->subdev);
        if (ret) {
@@ -384,7 +385,6 @@ static int fimc_lite_register_callback(struct device *dev, void *p)
                return ret;
        }
 
-       fimc->pipeline_ops = &fimc_pipeline_ops;
        fmd->fimc_lite[fimc->index] = fimc;
        return 0;
 }
index 130f4ac8649ec644faf589e734145761f5d7cc91..3afe879d54d7dec195cc99ff49aa316ff42ff53a 100644 (file)
@@ -381,11 +381,8 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
                ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
                                                get_consumed_stream, dev);
                if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
-                       s5p_mfc_hw_call(dev->mfc_ops,
-                               get_dec_frame_type, dev) ==
-                                       S5P_FIMV_DECODE_FRAME_P_FRAME
-                                       && ctx->consumed_stream + STUFF_BYTE <
-                                       src_buf->b->v4l2_planes[0].bytesused) {
+                       ctx->consumed_stream + STUFF_BYTE <
+                       src_buf->b->v4l2_planes[0].bytesused) {
                        /* Run MFC again on the same buffer */
                        mfc_debug(2, "Running again the same buffer\n");
                        ctx->after_packed_pb = 1;
index 50b5bee3c44e970070d4af07c0a6751c333d08e9..3a8cfd9fc1bd045e8fb05bd177b341c01e25595b 100644 (file)
@@ -1762,7 +1762,7 @@ int s5p_mfc_get_dspl_y_adr_v6(struct s5p_mfc_dev *dev)
 
 int s5p_mfc_get_dec_y_adr_v6(struct s5p_mfc_dev *dev)
 {
-       return mfc_read(dev, S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6);
+       return mfc_read(dev, S5P_FIMV_D_DECODED_LUMA_ADDR_V6);
 }
 
 int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
index a54dd5d7a5f94b1b3e550e799ee95999c5e54df8..c9ec725884e5cbe6b841a2008241fc8b20db2d8d 100644 (file)
@@ -373,18 +373,25 @@ static struct sdhci_ops sdhci_s3c_ops = {
 static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
 {
        struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_s3c *sc = sdhci_priv(host);
        unsigned long flags;
 
        if (host) {
                spin_lock_irqsave(&host->lock, flags);
                if (state) {
                        dev_dbg(&dev->dev, "card inserted.\n");
+#ifdef CONFIG_PM_RUNTIME
+                       clk_prepare_enable(sc->clk_io);
+#endif
                        host->flags &= ~SDHCI_DEVICE_DEAD;
                        host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
                } else {
                        dev_dbg(&dev->dev, "card removed.\n");
                        host->flags |= SDHCI_DEVICE_DEAD;
                        host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+#ifdef CONFIG_PM_RUNTIME
+                       clk_disable_unprepare(sc->clk_io);
+#endif
                }
                tasklet_schedule(&host->card_tasklet);
                spin_unlock_irqrestore(&host->lock, flags);
index d25bc97dc5c60063bb609b22973e41790ced9762..7eaee3eeb6b2bffed13af54651e6ad5e19d0b409 100644 (file)
@@ -1104,7 +1104,6 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 {
        struct sh_mmcif_host *host = dev_id;
        struct mmc_request *mrq = host->mrq;
-       struct mmc_data *data = mrq->data;
 
        cancel_delayed_work_sync(&host->timeout_work);
 
@@ -1152,13 +1151,14 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
        case MMCIF_WAIT_FOR_READ_END:
        case MMCIF_WAIT_FOR_WRITE_END:
                if (host->sd_error)
-                       data->error = sh_mmcif_error_manage(host);
+                       mrq->data->error = sh_mmcif_error_manage(host);
                break;
        default:
                BUG();
        }
 
        if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+               struct mmc_data *data = mrq->data;
                if (!mrq->cmd->error && data && !data->error)
                        data->bytes_xfered =
                                data->blocks * data->blksz;
@@ -1231,10 +1231,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
                host->sd_error = true;
                dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
        }
-       if (host->state == STATE_IDLE) {
-               dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
-               return IRQ_HANDLED;
-       }
        if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
                if (!host->dma_active)
                        return IRQ_WAKE_THREAD;
index da7b44998b402866643ef824055d6ed3916bcf4f..2144f611196e3188bd3983ea5e084dd4bafd47ff 100644 (file)
@@ -498,7 +498,7 @@ out:
  * @ubi: UBI device description object
  *
  * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure. Might sleep.
+ * negative error code in case of failure.
  */
 static int __wl_get_peb(struct ubi_device *ubi)
 {
@@ -540,13 +540,6 @@ retry:
         * ubi_wl_get_peb() after removing e from the pool. */
        prot_queue_add(ubi, e);
 #endif
-       err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
-                                   ubi->peb_size - ubi->vid_hdr_aloffset);
-       if (err) {
-               ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
-               return err;
-       }
-
        return e->pnum;
 }
 
@@ -679,17 +672,30 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 #else
 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 {
-       return find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+       struct ubi_wl_entry *e;
+
+       e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+       self_check_in_wl_tree(ubi, e, &ubi->free);
+       rb_erase(&e->u.rb, &ubi->free);
+
+       return e;
 }
 
 int ubi_wl_get_peb(struct ubi_device *ubi)
 {
-       int peb;
+       int peb, err;
 
        spin_lock(&ubi->wl_lock);
        peb = __wl_get_peb(ubi);
        spin_unlock(&ubi->wl_lock);
 
+       err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
+                                   ubi->peb_size - ubi->vid_hdr_aloffset);
+       if (err) {
+               ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
+               return err;
+       }
+
        return peb;
 }
 #endif
index 5f5b69f37d2e50d4a6ebbe91f4c3bc1346e042e3..a7d47350ea4b5657d4ee9683b92d7243ea2a9361 100644 (file)
@@ -3459,6 +3459,28 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 
 /*-------------------------- Device entry points ----------------------------*/
 
+static void bond_work_init_all(struct bonding *bond)
+{
+       INIT_DELAYED_WORK(&bond->mcast_work,
+                         bond_resend_igmp_join_requests_delayed);
+       INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+       INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+               INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
+       else
+               INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
+       INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+}
+
+static void bond_work_cancel_all(struct bonding *bond)
+{
+       cancel_delayed_work_sync(&bond->mii_work);
+       cancel_delayed_work_sync(&bond->arp_work);
+       cancel_delayed_work_sync(&bond->alb_work);
+       cancel_delayed_work_sync(&bond->ad_work);
+       cancel_delayed_work_sync(&bond->mcast_work);
+}
+
 static int bond_open(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
@@ -3481,41 +3503,27 @@ static int bond_open(struct net_device *bond_dev)
        }
        read_unlock(&bond->lock);
 
-       INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
+       bond_work_init_all(bond);
 
        if (bond_is_lb(bond)) {
                /* bond_alb_initialize must be called before the timer
                 * is started.
                 */
-               if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
-                       /* something went wrong - fail the open operation */
+               if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
                        return -ENOMEM;
-               }
-
-               INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
                queue_delayed_work(bond->wq, &bond->alb_work, 0);
        }
 
-       if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-               INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+       if (bond->params.miimon)  /* link check interval, in milliseconds. */
                queue_delayed_work(bond->wq, &bond->mii_work, 0);
-       }
 
        if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-               if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-                       INIT_DELAYED_WORK(&bond->arp_work,
-                                         bond_activebackup_arp_mon);
-               else
-                       INIT_DELAYED_WORK(&bond->arp_work,
-                                         bond_loadbalance_arp_mon);
-
                queue_delayed_work(bond->wq, &bond->arp_work, 0);
                if (bond->params.arp_validate)
                        bond->recv_probe = bond_arp_rcv;
        }
 
        if (bond->params.mode == BOND_MODE_8023AD) {
-               INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
                queue_delayed_work(bond->wq, &bond->ad_work, 0);
                /* register to receive LACPDUs */
                bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3530,34 +3538,10 @@ static int bond_close(struct net_device *bond_dev)
        struct bonding *bond = netdev_priv(bond_dev);
 
        write_lock_bh(&bond->lock);
-
        bond->send_peer_notif = 0;
-
        write_unlock_bh(&bond->lock);
 
-       if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-               cancel_delayed_work_sync(&bond->mii_work);
-       }
-
-       if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-               cancel_delayed_work_sync(&bond->arp_work);
-       }
-
-       switch (bond->params.mode) {
-       case BOND_MODE_8023AD:
-               cancel_delayed_work_sync(&bond->ad_work);
-               break;
-       case BOND_MODE_TLB:
-       case BOND_MODE_ALB:
-               cancel_delayed_work_sync(&bond->alb_work);
-               break;
-       default:
-               break;
-       }
-
-       if (delayed_work_pending(&bond->mcast_work))
-               cancel_delayed_work_sync(&bond->mcast_work);
-
+       bond_work_cancel_all(bond);
        if (bond_is_lb(bond)) {
                /* Must be called only after all
                 * slaves have been released
@@ -4436,26 +4420,6 @@ static void bond_setup(struct net_device *bond_dev)
        bond_dev->features |= bond_dev->hw_features;
 }
 
-static void bond_work_cancel_all(struct bonding *bond)
-{
-       if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
-               cancel_delayed_work_sync(&bond->mii_work);
-
-       if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
-               cancel_delayed_work_sync(&bond->arp_work);
-
-       if (bond->params.mode == BOND_MODE_ALB &&
-           delayed_work_pending(&bond->alb_work))
-               cancel_delayed_work_sync(&bond->alb_work);
-
-       if (bond->params.mode == BOND_MODE_8023AD &&
-           delayed_work_pending(&bond->ad_work))
-               cancel_delayed_work_sync(&bond->ad_work);
-
-       if (delayed_work_pending(&bond->mcast_work))
-               cancel_delayed_work_sync(&bond->mcast_work);
-}
-
 /*
 * Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
@@ -4706,12 +4670,13 @@ static int bond_check_params(struct bond_params *params)
             arp_ip_count++) {
                /* not complete check, but should be good enough to
                   catch mistakes */
-               if (!isdigit(arp_ip_target[arp_ip_count][0])) {
+               __be32 ip = in_aton(arp_ip_target[arp_ip_count]);
+               if (!isdigit(arp_ip_target[arp_ip_count][0]) ||
+                   ip == 0 || ip == htonl(INADDR_BROADCAST)) {
                        pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
                                   arp_ip_target[arp_ip_count]);
                        arp_interval = 0;
                } else {
-                       __be32 ip = in_aton(arp_ip_target[arp_ip_count]);
                        arp_target[arp_ip_count] = ip;
                }
        }
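
The bond_main.c hunks above replace the scattered, mode-dependent INIT_DELAYED_WORK and cancel_delayed_work_sync calls with bond_work_init_all() and bond_work_cancel_all(). A minimal sketch of the same pattern is below; the names and structure are hypothetical, not the bonding code, and it relies only on the fact that cancel_delayed_work_sync() is safe on an initialized work item that was never queued.

#include <linux/types.h>
#include <linux/workqueue.h>

struct sketch_bond {
	struct delayed_work mii_work;
	struct delayed_work arp_work;
	bool active_backup;
};

static void sketch_mii_monitor(struct work_struct *work) { }
static void sketch_arp_monitor(struct work_struct *work) { }
static void sketch_arp_backup_monitor(struct work_struct *work) { }

/* Initialize every work item up front; queue only the ones the current
 * configuration needs later on. */
static void sketch_work_init_all(struct sketch_bond *bond)
{
	INIT_DELAYED_WORK(&bond->mii_work, sketch_mii_monitor);
	if (bond->active_backup)
		INIT_DELAYED_WORK(&bond->arp_work, sketch_arp_backup_monitor);
	else
		INIT_DELAYED_WORK(&bond->arp_work, sketch_arp_monitor);
}

static void sketch_work_cancel_all(struct sketch_bond *bond)
{
	/* Safe even for work items that were initialized but never queued. */
	cancel_delayed_work_sync(&bond->mii_work);
	cancel_delayed_work_sync(&bond->arp_work);
}

Initializing every work item unconditionally up front is what makes the unconditional cancel on close safe, regardless of which monitors were actually started.
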
index ef8d2a080d17f4c61b8f28aa7e07bc54ed5c0abc..1877ed7ca0864ed4adfb614de862ed40dde730bd 100644 (file)
@@ -513,6 +513,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
        int new_value, ret = count;
        struct bonding *bond = to_bond(d);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
        if (sscanf(buf, "%d", &new_value) != 1) {
                pr_err("%s: no arp_interval value specified.\n",
                       bond->dev->name);
@@ -539,10 +541,6 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
                        bond->dev->name, bond->dev->name);
                bond->params.miimon = 0;
-               if (delayed_work_pending(&bond->mii_work)) {
-                       cancel_delayed_work(&bond->mii_work);
-                       flush_workqueue(bond->wq);
-               }
        }
        if (!bond->params.arp_targets[0]) {
                pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
@@ -554,19 +552,12 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                 * timer will get fired off when the open function
                 * is called.
                 */
-               if (!delayed_work_pending(&bond->arp_work)) {
-                       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-                               INIT_DELAYED_WORK(&bond->arp_work,
-                                                 bond_activebackup_arp_mon);
-                       else
-                               INIT_DELAYED_WORK(&bond->arp_work,
-                                                 bond_loadbalance_arp_mon);
-
-                       queue_delayed_work(bond->wq, &bond->arp_work, 0);
-               }
+               cancel_delayed_work_sync(&bond->mii_work);
+               queue_delayed_work(bond->wq, &bond->arp_work, 0);
        }
 
 out:
+       rtnl_unlock();
        return ret;
 }
 static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
@@ -962,6 +953,8 @@ static ssize_t bonding_store_miimon(struct device *d,
        int new_value, ret = count;
        struct bonding *bond = to_bond(d);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
        if (sscanf(buf, "%d", &new_value) != 1) {
                pr_err("%s: no miimon value specified.\n",
                       bond->dev->name);
@@ -993,10 +986,6 @@ static ssize_t bonding_store_miimon(struct device *d,
                                bond->params.arp_validate =
                                        BOND_ARP_VALIDATE_NONE;
                        }
-                       if (delayed_work_pending(&bond->arp_work)) {
-                               cancel_delayed_work(&bond->arp_work);
-                               flush_workqueue(bond->wq);
-                       }
                }
 
                if (bond->dev->flags & IFF_UP) {
@@ -1005,15 +994,12 @@ static ssize_t bonding_store_miimon(struct device *d,
                         * timer will get fired off when the open function
                         * is called.
                         */
-                       if (!delayed_work_pending(&bond->mii_work)) {
-                               INIT_DELAYED_WORK(&bond->mii_work,
-                                                 bond_mii_monitor);
-                               queue_delayed_work(bond->wq,
-                                                  &bond->mii_work, 0);
-                       }
+                       cancel_delayed_work_sync(&bond->arp_work);
+                       queue_delayed_work(bond->wq, &bond->mii_work, 0);
                }
        }
 out:
+       rtnl_unlock();
        return ret;
 }
 static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
@@ -1582,6 +1568,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                goto out;
        }
 
+       read_lock(&bond->lock);
        bond_for_each_slave(bond, slave, i) {
                if (!bond_is_active_slave(slave)) {
                        if (new_value)
@@ -1590,6 +1577,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                                slave->inactive = 1;
                }
        }
+       read_unlock(&bond->lock);
 out:
        return ret;
 }
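
The bond_sysfs.c hunks above make the store handlers take the RTNL via rtnl_trylock(), returning restart_syscall() when the lock is contended, which lets them drop the delayed_work_pending() checks. A sketch of that idiom with hypothetical names, not the bonding code:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>

static ssize_t sketch_store_interval(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int new_value;

	/* Back out and restart the syscall rather than sleeping on the RTNL
	 * while holding sysfs references. */
	if (!rtnl_trylock())
		return restart_syscall();

	if (sscanf(buf, "%d", &new_value) != 1 || new_value < 0) {
		rtnl_unlock();
		return -EINVAL;
	}

	/* ... reconfigure under RTNL; cancel_delayed_work_sync() and
	 * queue_delayed_work() can be called here without pending checks ... */

	rtnl_unlock();
	return count;
}
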
index 86f26a1ede4c18a233a85310b0bfb5f9a92da856..25723d8ee20130b19ad95b885a99e5c1ba019517 100644 (file)
@@ -519,8 +519,10 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
        mc->pdev->dev.can.state = new_state;
 
        if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
+               struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+
                peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
-               skb->tstamp = timeval_to_ktime(tv);
+               hwts->hwtstamp = timeval_to_ktime(tv);
        }
 
        netif_rx(skb);
@@ -605,6 +607,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
        struct sk_buff *skb;
        struct can_frame *cf;
        struct timeval tv;
+       struct skb_shared_hwtstamps *hwts;
 
        skb = alloc_can_skb(mc->netdev, &cf);
        if (!skb)
@@ -652,7 +655,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 
        /* convert timestamp into kernel time */
        peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
-       skb->tstamp = timeval_to_ktime(tv);
+       hwts = skb_hwtstamps(skb);
+       hwts->hwtstamp = timeval_to_ktime(tv);
 
        /* push the skb */
        netif_rx(skb);
index e1626d92511adc88d084345f7fdbf098a4aafb09..30d79bfa5b109e5d6d212df46658a4cf1216d6e1 100644 (file)
@@ -532,6 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
        struct can_frame *can_frame;
        struct sk_buff *skb;
        struct timeval tv;
+       struct skb_shared_hwtstamps *hwts;
 
        skb = alloc_can_skb(netdev, &can_frame);
        if (!skb)
@@ -549,7 +550,8 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
                memcpy(can_frame->data, rx->data, can_frame->can_dlc);
 
        peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
-       skb->tstamp = timeval_to_ktime(tv);
+       hwts = skb_hwtstamps(skb);
+       hwts->hwtstamp = timeval_to_ktime(tv);
 
        netif_rx(skb);
        netdev->stats.rx_packets++;
@@ -570,6 +572,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
        u8 err_mask = 0;
        struct sk_buff *skb;
        struct timeval tv;
+       struct skb_shared_hwtstamps *hwts;
 
        /* nothing should be sent while in BUS_OFF state */
        if (dev->can.state == CAN_STATE_BUS_OFF)
@@ -664,7 +667,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
        dev->can.state = new_state;
 
        peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
-       skb->tstamp = timeval_to_ktime(tv);
+       hwts = skb_hwtstamps(skb);
+       hwts->hwtstamp = timeval_to_ktime(tv);
        netif_rx(skb);
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += can_frame->can_dlc;
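
The peak_usb hunks above stop overwriting skb->tstamp and store the device-provided timestamp in the skb's shared hardware-timestamp area instead. A minimal sketch of that pattern, with a hypothetical helper name rather than the driver's own code:

#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/time.h>

/* Record a device-provided timestamp as a hardware timestamp instead of
 * clobbering the stack's software skb->tstamp. */
static void sketch_stamp_rx_skb(struct sk_buff *skb, const struct timeval *tv)
{
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

	hwts->hwtstamp = timeval_to_ktime(*tv);
}

Userspace then retrieves the value through SO_TIMESTAMPING rather than seeing it as a software receive timestamp.
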
index 5d36795877cb48ef90cd8ea5fac1c11ca05e30d0..b799ab12a2918bb7f0c5b8bdc0d5db70ecaf4392 100644 (file)
@@ -237,7 +237,7 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
        if (err)
                return err;
 
-       memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
+       memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
 
        return 0;
 }
index b01f83a044c4d77f70141c81fee3815cd050f258..609125a249d9484835f77fd445e6a8ba32e299db 100644 (file)
@@ -1060,17 +1060,22 @@ static int cp_init_rings (struct cp_private *cp)
 
 static int cp_alloc_rings (struct cp_private *cp)
 {
+       struct device *d = &cp->pdev->dev;
        void *mem;
+       int rc;
 
-       mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
-                                &cp->ring_dma, GFP_KERNEL);
+       mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
 
        cp->rx_ring = mem;
        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
 
-       return cp_init_rings(cp);
+       rc = cp_init_rings(cp);
+       if (rc < 0)
+               dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+
+       return rc;
 }
 
 static void cp_clean_rings (struct cp_private *cp)
index d44cca327588858e26023a99cee8c64a940cf173..ad86660fb8f92b092f06c9b41d03e416991f4636 100644 (file)
@@ -1794,10 +1794,12 @@ static void team_setup(struct net_device *dev)
 
        dev->features |= NETIF_F_LLTX;
        dev->features |= NETIF_F_GRO;
-       dev->hw_features = NETIF_F_HW_VLAN_TX |
+       dev->hw_features = TEAM_VLAN_FEATURES |
+                          NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;
 
+       dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
        dev->features |= dev->hw_features;
 }
 
index 3b566fa0f8e6277ee62f281211a0507dfdf7c227..1ea91f4237f053e731093fe3fc85e5897f97f5d7 100644 (file)
@@ -385,6 +385,7 @@ static const struct usb_device_id products[] = {
        },
 
        /* 3. Combined interface devices matching on interface number */
+       {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
index e9a3da588e954b1ae38b8becf465b132dbbcd22f..760776b3d66c0033cae82c73c18fcad8fd7b57d5 100644 (file)
@@ -1365,7 +1365,7 @@ static int __devinit hss_init_one(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, port);
 
-       netdev_info(dev, "HSS-%i\n", port->id);
+       netdev_info(dev, "initialized\n");
        return 0;
 
 err_free_netdev:
index 10896393e5a05be9b44de522f388eb91603b81a8..2830ea29050286f1a8d44d43a14ca820810daca9 100644 (file)
@@ -1012,12 +1012,12 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
         * As a consequence, it's not as complicated as it sounds, just add
         * any lower rates to the ACK rate bitmap.
         */
-       if (IWL_RATE_11M_INDEX < lowest_present_ofdm)
-               ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
-       if (IWL_RATE_5M_INDEX < lowest_present_ofdm)
-               ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
-       if (IWL_RATE_2M_INDEX < lowest_present_ofdm)
-               ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
+       if (IWL_RATE_11M_INDEX < lowest_present_cck)
+               cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
+       if (IWL_RATE_5M_INDEX < lowest_present_cck)
+               cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
+       if (IWL_RATE_2M_INDEX < lowest_present_cck)
+               cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
        /* 1M already there or needed so always add */
        cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
 
index e7a4780e93db4fb8ab9c3134debc960930cd6906..9e198e5906750baf663518296d5ae130cbf4a05d 100644 (file)
@@ -120,15 +120,11 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
        return vq;
 }
 
-static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
 {
        struct virtqueue *vq, *n;
-       struct rproc *rproc = vdev_to_rproc(vdev);
        struct rproc_vring *rvring;
 
-       /* power down the remote processor before deleting vqs */
-       rproc_shutdown(rproc);
-
        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                rvring = vq->priv;
                rvring->vq = NULL;
@@ -137,6 +133,16 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
        }
 }
 
+static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+{
+       struct rproc *rproc = vdev_to_rproc(vdev);
+
+       /* power down the remote processor before deleting vqs */
+       rproc_shutdown(rproc);
+
+       __rproc_virtio_del_vqs(vdev);
+}
+
 static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                       struct virtqueue *vqs[],
                       vq_callback_t *callbacks[],
@@ -163,7 +169,7 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
        return 0;
 
 error:
-       rproc_virtio_del_vqs(vdev);
+       __rproc_virtio_del_vqs(vdev);
        return ret;
 }
 
index 7a82337e4deee1ae9d1648ec6df5dbbcfe344ba4..073108dcf9e7b5660f1a4f9fb7074605bf247845 100644 (file)
@@ -288,11 +288,11 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
 static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
 {
        /* leave rtc running, but disable irqs */
-       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);
 
-       tps65910_rtc_alarm_irq_enable(&rtc->dev, 0);
+       tps65910_rtc_alarm_irq_enable(&pdev->dev, 0);
 
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(tps_rtc->rtc);
        return 0;
 }
 
index 16b7a72a70c4aae344cefb39f64d4b9ca5155eb8..3b2365c8eab23a38fda34d71b27a7497294dd5fb 100644 (file)
@@ -1276,7 +1276,7 @@ struct megasas_evt_detail {
 } __attribute__ ((packed));
 
 struct megasas_aen_event {
-       struct work_struct hotplug_work;
+       struct delayed_work hotplug_work;
        struct megasas_instance *instance;
 };
 
index d2c5366aff7fba67ffa0bdef6007d69ff8e37de2..e4f2baacf1e1783ec9054bae3f7013afb5af0a5f 100644 (file)
@@ -2060,9 +2060,9 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
                } else {
                        ev->instance = instance;
                        instance->ev = ev;
-                       INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
-                       schedule_delayed_work(
-                               (struct delayed_work *)&ev->hotplug_work, 0);
+                       INIT_DELAYED_WORK(&ev->hotplug_work,
+                                         megasas_aen_polling);
+                       schedule_delayed_work(&ev->hotplug_work, 0);
                }
        }
 }
@@ -4352,8 +4352,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
        /* cancel the delayed work if this work still in queue */
        if (instance->ev != NULL) {
                struct megasas_aen_event *ev = instance->ev;
-               cancel_delayed_work_sync(
-                       (struct delayed_work *)&ev->hotplug_work);
+               cancel_delayed_work_sync(&ev->hotplug_work);
                instance->ev = NULL;
        }
 
@@ -4545,8 +4544,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
        /* cancel the delayed work if this work still in queue*/
        if (instance->ev != NULL) {
                struct megasas_aen_event *ev = instance->ev;
-               cancel_delayed_work_sync(
-                       (struct delayed_work *)&ev->hotplug_work);
+               cancel_delayed_work_sync(&ev->hotplug_work);
                instance->ev = NULL;
        }
 
@@ -5190,7 +5188,7 @@ static void
 megasas_aen_polling(struct work_struct *work)
 {
        struct megasas_aen_event *ev =
-               container_of(work, struct megasas_aen_event, hotplug_work);
+               container_of(work, struct megasas_aen_event, hotplug_work.work);
        struct megasas_instance *instance = ev->instance;
        union megasas_evt_class_locale class_locale;
        struct  Scsi_Host *host;
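
The megaraid_sas hunks above make hotplug_work a real struct delayed_work instead of casting a work_struct at every call site; the handler then recovers its containing event through the embedded .work member. A minimal sketch with hypothetical names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct sketch_event {
	struct delayed_work hotplug_work;
	int payload;
};

static void sketch_polling(struct work_struct *work)
{
	/* container_of() on the embedded work member, no casts needed. */
	struct sketch_event *ev =
		container_of(work, struct sketch_event, hotplug_work.work);

	(void)ev->payload;	/* ... poll and rearm ... */
	schedule_delayed_work(&ev->hotplug_work, HZ);
}

static void sketch_arm(struct sketch_event *ev)
{
	INIT_DELAYED_WORK(&ev->hotplug_work, sketch_polling);
	schedule_delayed_work(&ev->hotplug_work, 0);
	/* Teardown uses cancel_delayed_work_sync(&ev->hotplug_work). */
}
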
index 9097155e9ebe7100c0bc1a4ac13b7c508a84ac8a..dcecbfb172436b3ec0d51232efd988e7247f9afe 100644 (file)
@@ -1819,8 +1819,10 @@ void target_execute_cmd(struct se_cmd *cmd)
        /*
         * If the received CDB has already been aborted, stop processing it here.
         */
-       if (transport_check_aborted_status(cmd, 1))
+       if (transport_check_aborted_status(cmd, 1)) {
+               complete(&cmd->t_transport_stop_comp);
                return;
+       }
 
        /*
         * Determine if IOCTL context caller in requesting the stopping of this
@@ -3067,7 +3069,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
        unsigned long flags;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+       if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
index 99ac2cb08b43bcb3b570969d752bff25126f298d..dedaf81d8f36fd89a5258d9a21fd261401f6e0d9 100644 (file)
@@ -1076,7 +1076,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
                }
                _iov = iov + ret;
                size = reg->memory_size - addr + reg->guest_phys_addr;
-               _iov->iov_len = min((u64)len, size);
+               _iov->iov_len = min((u64)len - s, size);
                _iov->iov_base = (void __user *)(unsigned long)
                        (reg->userspace_addr + addr - reg->guest_phys_addr);
                s += size;
index 1a1e5e3b1eafc7f00454cb2d544b8761161d015e..ab3a456f66506a5a134f5280739e82c572b15f55 100644 (file)
@@ -70,19 +70,6 @@ static void bdev_inode_switch_bdi(struct inode *inode,
        spin_unlock(&dst->wb.list_lock);
 }
 
-sector_t blkdev_max_block(struct block_device *bdev)
-{
-       sector_t retval = ~((sector_t)0);
-       loff_t sz = i_size_read(bdev->bd_inode);
-
-       if (sz) {
-               unsigned int size = block_size(bdev);
-               unsigned int sizebits = blksize_bits(size);
-               retval = (sz >> sizebits);
-       }
-       return retval;
-}
-
 /* Kill _all_ buffers and pagecache , dirty or not.. */
 void kill_bdev(struct block_device *bdev)
 {
@@ -116,8 +103,6 @@ EXPORT_SYMBOL(invalidate_bdev);
 
 int set_blocksize(struct block_device *bdev, int size)
 {
-       struct address_space *mapping;
-
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;
@@ -126,19 +111,6 @@ int set_blocksize(struct block_device *bdev, int size)
        if (size < bdev_logical_block_size(bdev))
                return -EINVAL;
 
-       /* Prevent starting I/O or mapping the device */
-       percpu_down_write(&bdev->bd_block_size_semaphore);
-
-       /* Check that the block device is not memory mapped */
-       mapping = bdev->bd_inode->i_mapping;
-       mutex_lock(&mapping->i_mmap_mutex);
-       if (mapping_mapped(mapping)) {
-               mutex_unlock(&mapping->i_mmap_mutex);
-               percpu_up_write(&bdev->bd_block_size_semaphore);
-               return -EBUSY;
-       }
-       mutex_unlock(&mapping->i_mmap_mutex);
-
        /* Don't change the size if it is same as current */
        if (bdev->bd_block_size != size) {
                sync_blockdev(bdev);
@@ -146,9 +118,6 @@ int set_blocksize(struct block_device *bdev, int size)
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
-
-       percpu_up_write(&bdev->bd_block_size_semaphore);
-
        return 0;
 }
 
@@ -181,52 +150,12 @@ static int
 blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
 {
-       if (iblock >= blkdev_max_block(I_BDEV(inode))) {
-               if (create)
-                       return -EIO;
-
-               /*
-                * for reads, we're just trying to fill a partial page.
-                * return a hole, they will have to call get_block again
-                * before they can fill it, and they will get -EIO at that
-                * time
-                */
-               return 0;
-       }
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
 }
 
-static int
-blkdev_get_blocks(struct inode *inode, sector_t iblock,
-               struct buffer_head *bh, int create)
-{
-       sector_t end_block = blkdev_max_block(I_BDEV(inode));
-       unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
-
-       if ((iblock + max_blocks) > end_block) {
-               max_blocks = end_block - iblock;
-               if ((long)max_blocks <= 0) {
-                       if (create)
-                               return -EIO;    /* write fully beyond EOF */
-                       /*
-                        * It is a read which is fully beyond EOF.  We return
-                        * a !buffer_mapped buffer
-                        */
-                       max_blocks = 0;
-               }
-       }
-
-       bh->b_bdev = I_BDEV(inode);
-       bh->b_blocknr = iblock;
-       bh->b_size = max_blocks << inode->i_blkbits;
-       if (max_blocks)
-               set_buffer_mapped(bh);
-       return 0;
-}
-
 static ssize_t
 blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs)
@@ -235,7 +164,7 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        struct inode *inode = file->f_mapping->host;
 
        return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
-                                   nr_segs, blkdev_get_blocks, NULL, NULL, 0);
+                                   nr_segs, blkdev_get_block, NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
@@ -459,12 +388,6 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
-
-       if (unlikely(percpu_init_rwsem(&ei->bdev.bd_block_size_semaphore))) {
-               kmem_cache_free(bdev_cachep, ei);
-               return NULL;
-       }
-
        return &ei->vfs_inode;
 }
 
@@ -473,8 +396,6 @@ static void bdev_i_callback(struct rcu_head *head)
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct bdev_inode *bdi = BDEV_I(inode);
 
-       percpu_free_rwsem(&bdi->bdev.bd_block_size_semaphore);
-
        kmem_cache_free(bdev_cachep, bdi);
 }
 
@@ -1593,22 +1514,6 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        return blkdev_ioctl(bdev, mode, cmd, arg);
 }
 
-ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                       unsigned long nr_segs, loff_t pos)
-{
-       ssize_t ret;
-       struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
-
-       percpu_down_read(&bdev->bd_block_size_semaphore);
-
-       ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
-
-       percpu_up_read(&bdev->bd_block_size_semaphore);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(blkdev_aio_read);
-
 /*
  * Write data to the block device.  Only intended for the block device itself
  * and the raw driver which basically is a fake block device.
@@ -1620,16 +1525,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         unsigned long nr_segs, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
-       struct block_device *bdev = I_BDEV(file->f_mapping->host);
        struct blk_plug plug;
        ssize_t ret;
 
        BUG_ON(iocb->ki_pos != pos);
 
        blk_start_plug(&plug);
-
-       percpu_down_read(&bdev->bd_block_size_semaphore);
-
        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;
@@ -1638,62 +1539,27 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                if (err < 0 && ret > 0)
                        ret = err;
        }
-
-       percpu_up_read(&bdev->bd_block_size_semaphore);
-
        blk_finish_plug(&plug);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(blkdev_aio_write);
 
-static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       int ret;
-       struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
-       percpu_down_read(&bdev->bd_block_size_semaphore);
-
-       ret = generic_file_mmap(file, vma);
-
-       percpu_up_read(&bdev->bd_block_size_semaphore);
-
-       return ret;
-}
-
-static ssize_t blkdev_splice_read(struct file *file, loff_t *ppos,
-                                 struct pipe_inode_info *pipe, size_t len,
-                                 unsigned int flags)
-{
-       ssize_t ret;
-       struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
-       percpu_down_read(&bdev->bd_block_size_semaphore);
-
-       ret = generic_file_splice_read(file, ppos, pipe, len, flags);
-
-       percpu_up_read(&bdev->bd_block_size_semaphore);
-
-       return ret;
-}
-
-static ssize_t blkdev_splice_write(struct pipe_inode_info *pipe,
-                                  struct file *file, loff_t *ppos, size_t len,
-                                  unsigned int flags)
+static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
+                        unsigned long nr_segs, loff_t pos)
 {
-       ssize_t ret;
-       struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
-       percpu_down_read(&bdev->bd_block_size_semaphore);
-
-       ret = generic_file_splice_write(pipe, file, ppos, len, flags);
+       struct file *file = iocb->ki_filp;
+       struct inode *bd_inode = file->f_mapping->host;
+       loff_t size = i_size_read(bd_inode);
 
-       percpu_up_read(&bdev->bd_block_size_semaphore);
+       if (pos >= size)
+               return 0;
 
-       return ret;
+       size -= pos;
+       if (size < INT_MAX)
+               nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size);
+       return generic_file_aio_read(iocb, iov, nr_segs, pos);
 }
 
-
 /*
  * Try to release a page associated with block device when the system
  * is under memory pressure.
@@ -1724,16 +1590,16 @@ const struct file_operations def_blk_fops = {
        .llseek         = block_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = blkdev_aio_read,
+       .aio_read       = blkdev_aio_read,
        .aio_write      = blkdev_aio_write,
-       .mmap           = blkdev_mmap,
+       .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = block_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
 #endif
-       .splice_read    = blkdev_splice_read,
-       .splice_write   = blkdev_splice_write,
+       .splice_read    = generic_file_splice_read,
+       .splice_write   = generic_file_splice_write,
 };
 
 int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
index b5f044283edb53b1c9a65f01b385de3f77898a91..ec0aca8ba6bfdc79022563007207c5a70d23bd37 100644 (file)
@@ -911,6 +911,18 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
        attach_page_buffers(page, head);
 }
 
+static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
+{
+       sector_t retval = ~((sector_t)0);
+       loff_t sz = i_size_read(bdev->bd_inode);
+
+       if (sz) {
+               unsigned int sizebits = blksize_bits(size);
+               retval = (sz >> sizebits);
+       }
+       return retval;
+}
+
 /*
  * Initialise the state of a blockdev page's buffers.
  */ 
@@ -921,7 +933,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);
-       sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
+       sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
 
        do {
                if (!buffer_mapped(bh)) {
@@ -1552,6 +1564,28 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 }
 EXPORT_SYMBOL(unmap_underlying_metadata);
 
+/*
+ * Size is a power-of-two in the range 512..PAGE_SIZE,
+ * and the case we care about most is PAGE_SIZE.
+ *
+ * So this *could* possibly be written with those
+ * constraints in mind (relevant mostly if some
+ * architecture has a slow bit-scan instruction)
+ */
+static inline int block_size_bits(unsigned int blocksize)
+{
+       return ilog2(blocksize);
+}
+
+static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
+{
+       BUG_ON(!PageLocked(page));
+
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
+       return page_buffers(page);
+}
+
 /*
  * NOTE! All mapped/uptodate combinations are valid:
  *
@@ -1589,19 +1623,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        sector_t block;
        sector_t last_block;
        struct buffer_head *bh, *head;
-       const unsigned blocksize = 1 << inode->i_blkbits;
+       unsigned int blocksize, bbits;
        int nr_underway = 0;
        int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
                        WRITE_SYNC : WRITE);
 
-       BUG_ON(!PageLocked(page));
-
-       last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
-
-       if (!page_has_buffers(page)) {
-               create_empty_buffers(page, blocksize,
+       head = create_page_buffers(page, inode,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
-       }
 
        /*
         * Be very careful.  We have no exclusion from __set_page_dirty_buffers
@@ -1613,9 +1641,12 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
         * handle that here by just cleaning them.
         */
 
-       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       head = page_buffers(page);
        bh = head;
+       blocksize = bh->b_size;
+       bbits = block_size_bits(blocksize);
+
+       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+       last_block = (i_size_read(inode) - 1) >> bbits;
 
        /*
         * Get all the dirty buffers mapped to disk addresses and
@@ -1806,12 +1837,10 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
        BUG_ON(to > PAGE_CACHE_SIZE);
        BUG_ON(from > to);
 
-       blocksize = 1 << inode->i_blkbits;
-       if (!page_has_buffers(page))
-               create_empty_buffers(page, blocksize, 0);
-       head = page_buffers(page);
+       head = create_page_buffers(page, inode, 0);
+       blocksize = head->b_size;
+       bbits = block_size_bits(blocksize);
 
-       bbits = inode->i_blkbits;
        block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
 
        for(bh = head, block_start = 0; bh != head || !block_start;
@@ -1881,11 +1910,11 @@ static int __block_commit_write(struct inode *inode, struct page *page,
        unsigned blocksize;
        struct buffer_head *bh, *head;
 
-       blocksize = 1 << inode->i_blkbits;
+       bh = head = page_buffers(page);
+       blocksize = bh->b_size;
 
-       for(bh = head = page_buffers(page), block_start = 0;
-           bh != head || !block_start;
-           block_start=block_end, bh = bh->b_this_page) {
+       block_start = 0;
+       do {
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (!buffer_uptodate(bh))
@@ -1895,7 +1924,10 @@ static int __block_commit_write(struct inode *inode, struct page *page,
                        mark_buffer_dirty(bh);
                }
                clear_buffer_new(bh);
-       }
+
+               block_start = block_end;
+               bh = bh->b_this_page;
+       } while (bh != head);
 
        /*
         * If this is a partial write which happened to make all buffers
@@ -2020,7 +2052,6 @@ EXPORT_SYMBOL(generic_write_end);
 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
                                        unsigned long from)
 {
-       struct inode *inode = page->mapping->host;
        unsigned block_start, block_end, blocksize;
        unsigned to;
        struct buffer_head *bh, *head;
@@ -2029,13 +2060,13 @@ int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
        if (!page_has_buffers(page))
                return 0;
 
-       blocksize = 1 << inode->i_blkbits;
+       head = page_buffers(page);
+       blocksize = head->b_size;
        to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
        to = from + to;
        if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
                return 0;
 
-       head = page_buffers(page);
        bh = head;
        block_start = 0;
        do {
@@ -2068,18 +2099,16 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
        struct inode *inode = page->mapping->host;
        sector_t iblock, lblock;
        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
-       unsigned int blocksize;
+       unsigned int blocksize, bbits;
        int nr, i;
        int fully_mapped = 1;
 
-       BUG_ON(!PageLocked(page));
-       blocksize = 1 << inode->i_blkbits;
-       if (!page_has_buffers(page))
-               create_empty_buffers(page, blocksize, 0);
-       head = page_buffers(page);
+       head = create_page_buffers(page, inode, 0);
+       blocksize = head->b_size;
+       bbits = block_size_bits(blocksize);
 
-       iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
+       iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+       lblock = (i_size_read(inode)+blocksize-1) >> bbits;
        bh = head;
        nr = 0;
        i = 0;
@@ -2864,6 +2893,55 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
        bio_put(bio);
 }
 
+/*
+ * This allows us to do IO even on the odd last sectors
+ * of a device, even if the bh block size is some multiple
+ * of the physical sector size.
+ *
+ * We'll just truncate the bio to the size of the device,
+ * and clear the end of the buffer head manually.
+ *
+ * Truly out-of-range accesses will turn into actual IO
+ * errors, this only handles the "we need to be able to
+ * do IO at the final sector" case.
+ */
+static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
+{
+       sector_t maxsector;
+       unsigned bytes;
+
+       maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+       if (!maxsector)
+               return;
+
+       /*
+        * If the *whole* IO is past the end of the device,
+        * let it through, and the IO layer will turn it into
+        * an EIO.
+        */
+       if (unlikely(bio->bi_sector >= maxsector))
+               return;
+
+       maxsector -= bio->bi_sector;
+       bytes = bio->bi_size;
+       if (likely((bytes >> 9) <= maxsector))
+               return;
+
+       /* Uhhuh. We've got a bh that straddles the device size! */
+       bytes = maxsector << 9;
+
+       /* Truncate the bio.. */
+       bio->bi_size = bytes;
+       bio->bi_io_vec[0].bv_len = bytes;
+
+       /* ..and clear the end of the buffer for reads */
+       if ((rw & RW_MASK) == READ) {
+               void *kaddr = kmap_atomic(bh->b_page);
+               memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
+               kunmap_atomic(kaddr);
+       }
+}
+
 int submit_bh(int rw, struct buffer_head * bh)
 {
        struct bio *bio;
@@ -2900,6 +2978,9 @@ int submit_bh(int rw, struct buffer_head * bh)
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
 
+       /* Take care of bh's that straddle the end of the device */
+       guard_bh_eod(rw, bio, bh);
+
        bio_get(bio);
        submit_bio(rw, bio);
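Editor's note: the guard_bh_eod() helper added above shortens a bio whose buffer head starts before the end of the device but extends past it, and zeroes the tail of the buffer for reads. A standalone model of the same sector arithmetic with made-up numbers (a 512 KiB device and a 4 KiB buffer head starting four sectors before the end); it is not the kernel function:

#include <stdio.h>

/* Returns the number of bytes that may actually be transferred for an I/O of
 * 'bytes' starting at 'start_sector' on a device of 'dev_sectors' sectors. */
static unsigned int bytes_within_device(unsigned long long start_sector,
                                        unsigned int bytes,
                                        unsigned long long dev_sectors)
{
        if (!dev_sectors || start_sector >= dev_sectors)
                return bytes;   /* wholly out of range: the block layer
                                   will fail it with an I/O error */
        if ((bytes >> 9) <= dev_sectors - start_sector)
                return bytes;   /* fits entirely, nothing to do */
        return (unsigned int)((dev_sectors - start_sector) << 9);  /* truncate */
}

int main(void)
{
        /* 512 KiB device = 1024 sectors, 4 KiB buffer head at sector 1020 */
        printf("%u\n", bytes_within_device(1020, 4096, 1024));  /* 2048 */
        return 0;
}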
 
index edb25b4bbb959c49e0681d7a7b992b2e1093a710..70b6f4c3a0c1c73d1a0babc2be2dbcab0b17e552 100644 (file)
@@ -1794,7 +1794,6 @@ static int cifs_writepages(struct address_space *mapping,
        struct TCP_Server_Info *server;
        struct page *page;
        int rc = 0;
-       loff_t isize = i_size_read(mapping->host);
 
        /*
         * If wsize is smaller than the page cache size, default to writing
@@ -1899,7 +1898,7 @@ retry:
                         */
                        set_page_writeback(page);
 
-                       if (page_offset(page) >= isize) {
+                       if (page_offset(page) >= i_size_read(mapping->host)) {
                                done = true;
                                unlock_page(page);
                                end_page_writeback(page);
@@ -1932,7 +1931,8 @@ retry:
                wdata->offset = page_offset(wdata->pages[0]);
                wdata->pagesz = PAGE_CACHE_SIZE;
                wdata->tailsz =
-                       min(isize - page_offset(wdata->pages[nr_pages - 1]),
+                       min(i_size_read(mapping->host) -
+                           page_offset(wdata->pages[nr_pages - 1]),
                            (loff_t)PAGE_CACHE_SIZE);
                wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
                                        wdata->tailsz;
index f9b5d3d6cf33461081f4945d61a5d604fe2ef468..1c576e8713669534be76853aa6c45e97482def96 100644 (file)
@@ -86,14 +86,17 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 
        dentry = d_lookup(parent, name);
        if (dentry) {
+               int err;
                inode = dentry->d_inode;
                /* update inode in place if i_ino didn't change */
                if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
                        cifs_fattr_to_inode(inode, fattr);
                        return dentry;
                }
-               d_drop(dentry);
+               err = d_invalidate(dentry);
                dput(dentry);
+               if (err)
+                       return NULL;
        }
 
        dentry = d_alloc(parent, name);
index 56cc4be87807ae2c734b647c5c78d5480d234e7b..34cea27983335fbf9dea43697cc8663ecf5aa091 100644 (file)
@@ -766,7 +766,6 @@ smb_set_file_info(struct inode *inode, const char *full_path,
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink = NULL;
        struct cifs_tcon *tcon;
-       FILE_BASIC_INFO info_buf;
 
        /* if the file is already open for write, just use that fileid */
        open_file = find_writable_file(cinode, true);
@@ -817,7 +816,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
        netpid = current->tgid;
 
 set_via_filehandle:
-       rc = CIFSSMBSetFileInfo(xid, tcon, &info_buf, netfid, netpid);
+       rc = CIFSSMBSetFileInfo(xid, tcon, buf, netfid, netpid);
        if (!rc)
                cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
 
index f86c720dba0eeea72d7ecefdefa8ad0b52886011..cf5b44b10c6759454e662dcf0a99f3511f374a3c 100644 (file)
@@ -540,6 +540,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
        sector_t fs_endblk;     /* Into file, in filesystem-sized blocks */
        unsigned long fs_count; /* Number of filesystem-sized blocks */
        int create;
+       unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
 
        /*
         * If there was a memory error and we've overwritten all the
@@ -554,7 +555,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
                fs_count = fs_endblk - fs_startblk + 1;
 
                map_bh->b_state = 0;
-               map_bh->b_size = fs_count << dio->inode->i_blkbits;
+               map_bh->b_size = fs_count << i_blkbits;
 
                /*
                 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
@@ -1053,7 +1054,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        int seg;
        size_t size;
        unsigned long addr;
-       unsigned blkbits = inode->i_blkbits;
+       unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
+       unsigned blkbits = i_blkbits;
        unsigned blocksize_mask = (1 << blkbits) - 1;
        ssize_t retval = -EINVAL;
        loff_t end = offset;
@@ -1149,7 +1151,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        dio->inode = inode;
        dio->rw = rw;
        sdio.blkbits = blkbits;
-       sdio.blkfactor = inode->i_blkbits - blkbits;
+       sdio.blkfactor = i_blkbits - blkbits;
        sdio.block_in_file = offset >> blkbits;
 
        sdio.get_block = get_block;
index 7cb71b99260340fe7302d2a8c5dc4f84f8029aff..eff23162485f93176255199b3ed74176534ae489 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -994,16 +994,18 @@ int iterate_fd(struct files_struct *files, unsigned n,
                const void *p)
 {
        struct fdtable *fdt;
-       struct file *file;
        int res = 0;
        if (!files)
                return 0;
        spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       while (!res && n < fdt->max_fds) {
-               file = rcu_dereference_check_fdtable(files, fdt->fd[n++]);
-               if (file)
-                       res = f(p, file, n);
+       for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
+               struct file *file;
+               file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
+               if (!file)
+                       continue;
+               res = f(p, file, n);
+               if (res)
+                       break;
        }
        spin_unlock(&files->file_lock);
        return res;
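Editor's note: the reworked iterate_fd() loop above skips empty descriptor slots and stops at the first callback that returns non-zero, handing that value back to the caller. A standalone sketch of the same control flow over a plain pointer array (hypothetical types, not the files_struct machinery or its locking):

#include <stdio.h>

/* Walk 'table' from slot n, skip NULL entries, and stop as soon as the
 * callback reports a non-zero result; that result is returned. */
static int iterate_table(void **table, unsigned int max, unsigned int n,
                         int (*f)(const void *priv, void *entry, unsigned int idx),
                         const void *priv)
{
        int res = 0;

        for (; n < max; n++) {
                if (!table[n])
                        continue;               /* empty slot, keep scanning */
                res = f(priv, table[n], n);
                if (res)
                        break;                  /* first non-zero result wins */
        }
        return res;
}

static int find_entry(const void *priv, void *entry, unsigned int idx)
{
        return entry == priv ? (int)idx + 1 : 0;        /* report 1-based slot */
}

int main(void)
{
        int a = 1, b = 2;
        void *table[4] = { &a, NULL, &b, NULL };

        printf("%d\n", iterate_table(table, 4, 0, find_entry, &b));    /* 3 */
        return 0;
}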
index 937f9d50c84bdead7516057fead3d5a702ead4b9..5f4cdf3ad913fec928835d87fbe499efdf7d67e5 100644 (file)
@@ -2131,6 +2131,11 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
        if (!len)
                return ERR_PTR(-EACCES);
 
+       if (unlikely(name[0] == '.')) {
+               if (len < 2 || (len == 2 && name[1] == '.'))
+                       return ERR_PTR(-EACCES);
+       }
+
        while (len--) {
                c = *(const unsigned char *)name++;
                if (c == '/' || c == '\0')
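Editor's note: the check added above makes lookup_one_len() refuse the special names "." and ".." up front, in addition to the existing rejection of '/' and NUL bytes. A minimal standalone version of that name filter (illustrative only, operating on a plain length-delimited string rather than a dentry lookup):

#include <stdio.h>

/* Returns 1 if the component name would be rejected by the checks shown
 * above: empty, ".", "..", or containing '/' or a NUL byte. */
static int reject_name(const char *name, int len)
{
        int i;

        if (!len)
                return 1;
        if (name[0] == '.' && (len < 2 || (len == 2 && name[1] == '.')))
                return 1;
        for (i = 0; i < len; i++)
                if (name[i] == '/' || name[i] == '\0')
                        return 1;
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", reject_name(".", 1), reject_name("..", 2),
               reject_name("a.b", 3));          /* prints: 1 1 0 */
        return 0;
}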
index ce8cb926526bfad79aabe5b8448f80212e1670f9..b9e66b7e0c1495ba05c1fb6a4b2f1557ca6a9b24 100644 (file)
@@ -450,7 +450,8 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
                        nfs_refresh_inode(dentry->d_inode, entry->fattr);
                        goto out;
                } else {
-                       d_drop(dentry);
+                       if (d_invalidate(dentry) != 0)
+                               goto out;
                        dput(dentry);
                }
        }
@@ -1100,6 +1101,8 @@ out_set_verifier:
 out_zap_parent:
        nfs_zap_caches(dir);
  out_bad:
+       nfs_free_fattr(fattr);
+       nfs_free_fhandle(fhandle);
        nfs_mark_for_revalidate(dir);
        if (inode && S_ISDIR(inode->i_mode)) {
                /* Purge readdir caches. */
@@ -1112,8 +1115,6 @@ out_zap_parent:
                shrink_dcache_parent(dentry);
        }
        d_drop(dentry);
-       nfs_free_fattr(fattr);
-       nfs_free_fhandle(fhandle);
        dput(parent);
        dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
                        __func__, dentry->d_parent->d_name.name,
index b33cfc97b9caef81de7e525c44e23c4db6a5ba18..75fe9a1348036c52b863fb47737eb6b0f9dd8789 100644 (file)
@@ -462,8 +462,6 @@ struct block_device {
        int                     bd_fsfreeze_count;
        /* Mutex for freeze */
        struct mutex            bd_fsfreeze_mutex;
-       /* A semaphore that prevents I/O while block size is being changed */
-       struct percpu_rw_semaphore      bd_block_size_semaphore;
 };
 
 /*
@@ -2049,7 +2047,6 @@ extern void unregister_blkdev(unsigned int, const char *);
 extern struct block_device *bdget(dev_t);
 extern struct block_device *bdgrab(struct block_device *bdev);
 extern void bd_set_size(struct block_device *, loff_t size);
-extern sector_t blkdev_max_block(struct block_device *bdev);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern void invalidate_bdev(struct block_device *);
@@ -2379,8 +2376,6 @@ extern int generic_segment_checks(const struct iovec *iov,
                unsigned long *nr_segs, size_t *count, int access_flags);
 
 /* fs/block_dev.c */
-extern ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos);
 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
index 6ae9c631a1be51c7221321a9a2ceb787217e4ff0..0464c85e63fd05d1fc10f0f03eeac56fe03f3e8e 100644 (file)
@@ -1,35 +1,8 @@
 #ifndef _LINUX_HW_BREAKPOINT_H
 #define _LINUX_HW_BREAKPOINT_H
 
-enum {
-       HW_BREAKPOINT_LEN_1 = 1,
-       HW_BREAKPOINT_LEN_2 = 2,
-       HW_BREAKPOINT_LEN_4 = 4,
-       HW_BREAKPOINT_LEN_8 = 8,
-};
-
-enum {
-       HW_BREAKPOINT_EMPTY     = 0,
-       HW_BREAKPOINT_R         = 1,
-       HW_BREAKPOINT_W         = 2,
-       HW_BREAKPOINT_RW        = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
-       HW_BREAKPOINT_X         = 4,
-       HW_BREAKPOINT_INVALID   = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
-};
-
-enum bp_type_idx {
-       TYPE_INST       = 0,
-#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
-       TYPE_DATA       = 0,
-#else
-       TYPE_DATA       = 1,
-#endif
-       TYPE_MAX
-};
-
-#ifdef __KERNEL__
-
 #include <linux/perf_event.h>
+#include <uapi/linux/hw_breakpoint.h>
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 
@@ -151,6 +124,4 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
 }
 
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#endif /* __KERNEL__ */
-
 #endif /* _LINUX_HW_BREAKPOINT_H */
index a123b13b70fd80cb12def4c100ee198606b7c6b6..7d8dfc7392f11f020f1a4de28ff66f416c19c964 100644 (file)
@@ -701,6 +701,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #define COMPACTION_BUILD 0
 #endif
 
+/* This helps us to avoid #ifdef CONFIG_SYMBOL_PREFIX */
+#ifdef CONFIG_SYMBOL_PREFIX
+#define SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
+#else
+#define SYMBOL_PREFIX ""
+#endif
+
 /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
index e5ccb9ddd90eeb634665e88afd13d56184fff241..dbd212723b74e3d88eb7a7e13bc241e420db84bb 100644 (file)
@@ -82,16 +82,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
                __mpol_put(pol);
 }
 
-extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
-                                         struct mempolicy *frompol);
-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
-                                               struct mempolicy *frompol)
-{
-       if (!frompol)
-               return frompol;
-       return __mpol_cond_copy(tompol, frompol);
-}
-
 extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
 static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 {
@@ -215,12 +205,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
 {
 }
 
-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
-                                               struct mempolicy *from)
-{
-       return from;
-}
-
 static inline void mpol_get(struct mempolicy *pol)
 {
 }
index f8eda0276f03fadf3be56e8e70f928f792714406..a848ffc327f4f19e3b1da926f52aa361fd27b654 100644 (file)
@@ -1488,6 +1488,9 @@ struct napi_gro_cb {
 
        /* Used in ipv6_gro_receive() */
        int     proto;
+
+       /* used in skb_gro_receive() slow path */
+       struct sk_buff *last;
 };
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
index 250a4acddb2b8975a6c8278a3c5b1d803f36b585..bd1e86071e57c27219b2746ab715d419c60fcc2c 100644 (file)
@@ -13,7 +13,7 @@ struct percpu_rw_semaphore {
 };
 
 #define light_mb()     barrier()
-#define heavy_mb()     synchronize_sched()
+#define heavy_mb()     synchronize_sched_expedited()
 
 static inline void percpu_down_read(struct percpu_rw_semaphore *p)
 {
@@ -51,7 +51,7 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p)
 {
        mutex_lock(&p->mtx);
        p->locked = true;
-       synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
+       synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
        while (__percpu_count(p->counters))
                msleep(1);
        heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
index 6feeccd83dd7557abd30e32c0547f483b79bd238..4af45e33105db2ee37b8ca66d6ec4122aa487456 100644 (file)
@@ -525,6 +525,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                                      int nonagle);
 extern bool tcp_may_send_now(struct sock *sk);
+extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_retransmit_timer(struct sock *sk);
 extern void tcp_xmit_retransmit_queue(struct sock *);
index e194387ef7845a77c53114f0e3dbada06e4c8189..19e765fbfef716442b38f1bc89eb7490c08c0c76 100644 (file)
@@ -415,3 +415,4 @@ header-y += wireless.h
 header-y += x25.h
 header-y += xattr.h
 header-y += xfrm.h
+header-y += hw_breakpoint.h
diff --git a/include/uapi/linux/hw_breakpoint.h b/include/uapi/linux/hw_breakpoint.h
new file mode 100644 (file)
index 0000000..b04000a
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef _UAPI_LINUX_HW_BREAKPOINT_H
+#define _UAPI_LINUX_HW_BREAKPOINT_H
+
+enum {
+       HW_BREAKPOINT_LEN_1 = 1,
+       HW_BREAKPOINT_LEN_2 = 2,
+       HW_BREAKPOINT_LEN_4 = 4,
+       HW_BREAKPOINT_LEN_8 = 8,
+};
+
+enum {
+       HW_BREAKPOINT_EMPTY     = 0,
+       HW_BREAKPOINT_R         = 1,
+       HW_BREAKPOINT_W         = 2,
+       HW_BREAKPOINT_RW        = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+       HW_BREAKPOINT_X         = 4,
+       HW_BREAKPOINT_INVALID   = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+       TYPE_INST       = 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+       TYPE_DATA       = 0,
+#else
+       TYPE_DATA       = 1,
+#endif
+       TYPE_MAX
+};
+
+#endif /* _UAPI_LINUX_HW_BREAKPOINT_H */
index 9a7b487c6fe240c1a2e4f5c70ef68da6370ebf78..fe8a916507ed278cf545196561878a842547a865 100644 (file)
@@ -111,14 +111,16 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  * Count the number of breakpoints of the same type and same task.
  * The given event must be not on the list.
  */
-static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
+static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
        struct task_struct *tsk = bp->hw.bp_target;
        struct perf_event *iter;
        int count = 0;
 
        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-               if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
+               if (iter->hw.bp_target == tsk &&
+                   find_slot_idx(iter) == type &&
+                   cpu == iter->cpu)
                        count += hw_breakpoint_weight(iter);
        }
 
@@ -141,7 +143,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                if (!tsk)
                        slots->pinned += max_task_bp_pinned(cpu, type);
                else
-                       slots->pinned += task_bp_pinned(bp, type);
+                       slots->pinned += task_bp_pinned(cpu, bp, type);
                slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
                return;
@@ -154,7 +156,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                if (!tsk)
                        nr += max_task_bp_pinned(cpu, type);
                else
-                       nr += task_bp_pinned(bp, type);
+                       nr += task_bp_pinned(cpu, bp, type);
 
                if (nr > slots->pinned)
                        slots->pinned = nr;
@@ -188,7 +190,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
        int old_idx = 0;
        int idx = 0;
 
-       old_count = task_bp_pinned(bp, type);
+       old_count = task_bp_pinned(cpu, bp, type);
        old_idx = old_count - 1;
        idx = old_idx + weight;
 
index 4646eb2c382011de7c051577d0fe8b590b4dddca..767e559dfb10e697d207cdc1dedca0b00d5b5bdf 100644 (file)
@@ -21,10 +21,10 @@ struct key *modsign_keyring;
 extern __initdata const u8 modsign_certificate_list[];
 extern __initdata const u8 modsign_certificate_list_end[];
 asm(".section .init.data,\"aw\"\n"
-    "modsign_certificate_list:\n"
+    SYMBOL_PREFIX "modsign_certificate_list:\n"
     ".incbin \"signing_key.x509\"\n"
     ".incbin \"extra_certificates\"\n"
-    "modsign_certificate_list_end:"
+    SYMBOL_PREFIX "modsign_certificate_list_end:"
     );
 
 /*
index ea1b1df5dbb0077a167caaf8e013b8baf765e902..f2970bddc5ea6224b8c0357970a543c90ac11da0 100644 (file)
  *     - Information block
  */
 struct module_signature {
-       enum pkey_algo          algo : 8;       /* Public-key crypto algorithm */
-       enum pkey_hash_algo     hash : 8;       /* Digest algorithm */
-       enum pkey_id_type       id_type : 8;    /* Key identifier type */
-       u8                      signer_len;     /* Length of signer's name */
-       u8                      key_id_len;     /* Length of key identifier */
-       u8                      __pad[3];
-       __be32                  sig_len;        /* Length of signature data */
+       u8      algo;           /* Public-key crypto algorithm [enum pkey_algo] */
+       u8      hash;           /* Digest algorithm [enum pkey_hash_algo] */
+       u8      id_type;        /* Key identifier type [enum pkey_id_type] */
+       u8      signer_len;     /* Length of signer's name */
+       u8      key_id_len;     /* Length of key identifier */
+       u8      __pad[3];
+       __be32  sig_len;        /* Length of signature data */
 };
 
 /*
index 0984a21076a3ed8a37e56d5381bff6320d8fa5f2..15f60d01198bf4b0b126f8f3ac9fa8f2726aaa89 100644 (file)
@@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
        p->signal->autogroup = autogroup_kref_get(ag);
 
-       if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
-               goto out;
-
        t = p;
        do {
                sched_move_task(t);
        } while_each_thread(p, t);
 
-out:
        unlock_task_sighand(p, &flags);
        autogroup_kref_put(prev);
 }
index 8bd047142816dea81894bb27ccc3c78a38ac3d61..443232ebbb53b24ea369ca3c4fbb7b53c5e8cbef 100644 (file)
@@ -4,11 +4,6 @@
 #include <linux/rwsem.h>
 
 struct autogroup {
-       /*
-        * reference doesn't mean how many thread attach to this
-        * autogroup now. It just stands for the number of task
-        * could use this autogroup.
-        */
        struct kref             kref;
        struct task_group       *tg;
        struct rw_semaphore     lock;
index dd4b80a9f1a986ff35b192655064db6db3b85883..c8c21be11ab48e25569b43a31aee148d7d89c6d1 100644 (file)
@@ -368,6 +368,9 @@ static void watchdog_disable(unsigned int cpu)
 {
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
+       if (!watchdog_enabled)
+               return;
+
        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
index 042d221d33cc1675fadf7ee291e86717f2c8f6c9..1dae900df798d1c4e1be722567fd2906041f08f5 100644 (file)
@@ -1361,8 +1361,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
                     timer->data != (unsigned long)dwork);
-       BUG_ON(timer_pending(timer));
-       BUG_ON(!list_empty(&work->entry));
+       WARN_ON_ONCE(timer_pending(timer));
+       WARN_ON_ONCE(!list_empty(&work->entry));
+
+       /*
+        * If @delay is 0, queue @dwork->work immediately.  This is for
+        * both optimization and correctness.  The earliest @timer can
+        * expire is on the closest next tick, and delayed_work users depend
+        * on there being no such delay when @delay is 0.
+        */
+       if (!delay) {
+               __queue_work(cpu, wq, &dwork->work);
+               return;
+       }
 
        timer_stats_timer_set_start_info(&dwork->timer);
 
@@ -1417,9 +1428,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        bool ret = false;
        unsigned long flags;
 
-       if (!delay)
-               return queue_work_on(cpu, wq, &dwork->work);
-
        /* read the comment in __queue_work() */
        local_irq_save(flags);
 
@@ -2407,8 +2415,10 @@ static int rescuer_thread(void *__wq)
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
-       if (kthread_should_stop())
+       if (kthread_should_stop()) {
+               __set_current_state(TASK_RUNNING);
                return 0;
+       }
 
        /*
         * See whether any cpu is asking for help.  Unbounded
index 821a16229111eba69f189ae9be683f02eeb70862..a08b791200f3723883fc1328f7410a076f1146ee 100644 (file)
@@ -163,7 +163,7 @@ $(obj)/crc32table.h: $(obj)/gen_crc32table
 #
 obj-$(CONFIG_OID_REGISTRY) += oid_registry.o
 
-$(obj)/oid_registry.c: $(obj)/oid_registry_data.c
+$(obj)/oid_registry.o: $(obj)/oid_registry_data.c
 
 $(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
                            $(src)/build_OID_registry
index de2c8b5a715bd9c1f75129bc40293ac68f1f92e9..5293d2433029d391c3b95c18320d45c77d117c67 100644 (file)
@@ -91,7 +91,7 @@ next_tag:
 
        /* Extract the length */
        len = data[dp++];
-       if (len < 0x7f) {
+       if (len <= 0x7f) {
                dp += len;
                goto next_tag;
        }
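Editor's note: the corrected comparison above treats a first length octet of 0x7f as short form, which matches BER/DER: values 0x00..0x7f encode the length directly, while an octet with the top bit set gives the count of length octets that follow. A small standalone decoder of that rule (an illustrative sketch, not the library's parser):

#include <stdio.h>

/* Decode a BER definite length starting at data[*dp]; returns the length,
 * or -1 for forms this sketch does not handle (indefinite or oversized). */
static long ber_length(const unsigned char *data, unsigned int *dp)
{
        unsigned char first = data[(*dp)++];
        unsigned int n, i;
        long value = 0;

        if (first <= 0x7f)
                return first;           /* short form: 0x00..0x7f inclusive */
        if (first == 0x80)
                return -1;              /* indefinite form */
        n = first & 0x7f;               /* long form: number of length octets */
        if (n > sizeof(long) - 1)
                return -1;
        for (i = 0; i < n; i++)
                value = (value << 8) | data[(*dp)++];
        return value;
}

int main(void)
{
        const unsigned char short_form[] = { 0x7f };
        const unsigned char long_form[]  = { 0x82, 0x01, 0x00 };
        unsigned int dp = 0;

        printf("%ld\n", ber_length(short_form, &dp));   /* 127 */
        dp = 0;
        printf("%ld\n", ber_length(long_form, &dp));    /* 256 */
        return 0;
}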
index 9eef55838fca5ff99580982e85ea0c0fa6dea839..694eaabaaebdc0827c93d81c97f200fc534a115e 100644 (file)
@@ -713,7 +713,15 @@ static void isolate_freepages(struct zone *zone,
 
                /* Found a block suitable for isolating free pages from */
                isolated = 0;
-               end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+
+               /*
+                * As pfn may not start aligned, pfn+pageblock_nr_pages
+                * may cross a MAX_ORDER_NR_PAGES boundary and miss
+                * a pfn_valid check. Ensure isolate_freepages_block()
+                * only scans within a pageblock
+                */
+               end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+               end_pfn = min(end_pfn, zone_end_pfn);
                isolated = isolate_freepages_block(cc, pfn, end_pfn,
                                                   freelist, false);
                nr_freepages += isolated;
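Editor's note: the ALIGN()-based bound above keeps the free-page scan inside the pageblock containing pfn even when pfn itself is not pageblock-aligned, so isolate_freepages_block() never walks past a MAX_ORDER_NR_PAGES boundary without a pfn_valid() check. A quick standalone check of the arithmetic, assuming a hypothetical pageblock_nr_pages of 512:

#include <stdio.h>

/* Same round-up as the kernel's ALIGN() for power-of-two alignments. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long pageblock_nr_pages = 512;         /* assumed value */
        unsigned long pfn = 1000;                       /* not block-aligned */

        /* Old bound could run into the next block. */
        printf("old end_pfn: %lu\n", pfn + pageblock_nr_pages);               /* 1512 */
        /* New bound stops at the boundary of the pageblock containing pfn. */
        printf("new end_pfn: %lu\n", ALIGN_UP(pfn + 1, pageblock_nr_pages));  /* 1024 */
        return 0;
}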
index 6c5899b9034aa7d769e5fd80e139cad61af104c0..8b20278be6a62fa49a23615cf5bee60bd71b0ed0 100644 (file)
@@ -1476,9 +1476,17 @@ int soft_offline_page(struct page *page, int flags)
 {
        int ret;
        unsigned long pfn = page_to_pfn(page);
+       struct page *hpage = compound_trans_head(page);
 
        if (PageHuge(page))
                return soft_offline_huge_page(page, flags);
+       if (PageTransHuge(hpage)) {
+               if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+                       pr_info("soft offline: %#lx: failed to split THP\n",
+                               pfn);
+                       return -EBUSY;
+               }
+       }
 
        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
index d04a8a54c294f852f55e624e3c41d1b03b7a56e9..4ea600da8940aa209bad6e0ca56ee69ad480ebc2 100644 (file)
@@ -2037,28 +2037,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
        return new;
 }
 
-/*
- * If *frompol needs [has] an extra ref, copy *frompol to *tompol ,
- * eliminate the * MPOL_F_* flags that require conditional ref and
- * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
- * after return.  Use the returned value.
- *
- * Allows use of a mempolicy for, e.g., multiple allocations with a single
- * policy lookup, even if the policy needs/has extra ref on lookup.
- * shmem_readahead needs this.
- */
-struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
-                                               struct mempolicy *frompol)
-{
-       if (!mpol_needs_cond_ref(frompol))
-               return frompol;
-
-       *tompol = *frompol;
-       tompol->flags &= ~MPOL_F_SHARED;        /* copy doesn't need unref */
-       __mpol_put(frompol);
-       return tompol;
-}
-
 /* Slow path of a mempolicy comparison */
 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
index 92871579cbee3b8feb12376df03ad2969f942c48..7e208f0ad68ccb1468b408b5a50a981be8beb3a5 100644 (file)
@@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
                }
        }
 
-       return 1UL << order;
+       return 1UL << alloc_order;
 }
 
 /*
index 89341b658bd06b05d908f8becfef7fe0d904b808..50c5b8f3a359b611a22e1e3715dd816880e4651e 100644 (file)
@@ -910,25 +910,29 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
 {
-       struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;
-
-       spol = mpol_cond_copy(&mpol,
-                       mpol_shared_policy_lookup(&info->policy, index));
+       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        /* Bias interleave by inode number to distribute better across nodes */
        pvma.vm_pgoff = index + info->vfs_inode.i_ino;
        pvma.vm_ops = NULL;
-       pvma.vm_policy = spol;
-       return swapin_readahead(swap, gfp, &pvma, 0);
+       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+
+       page = swapin_readahead(swap, gfp, &pvma, 0);
+
+       /* Drop reference taken by mpol_shared_policy_lookup() */
+       mpol_cond_put(pvma.vm_policy);
+
+       return page;
 }
 
 static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
 {
        struct vm_area_struct pvma;
+       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
@@ -937,10 +941,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
 
-       /*
-        * alloc_page_vma() will drop the shared policy reference
-        */
-       return alloc_page_vma(gfp, &pvma, 0);
+       page = alloc_page_vma(gfp, &pvma, 0);
+
+       /* Drop reference taken by mpol_shared_policy_lookup() */
+       mpol_cond_put(pvma.vm_policy);
+
+       return page;
 }
 #else /* !CONFIG_NUMA */
 #ifdef CONFIG_TMPFS
index fac95f2888f2d1fbe923fd83ff7d43786ac89c58..a83de2f72b3061ee541c3b74fc6ddc6cfb8e202d 100644 (file)
@@ -617,7 +617,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
        return; /* XXX: Not implemented yet */
 }
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 }
 #else
@@ -658,10 +658,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
                           get_order(sizeof(struct page) * nr_pages));
 }
 
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic;
+       struct page *page = virt_to_page(memmap);
 
        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->lru.next;
@@ -710,13 +711,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
         */
 
        if (memmap) {
-               struct page *memmap_page;
-               memmap_page = virt_to_page(memmap);
-
                nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                        >> PAGE_SHIFT;
 
-               free_map_bootmem(memmap_page, nr_pages);
+               free_map_bootmem(memmap, nr_pages);
        }
 }
 
index cbf84e152f043c82b15d754a9f8c2c4c50866488..b7ed37675644ce7734946bfe5d2e2cfdc7f08fbc 100644 (file)
@@ -2414,6 +2414,19 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
        } while (memcg);
 }
 
+static bool zone_balanced(struct zone *zone, int order,
+                         unsigned long balance_gap, int classzone_idx)
+{
+       if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+                                   balance_gap, classzone_idx, 0))
+               return false;
+
+       if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+               return false;
+
+       return true;
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2492,8 +2505,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
                        continue;
                }
 
-               if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-                                                       i, 0))
+               if (!zone_balanced(zone, order, 0, i))
                        all_zones_ok = false;
                else
                        balanced += zone->present_pages;
@@ -2602,8 +2614,7 @@ loop_again:
                                break;
                        }
 
-                       if (!zone_watermark_ok_safe(zone, order,
-                                       high_wmark_pages(zone), 0, 0)) {
+                       if (!zone_balanced(zone, order, 0, 0)) {
                                end_zone = i;
                                break;
                        } else {
@@ -2679,9 +2690,8 @@ loop_again:
                                testorder = 0;
 
                        if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-                                   !zone_watermark_ok_safe(zone, testorder,
-                                       high_wmark_pages(zone) + balance_gap,
-                                       end_zone, 0)) {
+                           !zone_balanced(zone, testorder,
+                                          balance_gap, end_zone)) {
                                shrink_zone(zone, &sc);
 
                                reclaim_state->reclaimed_slab = 0;
@@ -2708,8 +2718,7 @@ loop_again:
                                continue;
                        }
 
-                       if (!zone_watermark_ok_safe(zone, testorder,
-                                       high_wmark_pages(zone), end_zone, 0)) {
+                       if (!zone_balanced(zone, testorder, 0, end_zone)) {
                                all_zones_ok = 0;
                                /*
                                 * We are still under min water mark.  This
@@ -2814,29 +2823,10 @@ out:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable &&
-                           sc.priority != DEF_PRIORITY)
-                               continue;
-
-                       /* Would compaction fail due to lack of free memory? */
-                       if (COMPACTION_BUILD &&
-                           compaction_suitable(zone, order) == COMPACT_SKIPPED)
-                               goto loop_again;
-
-                       /* Confirm the zone is balanced for order-0 */
-                       if (!zone_watermark_ok(zone, 0,
-                                       high_wmark_pages(zone), 0, 0)) {
-                               order = sc.order = 0;
-                               goto loop_again;
-                       }
-
                        /* Check if the memory needs to be defragmented. */
                        if (zone_watermark_ok(zone, order,
                                    low_wmark_pages(zone), *classzone_idx, 0))
                                zones_need_compaction = 0;
-
-                       /* If balanced, clear the congested flag */
-                       zone_clear_flag(zone, ZONE_CONGESTED);
                }
 
                if (zones_need_compaction)
index 6f747582718ed28464b4eaa2bd45740939c13506..969b7cdff59d9480e4a7a77cbff72acb0e12eae7 100644 (file)
@@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                op->sk = sk;
                op->ifindex = ifindex;
 
+               /* ifindex for timeout events w/o previous frame reception */
+               op->rx_ifindex = ifindex;
+
                /* initialize uninitialized (kzalloc) structure */
                hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                op->timer.function = bcm_rx_timeout_handler;
index c0946cb2b3547f4fdea0fa56b8f7e17c71a75012..e5942bf45a6d9b7e7412a2100b7ef116610aab65 100644 (file)
@@ -3451,6 +3451,8 @@ static int napi_gro_complete(struct sk_buff *skb)
        struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
        int err = -ENOENT;
 
+       BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
+
        if (NAPI_GRO_CB(skb)->count == 1) {
                skb_shinfo(skb)->gso_size = 0;
                goto out;
index 4007c1437fdaf615bbf197ad52e545529e7bce59..3f0636cd76cdcd132278e4f860ee1f581fd8174c 100644 (file)
@@ -3004,7 +3004,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        skb_shinfo(nskb)->gso_size = pinfo->gso_size;
        pinfo->gso_size = 0;
        skb_header_release(p);
-       nskb->prev = p;
+       NAPI_GRO_CB(nskb)->last = p;
 
        nskb->data_len += p->len;
        nskb->truesize += p->truesize;
@@ -3030,8 +3030,8 @@ merge:
 
        __skb_pull(skb, offset);
 
-       p->prev->next = skb;
-       p->prev = skb;
+       NAPI_GRO_CB(p)->last->next = skb;
+       NAPI_GRO_CB(p)->last = skb;
        skb_header_release(skb);
 
 done:
index f2eccd531746bea19eb269fa0d75266b20f69ebc..17ff9fd7cddab0db42c718173cfd85a9586d6d16 100644 (file)
@@ -257,7 +257,8 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
                struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
                rc = inet_peer_xrlim_allow(peer,
                                           net->ipv4.sysctl_icmp_ratelimit);
-               inet_putpeer(peer);
+               if (peer)
+                       inet_putpeer(peer);
        }
 out:
        return rc;
index 0c34bfabc11fc8bcb8056a67abf0feb0b652154f..e23e16dc501d5458632df5e884149f89b30f9e28 100644 (file)
@@ -44,6 +44,10 @@ struct inet_diag_entry {
        u16 dport;
        u16 family;
        u16 userlocks;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct in6_addr saddr_storage;  /* for IPv4-mapped-IPv6 addresses */
+       struct in6_addr daddr_storage;  /* for IPv4-mapped-IPv6 addresses */
+#endif
 };
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -428,25 +432,31 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
                                break;
                        }
 
-                       if (cond->prefix_len == 0)
-                               break;
-
                        if (op->code == INET_DIAG_BC_S_COND)
                                addr = entry->saddr;
                        else
                                addr = entry->daddr;
 
+                       if (cond->family != AF_UNSPEC &&
+                           cond->family != entry->family) {
+                               if (entry->family == AF_INET6 &&
+                                   cond->family == AF_INET) {
+                                       if (addr[0] == 0 && addr[1] == 0 &&
+                                           addr[2] == htonl(0xffff) &&
+                                           bitstring_match(addr + 3,
+                                                           cond->addr,
+                                                           cond->prefix_len))
+                                               break;
+                               }
+                               yes = 0;
+                               break;
+                       }
+
+                       if (cond->prefix_len == 0)
+                               break;
                        if (bitstring_match(addr, cond->addr,
                                            cond->prefix_len))
                                break;
-                       if (entry->family == AF_INET6 &&
-                           cond->family == AF_INET) {
-                               if (addr[0] == 0 && addr[1] == 0 &&
-                                   addr[2] == htonl(0xffff) &&
-                                   bitstring_match(addr + 3, cond->addr,
-                                                   cond->prefix_len))
-                                       break;
-                       }
                        yes = 0;
                        break;
                }
@@ -509,6 +519,55 @@ static int valid_cc(const void *bc, int len, int cc)
        return 0;
 }
 
+/* Validate an inet_diag_hostcond. */
+static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
+                          int *min_len)
+{
+       int addr_len;
+       struct inet_diag_hostcond *cond;
+
+       /* Check hostcond space. */
+       *min_len += sizeof(struct inet_diag_hostcond);
+       if (len < *min_len)
+               return false;
+       cond = (struct inet_diag_hostcond *)(op + 1);
+
+       /* Check address family and address length. */
+       switch (cond->family) {
+       case AF_UNSPEC:
+               addr_len = 0;
+               break;
+       case AF_INET:
+               addr_len = sizeof(struct in_addr);
+               break;
+       case AF_INET6:
+               addr_len = sizeof(struct in6_addr);
+               break;
+       default:
+               return false;
+       }
+       *min_len += addr_len;
+       if (len < *min_len)
+               return false;
+
+       /* Check prefix length (in bits) vs address length (in bytes). */
+       if (cond->prefix_len > 8 * addr_len)
+               return false;
+
+       return true;
+}
+
+/* Validate a port comparison operator. */
+static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
+                                        int len, int *min_len)
+{
+       /* Port comparisons put the port in a follow-on inet_diag_bc_op. */
+       *min_len += sizeof(struct inet_diag_bc_op);
+       if (len < *min_len)
+               return false;
+       return true;
+}
+
 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 {
        const void *bc = bytecode;
@@ -516,29 +575,39 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 
        while (len > 0) {
                const struct inet_diag_bc_op *op = bc;
+               int min_len = sizeof(struct inet_diag_bc_op);
 
 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
                switch (op->code) {
-               case INET_DIAG_BC_AUTO:
                case INET_DIAG_BC_S_COND:
                case INET_DIAG_BC_D_COND:
+                       if (!valid_hostcond(bc, len, &min_len))
+                               return -EINVAL;
+                       break;
                case INET_DIAG_BC_S_GE:
                case INET_DIAG_BC_S_LE:
                case INET_DIAG_BC_D_GE:
                case INET_DIAG_BC_D_LE:
-               case INET_DIAG_BC_JMP:
-                       if (op->no < 4 || op->no > len + 4 || op->no & 3)
-                               return -EINVAL;
-                       if (op->no < len &&
-                           !valid_cc(bytecode, bytecode_len, len - op->no))
+                       if (!valid_port_comparison(bc, len, &min_len))
                                return -EINVAL;
                        break;
+               case INET_DIAG_BC_AUTO:
+               case INET_DIAG_BC_JMP:
                case INET_DIAG_BC_NOP:
                        break;
                default:
                        return -EINVAL;
                }
-               if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
+
+               if (op->code != INET_DIAG_BC_NOP) {
+                       if (op->no < min_len || op->no > len + 4 || op->no & 3)
+                               return -EINVAL;
+                       if (op->no < len &&
+                           !valid_cc(bytecode, bytecode_len, len - op->no))
+                               return -EINVAL;
+               }
+
+               if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
                        return -EINVAL;
                bc  += op->yes;
                len -= op->yes;
@@ -596,6 +665,36 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
+/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
+ * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
+ */
+static inline void inet_diag_req_addrs(const struct sock *sk,
+                                      const struct request_sock *req,
+                                      struct inet_diag_entry *entry)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6) {
+               if (req->rsk_ops->family == AF_INET6) {
+                       entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
+                       entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+               } else if (req->rsk_ops->family == AF_INET) {
+                       ipv6_addr_set_v4mapped(ireq->loc_addr,
+                                              &entry->saddr_storage);
+                       ipv6_addr_set_v4mapped(ireq->rmt_addr,
+                                              &entry->daddr_storage);
+                       entry->saddr = entry->saddr_storage.s6_addr32;
+                       entry->daddr = entry->daddr_storage.s6_addr32;
+               }
+       } else
+#endif
+       {
+               entry->saddr = &ireq->loc_addr;
+               entry->daddr = &ireq->rmt_addr;
+       }
+}
+
 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
                              struct request_sock *req,
                              struct user_namespace *user_ns,
@@ -637,8 +736,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        r->idiag_inode = 0;
 #if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
-               *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
-               *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
+               struct inet_diag_entry entry;
+               inet_diag_req_addrs(sk, req, &entry);
+               memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
+               memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
        }
 #endif
 
@@ -691,18 +792,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
                                continue;
 
                        if (bc) {
-                               entry.saddr =
-#if IS_ENABLED(CONFIG_IPV6)
-                                       (entry.family == AF_INET6) ?
-                                       inet6_rsk(req)->loc_addr.s6_addr32 :
-#endif
-                                       &ireq->loc_addr;
-                               entry.daddr =
-#if IS_ENABLED(CONFIG_IPV6)
-                                       (entry.family == AF_INET6) ?
-                                       inet6_rsk(req)->rmt_addr.s6_addr32 :
-#endif
-                                       &ireq->rmt_addr;
+                               inet_diag_req_addrs(sk, req, &entry);
                                entry.dport = ntohs(ireq->rmt_port);
 
                                if (!inet_diag_bc_run(bc, &entry))
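Editor's note: the inet_diag_req_addrs() helper added above presents IPv4 request-sock addresses to the byte-code matcher in IPv4-mapped IPv6 form (::ffff:a.b.c.d), which is also the layout the addr[2] == htonl(0xffff) test earlier in inet_diag_bc_run() looks for. A standalone userspace illustration of that mapping (not the kernel's ipv6_addr_set_v4mapped() itself):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
        struct in_addr v4;
        unsigned char v6[16];
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET, "192.0.2.1", &v4);

        /* IPv4-mapped IPv6: 80 zero bits, 16 one bits, then the IPv4 address */
        memset(v6, 0, 10);
        v6[10] = 0xff;
        v6[11] = 0xff;
        memcpy(v6 + 12, &v4, sizeof(v4));

        inet_ntop(AF_INET6, v6, buf, sizeof(buf));
        printf("%s\n", buf);            /* ::ffff:192.0.2.1 */
        return 0;
}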
index 448e68546827431098c980bafc4a63967764942f..8d5cc75dac88286fa6a10f24d7cb8f16c7c1bc14 100644 (file)
@@ -707,28 +707,27 @@ EXPORT_SYMBOL(ip_defrag);
 
 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 {
-       const struct iphdr *iph;
+       struct iphdr iph;
        u32 len;
 
        if (skb->protocol != htons(ETH_P_IP))
                return skb;
 
-       if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+       if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
                return skb;
 
-       iph = ip_hdr(skb);
-       if (iph->ihl < 5 || iph->version != 4)
+       if (iph.ihl < 5 || iph.version != 4)
                return skb;
-       if (!pskb_may_pull(skb, iph->ihl*4))
-               return skb;
-       iph = ip_hdr(skb);
-       len = ntohs(iph->tot_len);
-       if (skb->len < len || len < (iph->ihl * 4))
+
+       len = ntohs(iph.tot_len);
+       if (skb->len < len || len < (iph.ihl * 4))
                return skb;
 
-       if (ip_is_fragment(ip_hdr(skb))) {
+       if (ip_is_fragment(&iph)) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (skb) {
+                       if (!pskb_may_pull(skb, iph.ihl*4))
+                               return skb;
                        if (pskb_trim_rcsum(skb, len))
                                return skb;
                        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
index 6168c4dc58b1db546c567ef0c55842ab42edee16..3eab2b2ffd34e41fd84588a27c23de0d43e16fc4 100644 (file)
@@ -1318,6 +1318,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
                if (get_user(v, (u32 __user *)optval))
                        return -EFAULT;
 
+               /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
+               if (v != RT_TABLE_DEFAULT && v >= 1000000000)
+                       return -EINVAL;
+
                rtnl_lock();
                ret = 0;
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
index 083092e3aed68db2dd5e9c3fb9cd43759a529abe..e457c7ab2e28daf5607e93ef0c31e667f1cec216 100644 (file)
@@ -830,8 +830,8 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
        return mss_now;
 }
 
-static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
-                        size_t psize, int flags)
+static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+                               size_t size, int flags)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int mss_now, size_goal;
@@ -858,12 +858,9 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;
 
-       while (psize > 0) {
+       while (size > 0) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);
-               struct page *page = pages[poffset / PAGE_SIZE];
                int copy, i;
-               int offset = poffset % PAGE_SIZE;
-               int size = min_t(size_t, psize, PAGE_SIZE - offset);
                bool can_coalesce;
 
                if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
@@ -912,8 +909,8 @@ new_segment:
                        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
                copied += copy;
-               poffset += copy;
-               if (!(psize -= copy))
+               offset += copy;
+               if (!(size -= copy))
                        goto out;
 
                if (skb->len < size_goal || (flags & MSG_OOB))
@@ -960,7 +957,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
                                        flags);
 
        lock_sock(sk);
-       res = do_tcp_sendpages(sk, &page, offset, size, flags);
+       res = do_tcp_sendpages(sk, page, offset, size, flags);
        release_sock(sk);
        return res;
 }
index 609ff98aeb47ce99500614ff4a8a9cfe3a290d28..181fc8234a529d5b6663f683e0da19e5ab17c1e8 100644 (file)
@@ -5645,7 +5645,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
        tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
        if (data) { /* Retransmit unacked data in SYN */
-               tcp_retransmit_skb(sk, data);
+               tcp_for_write_queue_from(data, sk) {
+                       if (data == tcp_send_head(sk) ||
+                           __tcp_retransmit_skb(sk, data))
+                               break;
+               }
                tcp_rearm_rto(sk);
                return true;
        }
index 2798706cb06385aa87fdb05ba1ed2c6a0a14e6e5..948ac275b9b52ed10c1adaa9f931760a0903c788 100644 (file)
@@ -2309,12 +2309,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
  * state updates are done by the caller.  Returns non-zero if an
  * error occurred which prevented the send.
  */
-int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        unsigned int cur_mss;
-       int err;
 
        /* Inconclusive MTU probe */
        if (icsk->icsk_mtup.probe_size) {
@@ -2387,11 +2386,17 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
                struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
                                                   GFP_ATOMIC);
-               err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-                            -ENOBUFS;
+               return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+                             -ENOBUFS;
        } else {
-               err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+               return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
        }
+}
+
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int err = __tcp_retransmit_skb(sk, skb);
 
        if (err == 0) {
                /* Update global TCP statistics. */
index 1002e3396f7287049b41c53e4aaa5cdc285a73e6..ae43c62f9045ba94ced0f025a9ecacb3bf145af1 100644 (file)
@@ -441,6 +441,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
        lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
        if (lsap == NULL) {
                IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__);
+               __irttp_close_tsap(self);
                return NULL;
        }
 
index 83608ac167801f1c06fc55dd3ab53370d947cbc7..2c84185dfdb0c2e9901d86b0903463061ddaaecf 100644 (file)
@@ -458,8 +458,6 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
                list_move_tail(&roc->list, &tmp_list);
                roc->abort = true;
        }
-
-       ieee80211_start_next_roc(local);
        mutex_unlock(&local->mtx);
 
        list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
index b9a63381e34998e08ab5271f7d82cca9058a76d8..45a101439bc5dc62798e62b1516b78a9d8096272 100644 (file)
@@ -793,7 +793,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
                [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
                [IPSET_ATTR_IFACE]      = { .type = NLA_NUL_STRING,
-                                           .len = IPSET_MAXNAMELEN - 1 },
+                                           .len  = IFNAMSIZ - 1 },
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
                [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
index 98c70630ad06178778dc4f1e04275fa6032facc8..733cbf49ed1f506ae263d50675775e2e96e3356d 100644 (file)
@@ -702,15 +702,11 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
                        /* We only match on the lower 8 bits of the opcode. */
                        if (ntohs(arp->ar_op) <= 0xff)
                                key->ip.proto = ntohs(arp->ar_op);
-
-                       if (key->ip.proto == ARPOP_REQUEST
-                                       || key->ip.proto == ARPOP_REPLY) {
-                               memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
-                               memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
-                               memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
-                               memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
-                               key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
-                       }
+                       memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
+                       memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
+                       memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
+                       memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+                       key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
                }
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                int nh_len;             /* IPv6 Header + Extensions */
index 3c1e58ba714bf9534b597c9f28147cc7c3038b39..a9033481fa5e867b2264fa9f834a4041786ed166 100644 (file)
@@ -158,7 +158,7 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
 
        if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
                net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
-                                    ovs_dp_name(vport->dp),
+                                    netdev_vport->dev->name,
                                     packet_length(skb), mtu);
                goto error;
        }
index 7c2df9c33df37a588c426e7945cd71233edd4577..69ce21e3716f89fbfc78b050ffc3943455906d27 100644 (file)
@@ -183,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 
        msg = sctp_datamsg_new(GFP_KERNEL);
        if (!msg)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        /* Note: Calculate this outside of the loop, so that all fragments
         * have the same expiration.
@@ -280,11 +280,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 
                chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
 
-               if (!chunk)
+               if (!chunk) {
+                       err = -ENOMEM;
                        goto errout;
+               }
+
                err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
                if (err < 0)
-                       goto errout;
+                       goto errout_chunk_free;
 
                offset += len;
 
@@ -315,8 +318,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 
                chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
 
-               if (!chunk)
+               if (!chunk) {
+                       err = -ENOMEM;
                        goto errout;
+               }
 
                err = sctp_user_addto_chunk(chunk, offset, over, msgh->msg_iov);
 
@@ -324,7 +329,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
                           - (__u8 *)chunk->skb->data);
                if (err < 0)
-                       goto errout;
+                       goto errout_chunk_free;
 
                sctp_datamsg_assign(msg, chunk);
                list_add_tail(&chunk->frag_list, &msg->chunks);
@@ -332,6 +337,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 
        return msg;
 
+errout_chunk_free:
+       sctp_chunk_free(chunk);
+
 errout:
        list_for_each_safe(pos, temp, &msg->chunks) {
                list_del_init(pos);
@@ -339,7 +347,7 @@ errout:
                sctp_chunk_free(chunk);
        }
        sctp_datamsg_put(msg);
-       return NULL;
+       return ERR_PTR(err);
 }
 
 /* Check whether this message has expired. */
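With this change sctp_datamsg_from_user() reports why it failed instead of returning a bare NULL: allocation failures become ERR_PTR(-ENOMEM), copy failures propagate the error from sctp_user_addto_chunk(), and a partially built chunk is released through the new errout_chunk_free label. The caller-side hunk in sctp_sendmsg() below unpacks the result with IS_ERR()/PTR_ERR(). The error-pointer idiom itself can be sketched in userspace; the helpers below are simplified stand-ins for the real ones in <linux/err.h>:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095	/* same bound the kernel uses for error pointers */

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Toy constructor: fail with a specific errno instead of a plain NULL. */
static void *build_msg(int simulate_enomem)
{
	if (simulate_enomem)
		return ERR_PTR(-ENOMEM);
	return malloc(32);
}

int main(void)
{
	void *msg = build_msg(1);

	if (IS_ERR(msg)) {
		printf("build_msg failed with error %ld\n", PTR_ERR(msg));
		return 1;
	}
	free(msg);
	return 0;
}

The kernel convention relies on the top MAX_ERRNO addresses never being valid object pointers; in this userspace sketch that property is simply assumed.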
index a60d1f8b41c5e2330e837265e175202aba322fcb..406d957d08fbcb7d9cb532a8143d363c277c3ec8 100644 (file)
@@ -1915,8 +1915,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 
        /* Break the message into multiple chunks of maximum size. */
        datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
-       if (!datamsg) {
-               err = -ENOMEM;
+       if (IS_ERR(datamsg)) {
+               err = PTR_ERR(datamsg);
                goto out_free;
        }
 
index 953c21e4af977a752362187976e84b578bdb085c..206cf5238fd3ef00183f87e52734398ab475fbdb 100644 (file)
@@ -331,7 +331,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
                 * 1/8, rto_alpha would be expressed as 3.
                 */
                tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
-                       + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta);
+                       + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
                tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
                        + (rtt >> net->sctp.rto_alpha);
        } else {
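The RTTVAR fix is about unsigned wrap-around: tp->srtt and rtt are 32-bit unsigned values, so tp->srtt - rtt wraps modulo 2^32 whenever the new sample exceeds the smoothed RTT, and abs(), which takes an int, cannot reliably recover the true difference from the wrapped value. Widening both operands to signed 64-bit before taking the absolute value, as the patch does with abs64(), is well defined for the whole range. A userspace illustration with made-up values (llabs() stands in for the kernel's abs64()):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	uint32_t srtt = 100;		/* smoothed RTT (illustrative)  */
	uint32_t rtt  = 2500000000u;	/* new, much larger RTT sample  */

	uint32_t wrapped = srtt - rtt;	/* wraps modulo 2^32 */
	long long widened = llabs((int64_t)srtt - (int64_t)rtt);

	printf("abs() of the wrapped u32 difference: %u\n",
	       (unsigned)abs((int)wrapped));
	printf("abs64()-style widened difference:    %lld\n", widened);
	return 0;
}

With these values the wrapped subtraction yields 1794967396, while the widened difference is the correct 2499999900.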
index 3ae43947a171f3d9924a22898fe1e410beb89a98..1f9a529fe544f768c4322e884e4c34d02d482b5e 100644 (file)
@@ -31,44 +31,44 @@ help:
        @echo '  clean: a summary clean target to clean _all_ folders'
 
 cpupower: FORCE
-       $(QUIET_SUBDIR0)power/$@/ $(QUIET_SUBDIR1)
+       $(call descend,power/$@)
 
 firewire lguest perf usb virtio vm: FORCE
-       $(QUIET_SUBDIR0)$@/ $(QUIET_SUBDIR1)
+       $(call descend,$@)
 
 selftests: FORCE
-       $(QUIET_SUBDIR0)testing/$@/ $(QUIET_SUBDIR1)
+       $(call descend,testing/$@)
 
 turbostat x86_energy_perf_policy: FORCE
-       $(QUIET_SUBDIR0)power/x86/$@/ $(QUIET_SUBDIR1)
+       $(call descend,power/x86/$@)
 
 cpupower_install:
-       $(QUIET_SUBDIR0)power/$(@:_install=)/ $(QUIET_SUBDIR1) install
+       $(call descend,power/$(@:_install=),install)
 
 firewire_install lguest_install perf_install usb_install virtio_install vm_install:
-       $(QUIET_SUBDIR0)$(@:_install=)/ $(QUIET_SUBDIR1) install
+       $(call descend,$(@:_install=),install)
 
 selftests_install:
-       $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) install
+       $(call descend,testing/$(@:_clean=),install)
 
 turbostat_install x86_energy_perf_policy_install:
-       $(QUIET_SUBDIR0)power/x86/$(@:_install=)/ $(QUIET_SUBDIR1) install
+       $(call descend,power/x86/$(@:_install=),install)
 
 install: cpupower_install firewire_install lguest_install perf_install \
                selftests_install turbostat_install usb_install virtio_install \
                vm_install x86_energy_perf_policy_install
 
 cpupower_clean:
-       $(QUIET_SUBDIR0)power/cpupower/ $(QUIET_SUBDIR1) clean
+       $(call descend,power/cpupower,clean)
 
 firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
-       $(QUIET_SUBDIR0)$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+       $(call descend,$(@:_clean=),clean)
 
 selftests_clean:
-       $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+       $(call descend,testing/$(@:_clean=),clean)
 
 turbostat_clean x86_energy_perf_policy_clean:
-       $(QUIET_SUBDIR0)power/x86/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+       $(call descend,power/x86/$(@:_clean=),clean)
 
 clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \
                turbostat_clean usb_clean virtio_clean vm_clean \
index 00deed4d61598c5d40d8c607a0da1f42402cefd6..0a619af5be4377fd395dc1b638b40d095d9d7cdd 100644 (file)
@@ -169,7 +169,34 @@ endif
 
 ### --- END CONFIGURATION SECTION ---
 
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+ifneq ($(objtree),)
+#$(info Determined 'objtree' to be $(objtree))
+endif
+
+ifneq ($(OUTPUT),)
+#$(info Determined 'OUTPUT' to be $(OUTPUT))
+endif
+
+BASIC_CFLAGS = \
+       -Iutil/include \
+       -Iarch/$(ARCH)/include \
+       $(if $(objtree),-I$(objtree)/arch/$(ARCH)/include/generated/uapi) \
+       -I$(srctree)/arch/$(ARCH)/include/uapi \
+       -I$(srctree)/arch/$(ARCH)/include \
+       $(if $(objtree),-I$(objtree)/include/generated/uapi) \
+       -I$(srctree)/include/uapi \
+       -I$(srctree)/include \
+       -I$(OUTPUT)util \
+       -Iutil \
+       -I. \
+       -I$(TRACE_EVENT_DIR) \
+       -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 BASIC_LDFLAGS =
 
 # Guard against environment variables
index 46fc9f15c6b37e23daf4ba5ae72bbf62226e461b..7fcdcdbee917a8473acf25fe7ac72c6c5a9732b0 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <stdlib.h>
 #include "../../util/types.h"
-#include "../../../../../arch/x86/include/asm/perf_regs.h"
+#include <asm/perf_regs.h>
 
 #ifndef ARCH_X86_64
 #define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
index 260abc535b5b428e817ebaa5a4354fbe67a947e1..283b4397e397fdd3155c4bcf11b23caeeb2bb139 100644 (file)
 #include <pthread.h>
 #include <math.h>
 
-#include "../../arch/x86/include/asm/svm.h"
-#include "../../arch/x86/include/asm/vmx.h"
-#include "../../arch/x86/include/asm/kvm.h"
+#if defined(__i386__) || defined(__x86_64__)
+#include <asm/svm.h>
+#include <asm/vmx.h>
+#include <asm/kvm.h>
 
 struct event_key {
        #define INVALID_KEY     (~0ULL)
@@ -58,7 +59,7 @@ struct kvm_event_key {
 };
 
 
-struct perf_kvm;
+struct perf_kvm_stat;
 
 struct kvm_events_ops {
        bool (*is_begin_event)(struct perf_evsel *evsel,
@@ -66,7 +67,7 @@ struct kvm_events_ops {
                               struct event_key *key);
        bool (*is_end_event)(struct perf_evsel *evsel,
                             struct perf_sample *sample, struct event_key *key);
-       void (*decode_key)(struct perf_kvm *kvm, struct event_key *key,
+       void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
                           char decode[20]);
        const char *name;
 };
@@ -79,7 +80,7 @@ struct exit_reasons_table {
 #define EVENTS_BITS            12
 #define EVENTS_CACHE_SIZE      (1UL << EVENTS_BITS)
 
-struct perf_kvm {
+struct perf_kvm_stat {
        struct perf_tool    tool;
        struct perf_session *session;
 
@@ -146,7 +147,7 @@ static struct exit_reasons_table svm_exit_reasons[] = {
        SVM_EXIT_REASONS
 };
 
-static const char *get_exit_reason(struct perf_kvm *kvm, u64 exit_code)
+static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code)
 {
        int i = kvm->exit_reasons_size;
        struct exit_reasons_table *tbl = kvm->exit_reasons;
@@ -162,7 +163,7 @@ static const char *get_exit_reason(struct perf_kvm *kvm, u64 exit_code)
        return "UNKNOWN";
 }
 
-static void exit_event_decode_key(struct perf_kvm *kvm,
+static void exit_event_decode_key(struct perf_kvm_stat *kvm,
                                  struct event_key *key,
                                  char decode[20])
 {
@@ -228,7 +229,7 @@ static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
        return false;
 }
 
-static void mmio_event_decode_key(struct perf_kvm *kvm __maybe_unused,
+static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
                                  struct event_key *key,
                                  char decode[20])
 {
@@ -271,7 +272,7 @@ static bool ioport_event_end(struct perf_evsel *evsel,
        return kvm_entry_event(evsel);
 }
 
-static void ioport_event_decode_key(struct perf_kvm *kvm __maybe_unused,
+static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
                                    struct event_key *key,
                                    char decode[20])
 {
@@ -286,7 +287,7 @@ static struct kvm_events_ops ioport_events = {
        .name = "IO Port Access"
 };
 
-static bool register_kvm_events_ops(struct perf_kvm *kvm)
+static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
 {
        bool ret = true;
 
@@ -311,7 +312,7 @@ struct vcpu_event_record {
 };
 
 
-static void init_kvm_event_record(struct perf_kvm *kvm)
+static void init_kvm_event_record(struct perf_kvm_stat *kvm)
 {
        int i;
 
@@ -360,7 +361,7 @@ static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
        return event;
 }
 
-static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm,
+static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
                                               struct event_key *key)
 {
        struct kvm_event *event;
@@ -381,7 +382,7 @@ static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm,
        return event;
 }
 
-static bool handle_begin_event(struct perf_kvm *kvm,
+static bool handle_begin_event(struct perf_kvm_stat *kvm,
                               struct vcpu_event_record *vcpu_record,
                               struct event_key *key, u64 timestamp)
 {
@@ -425,7 +426,7 @@ static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
        return true;
 }
 
-static bool handle_end_event(struct perf_kvm *kvm,
+static bool handle_end_event(struct perf_kvm_stat *kvm,
                             struct vcpu_event_record *vcpu_record,
                             struct event_key *key,
                             u64 timestamp)
@@ -486,7 +487,7 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread,
        return thread->priv;
 }
 
-static bool handle_kvm_event(struct perf_kvm *kvm,
+static bool handle_kvm_event(struct perf_kvm_stat *kvm,
                             struct thread *thread,
                             struct perf_evsel *evsel,
                             struct perf_sample *sample)
@@ -541,7 +542,7 @@ static struct kvm_event_key keys[] = {
        { NULL, NULL }
 };
 
-static bool select_key(struct perf_kvm *kvm)
+static bool select_key(struct perf_kvm_stat *kvm)
 {
        int i;
 
@@ -577,7 +578,8 @@ static void insert_to_result(struct rb_root *result, struct kvm_event *event,
        rb_insert_color(&event->rb, result);
 }
 
-static void update_total_count(struct perf_kvm *kvm, struct kvm_event *event)
+static void
+update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event)
 {
        int vcpu = kvm->trace_vcpu;
 
@@ -590,7 +592,7 @@ static bool event_is_valid(struct kvm_event *event, int vcpu)
        return !!get_event_count(event, vcpu);
 }
 
-static void sort_result(struct perf_kvm *kvm)
+static void sort_result(struct perf_kvm_stat *kvm)
 {
        unsigned int i;
        int vcpu = kvm->trace_vcpu;
@@ -627,7 +629,7 @@ static void print_vcpu_info(int vcpu)
                pr_info("VCPU %d:\n\n", vcpu);
 }
 
-static void print_result(struct perf_kvm *kvm)
+static void print_result(struct perf_kvm_stat *kvm)
 {
        char decode[20];
        struct kvm_event *event;
@@ -670,7 +672,8 @@ static int process_sample_event(struct perf_tool *tool,
                                struct machine *machine)
 {
        struct thread *thread = machine__findnew_thread(machine, sample->tid);
-       struct perf_kvm *kvm = container_of(tool, struct perf_kvm, tool);
+       struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
+                                                tool);
 
        if (thread == NULL) {
                pr_debug("problem processing %d event, skipping it.\n",
@@ -701,7 +704,7 @@ static int get_cpu_isa(struct perf_session *session)
        return isa;
 }
 
-static int read_events(struct perf_kvm *kvm)
+static int read_events(struct perf_kvm_stat *kvm)
 {
        int ret;
 
@@ -750,7 +753,7 @@ static bool verify_vcpu(int vcpu)
        return true;
 }
 
-static int kvm_events_report_vcpu(struct perf_kvm *kvm)
+static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
 {
        int ret = -EINVAL;
        int vcpu = kvm->trace_vcpu;
@@ -798,7 +801,8 @@ static const char * const record_args[] = {
                _p;                     \
        })
 
-static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv)
+static int
+kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
 {
        unsigned int rec_argc, i, j;
        const char **rec_argv;
@@ -821,7 +825,8 @@ static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv)
        return cmd_record(i, rec_argv, NULL);
 }
 
-static int kvm_events_report(struct perf_kvm *kvm, int argc, const char **argv)
+static int
+kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
 {
        const struct option kvm_events_report_options[] = {
                OPT_STRING(0, "event", &kvm->report_event, "report event",
@@ -864,24 +869,37 @@ static void print_kvm_stat_usage(void)
        printf("\nOtherwise, it is the alias of 'perf stat':\n");
 }
 
-static int kvm_cmd_stat(struct perf_kvm *kvm, int argc, const char **argv)
+static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
 {
+       struct perf_kvm_stat kvm = {
+               .file_name = file_name,
+
+               .trace_vcpu     = -1,
+               .report_event   = "vmexit",
+               .sort_key       = "sample",
+
+               .exit_reasons = svm_exit_reasons,
+               .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
+               .exit_reasons_isa = "SVM",
+       };
+
        if (argc == 1) {
                print_kvm_stat_usage();
                goto perf_stat;
        }
 
        if (!strncmp(argv[1], "rec", 3))
-               return kvm_events_record(kvm, argc - 1, argv + 1);
+               return kvm_events_record(&kvm, argc - 1, argv + 1);
 
        if (!strncmp(argv[1], "rep", 3))
-               return kvm_events_report(kvm, argc - 1 , argv + 1);
+               return kvm_events_report(&kvm, argc - 1, argv + 1);
 
 perf_stat:
        return cmd_stat(argc, argv, NULL);
 }
+#endif
 
-static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv)
+static int __cmd_record(const char *file_name, int argc, const char **argv)
 {
        int rec_argc, i = 0, j;
        const char **rec_argv;
@@ -890,7 +908,7 @@ static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv)
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        rec_argv[i++] = strdup("record");
        rec_argv[i++] = strdup("-o");
-       rec_argv[i++] = strdup(kvm->file_name);
+       rec_argv[i++] = strdup(file_name);
        for (j = 1; j < argc; j++, i++)
                rec_argv[i] = argv[j];
 
@@ -899,7 +917,7 @@ static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv)
        return cmd_record(i, rec_argv, NULL);
 }
 
-static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv)
+static int __cmd_report(const char *file_name, int argc, const char **argv)
 {
        int rec_argc, i = 0, j;
        const char **rec_argv;
@@ -908,7 +926,7 @@ static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv)
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        rec_argv[i++] = strdup("report");
        rec_argv[i++] = strdup("-i");
-       rec_argv[i++] = strdup(kvm->file_name);
+       rec_argv[i++] = strdup(file_name);
        for (j = 1; j < argc; j++, i++)
                rec_argv[i] = argv[j];
 
@@ -917,7 +935,8 @@ static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv)
        return cmd_report(i, rec_argv, NULL);
 }
 
-static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv)
+static int
+__cmd_buildid_list(const char *file_name, int argc, const char **argv)
 {
        int rec_argc, i = 0, j;
        const char **rec_argv;
@@ -926,7 +945,7 @@ static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv)
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        rec_argv[i++] = strdup("buildid-list");
        rec_argv[i++] = strdup("-i");
-       rec_argv[i++] = strdup(kvm->file_name);
+       rec_argv[i++] = strdup(file_name);
        for (j = 1; j < argc; j++, i++)
                rec_argv[i] = argv[j];
 
@@ -937,20 +956,12 @@ static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv)
 
 int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
 {
-       struct perf_kvm kvm = {
-               .trace_vcpu     = -1,
-               .report_event   = "vmexit",
-               .sort_key       = "sample",
-
-               .exit_reasons = svm_exit_reasons,
-               .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
-               .exit_reasons_isa = "SVM",
-       };
+       const char *file_name;
 
        const struct option kvm_options[] = {
-               OPT_STRING('i', "input", &kvm.file_name, "file",
+               OPT_STRING('i', "input", &file_name, "file",
                           "Input file name"),
-               OPT_STRING('o', "output", &kvm.file_name, "file",
+               OPT_STRING('o', "output", &file_name, "file",
                           "Output file name"),
                OPT_BOOLEAN(0, "guest", &perf_guest,
                            "Collect guest os data"),
@@ -985,32 +996,34 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
        if (!perf_host)
                perf_guest = 1;
 
-       if (!kvm.file_name) {
+       if (!file_name) {
                if (perf_host && !perf_guest)
-                       kvm.file_name = strdup("perf.data.host");
+                       file_name = strdup("perf.data.host");
                else if (!perf_host && perf_guest)
-                       kvm.file_name = strdup("perf.data.guest");
+                       file_name = strdup("perf.data.guest");
                else
-                       kvm.file_name = strdup("perf.data.kvm");
+                       file_name = strdup("perf.data.kvm");
 
-               if (!kvm.file_name) {
+               if (!file_name) {
                        pr_err("Failed to allocate memory for filename\n");
                        return -ENOMEM;
                }
        }
 
        if (!strncmp(argv[0], "rec", 3))
-               return __cmd_record(&kvm, argc, argv);
+               return __cmd_record(file_name, argc, argv);
        else if (!strncmp(argv[0], "rep", 3))
-               return __cmd_report(&kvm, argc, argv);
+               return __cmd_report(file_name, argc, argv);
        else if (!strncmp(argv[0], "diff", 4))
                return cmd_diff(argc, argv, NULL);
        else if (!strncmp(argv[0], "top", 3))
                return cmd_top(argc, argv, NULL);
        else if (!strncmp(argv[0], "buildid-list", 12))
-               return __cmd_buildid_list(&kvm, argc, argv);
+               return __cmd_buildid_list(file_name, argc, argv);
+#if defined(__i386__) || defined(__x86_64__)
        else if (!strncmp(argv[0], "stat", 4))
-               return kvm_cmd_stat(&kvm, argc, argv);
+               return kvm_cmd_stat(file_name, argc, argv);
+#endif
        else
                usage_with_options(kvm_usage, kvm_options);
 
index 484f26cc0c00c6ee80d74e93bddc3fe24eae4c1f..5acd6e8e658bfcae9dad3e3cb9656481a084f450 100644 (file)
@@ -15,7 +15,7 @@
 #include "util/thread_map.h"
 #include "util/pmu.h"
 #include "event-parse.h"
-#include "../../include/linux/hw_breakpoint.h"
+#include <linux/hw_breakpoint.h>
 
 #include <sys/mman.h>
 
index c50985eaec41f479d2593dab749d061d3cb03c3d..238f923f22181698f1610ed8b14f512b76706fb1 100644 (file)
@@ -5,8 +5,9 @@ struct winsize;
 
 void get_term_dimensions(struct winsize *ws);
 
+#include <asm/unistd.h>
+
 #if defined(__i386__)
-#include "../../arch/x86/include/asm/unistd.h"
 #define rmb()          asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax()    asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC   "model name"
@@ -16,7 +17,6 @@ void get_term_dimensions(struct winsize *ws);
 #endif
 
 #if defined(__x86_64__)
-#include "../../arch/x86/include/asm/unistd.h"
 #define rmb()          asm volatile("lfence" ::: "memory")
 #define cpu_relax()    asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC   "model name"
@@ -26,20 +26,17 @@ void get_term_dimensions(struct winsize *ws);
 #endif
 
 #ifdef __powerpc__
-#include "../../arch/powerpc/include/asm/unistd.h"
 #define rmb()          asm volatile ("sync" ::: "memory")
 #define cpu_relax()    asm volatile ("" ::: "memory");
 #define CPUINFO_PROC   "cpu"
 #endif
 
 #ifdef __s390__
-#include "../../arch/s390/include/asm/unistd.h"
 #define rmb()          asm volatile("bcr 15,0" ::: "memory")
 #define cpu_relax()    asm volatile("" ::: "memory");
 #endif
 
 #ifdef __sh__
-#include "../../arch/sh/include/asm/unistd.h"
 #if defined(__SH4A__) || defined(__SH5__)
 # define rmb()         asm volatile("synco" ::: "memory")
 #else
@@ -50,35 +47,30 @@ void get_term_dimensions(struct winsize *ws);
 #endif
 
 #ifdef __hppa__
-#include "../../arch/parisc/include/asm/unistd.h"
 #define rmb()          asm volatile("" ::: "memory")
 #define cpu_relax()    asm volatile("" ::: "memory");
 #define CPUINFO_PROC   "cpu"
 #endif
 
 #ifdef __sparc__
-#include "../../arch/sparc/include/uapi/asm/unistd.h"
 #define rmb()          asm volatile("":::"memory")
 #define cpu_relax()    asm volatile("":::"memory")
 #define CPUINFO_PROC   "cpu"
 #endif
 
 #ifdef __alpha__
-#include "../../arch/alpha/include/asm/unistd.h"
 #define rmb()          asm volatile("mb" ::: "memory")
 #define cpu_relax()    asm volatile("" ::: "memory")
 #define CPUINFO_PROC   "cpu model"
 #endif
 
 #ifdef __ia64__
-#include "../../arch/ia64/include/asm/unistd.h"
 #define rmb()          asm volatile ("mf" ::: "memory")
 #define cpu_relax()    asm volatile ("hint @pause" ::: "memory")
 #define CPUINFO_PROC   "model name"
 #endif
 
 #ifdef __arm__
-#include "../../arch/arm/include/asm/unistd.h"
 /*
  * Use the __kuser_memory_barrier helper in the CPU helper page. See
  * arch/arm/kernel/entry-armv.S in the kernel source for details.
@@ -89,13 +81,11 @@ void get_term_dimensions(struct winsize *ws);
 #endif
 
 #ifdef __aarch64__
-#include "../../arch/arm64/include/asm/unistd.h"
 #define rmb()          asm volatile("dmb ld" ::: "memory")
 #define cpu_relax()    asm volatile("yield" ::: "memory")
 #endif
 
 #ifdef __mips__
-#include "../../arch/mips/include/asm/unistd.h"
 #define rmb()          asm volatile(                                   \
                                ".set   mips2\n\t"                      \
                                "sync\n\t"                              \
@@ -112,7 +102,7 @@ void get_term_dimensions(struct winsize *ws);
 #include <sys/types.h>
 #include <sys/syscall.h>
 
-#include "../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
 #include "util/types.h"
 #include <stdbool.h>
 
index 618d41140abd368072d9c978f70969233cd70260..d144d464ce39d12eed66e22342ad2ebc2ac8cc7f 100644 (file)
@@ -18,8 +18,8 @@
 #include "cpumap.h"
 #include "thread_map.h"
 #include "target.h"
-#include "../../../include/linux/hw_breakpoint.h"
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
 #include "perf_regs.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
index 6f94d6dea00f4b26efb86533509b4c9dace89737..d99b476ef37c723be395885c3d4d17798873ab6d 100644 (file)
@@ -3,7 +3,8 @@
 
 #include <linux/list.h>
 #include <stdbool.h>
-#include "../../../include/uapi/linux/perf_event.h"
+#include <stddef.h>
+#include <linux/perf_event.h>
 #include "types.h"
 #include "xyarray.h"
 #include "cgroup.h"
index 7daad237dea530f504f764b8189b06957b1905e6..566b84c695c880b43349675780ff88225b2828cc 100644 (file)
@@ -1378,6 +1378,8 @@ static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
 
                str = tmp + 1;
                fprintf(fp, "# node%u cpu list : %s\n", c, str);
+
+               str += strlen(str) + 1;
        }
        return;
 error:
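The added str += strlen(str) + 1 matters because the per-node records in this header section sit back to back in one buffer, with each CPU list stored as a NUL-terminated string; without stepping past that string and its terminator, the cursor never advances to the next node's record. The walking pattern in isolation (the buffer contents below are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Three CPU-list strings packed back to back, each NUL-terminated. */
	const char buf[] = "0-3\0" "4-7\0" "8-11\0";
	const char *str = buf;
	unsigned int node = 0;

	while (str < buf + sizeof(buf) - 1) {
		printf("# node%u cpu list : %s\n", node++, str);
		str += strlen(str) + 1;	/* skip the string and its NUL */
	}
	return 0;
}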
index 879d215cdac9007c360fbd4862c2e0d3aaa514e3..9bc00783f24f4648000ee1e10e3b194aec463a22 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __PERF_HEADER_H
 #define __PERF_HEADER_H
 
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
 #include <sys/types.h>
 #include <stdbool.h>
 #include "types.h"
index 516ecd9ddd6ee275c1e3144abc8a3d091b6114bb..6ef213b35ecd0bf4c83729f8c71f05c3631716fd 100644 (file)
@@ -3,7 +3,7 @@
 #include "evsel.h"
 #include "evlist.h"
 #include "sysfs.h"
-#include "../../../include/linux/hw_breakpoint.h"
+#include <linux/hw_breakpoint.h>
 
 #define TEST_ASSERT_VAL(text, cond) \
 do { \
index 75c7b0fca6d96baadc7b084c84a73d3f89ea3273..6b6d03e93c3d92790a53d5e67ead9f023a221cd5 100644 (file)
@@ -1,4 +1,4 @@
-#include "../../../include/linux/hw_breakpoint.h"
+#include <linux/hw_breakpoint.h>
 #include "util.h"
 #include "../perf.h"
 #include "evlist.h"
index 839230ceb18bd918b194d0fc719607445ad7bb00..2820c407adb23017dbdcc5139dae3dc7cada738a 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/list.h>
 #include <stdbool.h>
 #include "types.h"
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
 #include "types.h"
 
 struct list_head;
index 39f3abac77448021daf35d13f5c8fea57538db49..fdeb8ac7c5d27fee5ee15978edbfd90ce9270054 100644 (file)
@@ -2,7 +2,7 @@
 #define __PMU_H
 
 #include <linux/bitops.h>
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
 
 enum {
        PERF_PMU_FORMAT_VALUE_CONFIG,
index dd6426163ba6d70332d03a7aeac1205f59e146d4..0eae00ad5fe7ad1e9124cf52f1975c8616451b63 100644 (file)
@@ -7,7 +7,7 @@
 #include "symbol.h"
 #include "thread.h"
 #include <linux/rbtree.h>
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
 
 struct sample_queue;
 struct ip_callchain;
index 2eeb51baf077f16a84386840dab2539c8512dfb2..cfa906882e2ceb2ed138fb4fd996474324f4d6c3 100644 (file)
@@ -90,17 +90,17 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
        if (!strbuf_avail(sb))
                strbuf_grow(sb, 64);
        va_start(ap, fmt);
-       len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+       len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
        va_end(ap);
        if (len < 0)
-               die("your vscnprintf is broken");
+               die("your vsnprintf is broken");
        if (len > strbuf_avail(sb)) {
                strbuf_grow(sb, len);
                va_start(ap, fmt);
-               len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+               len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
                va_end(ap);
                if (len > strbuf_avail(sb)) {
-                       die("this should not happen, your snprintf is broken");
+                       die("this should not happen, your vsnprintf is broken");
                }
        }
        strbuf_setlen(sb, sb->len + len);
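The switch to vsnprintf() is about return-value semantics: C99 vsnprintf() returns the length the output would have had without truncation, which is exactly what the grow-and-retry logic needs, whereas the vscnprintf() used before returns only the number of characters actually written and therefore can never exceed strbuf_avail(), leaving the growth path unreachable. A quick userspace demonstration of the property the code now relies on:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char small[8];
	const char *msg = "a fairly long formatted message";

	/* C99: the return value is the untruncated length. */
	int need = snprintf(small, sizeof(small), "%s", msg);

	printf("buffer kept %zu chars, snprintf reported %d needed\n",
	       strlen(small), need);

	if (need >= (int)sizeof(small))
		printf("caller can grow to %d bytes and retry\n", need + 1);
	return 0;
}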
index 96ce80a3743ba22e13935f238e9835aca8923326..2964b96aa55fd350896cd45efc5a9e13538551ad 100644 (file)
@@ -1,8 +1,11 @@
-ifeq ("$(origin O)", "command line")
+ifeq ($(origin O), command line)
        dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
        ABSOLUTE_O := $(shell cd $(O) ; pwd)
-       OUTPUT := $(ABSOLUTE_O)/
+       OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
        COMMAND_O := O=$(ABSOLUTE_O)
+ifeq ($(objtree),)
+       objtree := $(O)
+endif
 endif
 
 ifneq ($(OUTPUT),)
@@ -41,7 +44,16 @@ else
 NO_SUBDIR = :
 endif
 
-QUIET_SUBDIR0  = +$(MAKE) -C # space to separate -C and subdir
+#
+# Define a callable command for descending to a new directory
+#
+# Call by doing: $(call descend,directory[,target])
+#
+descend = \
+       +mkdir -p $(OUTPUT)$(1) && \
+       $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
+
+QUIET_SUBDIR0  = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir
 QUIET_SUBDIR1  =
 
 ifneq ($(findstring $(MAKEFLAGS),s),s)
@@ -56,5 +68,10 @@ ifndef V
                         $(MAKE) $(PRINT_DIR) -C $$subdir
        QUIET_FLEX     = @echo '   ' FLEX $@;
        QUIET_BISON    = @echo '   ' BISON $@;
+
+       descend = \
+               @echo '   ' DESCEND $(1); \
+               mkdir -p $(OUTPUT)$(1) && \
+               $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
 endif
 endif