From: Mauro Carvalho Chehab
Date: Tue, 11 Dec 2012 13:28:37 +0000 (-0200)
Subject: Merge branch 'for_3.8-rc1' into v4l_for_linus
X-Git-Url: http://drtracing.org/?a=commitdiff_plain;h=77c53d0b56264a8fc5844e087ad15fffe20c299d;hp=d8658bca2e5696df2b6c69bc5538f8fe54e4a01e;p=deliverable%2Flinux.git

Merge branch 'for_3.8-rc1' into v4l_for_linus

* for_3.8-rc1: (243 commits)
  [media] omap3isp: Replace cpu_is_omap3630() with ISP revision check
  [media] omap3isp: Prepare/unprepare clocks before/after enable/disable
  [media] omap3isp: preview: Add support for 8-bit formats at the sink pad
  [media] omap3isp: Replace printk with dev_*
  [media] omap3isp: Find source pad from external entity
  [media] omap3isp: Configure CSI-2 phy based on platform data
  [media] omap3isp: Add PHY routing configuration
  [media] omap3isp: Add CSI configuration registers from control block to ISP resources
  [media] omap3isp: Remove unneeded module memory address definitions
  [media] omap3isp: Use monotonic timestamps for statistics buffers
  [media] uvcvideo: Fix control value clamping for unsigned integer controls
  [media] uvcvideo: Mark first output terminal as default video node
  [media] uvcvideo: Add VIDIOC_[GS]_PRIORITY support
  [media] uvcvideo: Return -ENOTTY for unsupported ioctls
  [media] uvcvideo: Set device_caps in VIDIOC_QUERYCAP
  [media] uvcvideo: Don't fail when an unsupported format is requested
  [media] uvcvideo: Return -EACCES when trying to access a read/write-only control
  [media] uvcvideo: Set error_idx properly for extended controls API failures
  [media] rtl28xxu: add NOXON DAB/DAB+ USB dongle rev 2
  [media] fc2580: write some registers conditionally
  ...
---

diff --git a/Makefile b/Makefile
index 3d2fc460b22f..540f7b240c77 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Terrified Chipmunk
# *DOCUMENTATION*
@@ -1321,10 +1321,12 @@ kernelversion:
# Clear a bunch of variables before executing the submake
tools/: FORCE
- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/
+ $(Q)mkdir -p $(objtree)/tools
+ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/
tools/%: FORCE
- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/ $*
+ $(Q)mkdir -p $(objtree)/tools
+ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/ $*
# Single targets
# ---------------------------------------------------------------------------
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ade7e924bef5..9759fec0b704 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -547,6 +547,7 @@ config ARCH_KIRKWOOD
select CPU_FEROCEON
select GENERIC_CLOCKEVENTS
select PCI
+ select PCI_QUIRKS
select PLAT_ORION_LEGACY
help
Support for the following Marvell Kirkwood series SoCs:
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index df13a3ffff35..9d2d3ba339ff 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -162,7 +162,6 @@ static struct clock_event_device sp804_clockevent = {
.set_mode = sp804_set_mode,
.set_next_event = sp804_set_next_event,
.rating = 300,
- .cpumask = cpu_all_mask,
};
static struct irqaction sp804_timer_irq = {
@@ -185,6 +184,7 @@ void __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
evt->name = name;
evt->irq = irq;
+ evt->cpumask = cpu_possible_mask;
setup_irq(irq, &sp804_timer_irq);
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
diff --git a/arch/arm/mach-dove/include/mach/pm.h
b/arch/arm/mach-dove/include/mach/pm.h index 7bcd0dfce4b1..b47f75038686 100644 --- a/arch/arm/mach-dove/include/mach/pm.h +++ b/arch/arm/mach-dove/include/mach/pm.h @@ -63,7 +63,7 @@ static inline int pmu_to_irq(int pin) static inline int irq_to_pmu(int irq) { - if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS) + if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS) return irq - IRQ_DOVE_PMU_START; return -EINVAL; diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index 087711524e8a..bc4344aa1009 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -46,8 +46,20 @@ static void pmu_irq_ack(struct irq_data *d) int pin = irq_to_pmu(d->irq); u32 u; + /* + * The PMU mask register is not RW0C: it is RW. This means that + * the bits take whatever value is written to them; if you write + * a '1', you will set the interrupt. + * + * Unfortunately this means there is NO race free way to clear + * these interrupts. + * + * So, let's structure the code so that the window is as small as + * possible. + */ u = ~(1 << (pin & 31)); - writel(u, PMU_INTERRUPT_CAUSE); + u &= readl_relaxed(PMU_INTERRUPT_CAUSE); + writel_relaxed(u, PMU_INTERRUPT_CAUSE); } static struct irq_chip pmu_irq_chip = { diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 1694f01ce2b6..6d6bde3e15fa 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -410,6 +410,7 @@ void __init ixp4xx_pci_preinit(void) * Enable the IO window to be way up high, at 0xfffffc00 */ local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01); + local_write_config(0x40, 4, 0x000080FF); /* No TRDY time limit */ } else { printk("PCI: IXP4xx is target - No bus scan performed\n"); } diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index fdf91a160884..8c0c0e2d0727 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -67,15 +67,12 @@ static struct map_desc ixp4xx_io_desc[] __initdata = { .pfn = __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS), .length = IXP4XX_PCI_CFG_REGION_SIZE, .type = MT_DEVICE - }, -#ifdef CONFIG_DEBUG_LL - { /* Debug UART mapping */ - .virtual = (unsigned long)IXP4XX_DEBUG_UART_BASE_VIRT, - .pfn = __phys_to_pfn(IXP4XX_DEBUG_UART_BASE_PHYS), - .length = IXP4XX_DEBUG_UART_REGION_SIZE, + }, { /* Queue Manager */ + .virtual = (unsigned long)IXP4XX_QMGR_BASE_VIRT, + .pfn = __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS), + .length = IXP4XX_QMGR_REGION_SIZE, .type = MT_DEVICE - } -#endif + }, }; void __init ixp4xx_map_io(void) diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c index b800a031207c..53b8348dfcc2 100644 --- a/arch/arm/mach-ixp4xx/goramo_mlr.c +++ b/arch/arm/mach-ixp4xx/goramo_mlr.c @@ -15,6 +15,7 @@ #include #include #include +#include #define SLOT_ETHA 0x0B /* IDSEL = AD21 */ #define SLOT_ETHB 0x0C /* IDSEL = AD20 */ @@ -329,7 +330,7 @@ static struct platform_device device_hss_tab[] = { }; -static struct platform_device *device_tab[6] __initdata = { +static struct platform_device *device_tab[7] __initdata = { &device_flash, /* index 0 */ }; diff --git a/arch/arm/mach-ixp4xx/include/mach/debug-macro.S b/arch/arm/mach-ixp4xx/include/mach/debug-macro.S index 8c9f8d564492..ff686cbc5df4 100644 --- a/arch/arm/mach-ixp4xx/include/mach/debug-macro.S +++ b/arch/arm/mach-ixp4xx/include/mach/debug-macro.S @@ -17,8 +17,8 @@ #else mov \rp, #0 #endif - orr \rv, \rp, #0xff000000 @ virtual - orr \rv, \rv, #0x00b00000 + orr \rv, \rp, #0xfe000000 @ virtual + orr \rv, \rv, #0x00f00000 orr \rp, \rp, 
#0xc8000000 @ physical .endm diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h index eb68b61ce975..c5bae9c035d5 100644 --- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h +++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h @@ -30,51 +30,43 @@ * * 0x50000000 0x10000000 ioremap'd EXP BUS * - * 0x6000000 0x00004000 ioremap'd QMgr + * 0xC8000000 0x00013000 0xFEF00000 On-Chip Peripherals * - * 0xC0000000 0x00001000 0xffbff000 PCI CFG + * 0xC0000000 0x00001000 0xFEF13000 PCI CFG * - * 0xC4000000 0x00001000 0xffbfe000 EXP CFG + * 0xC4000000 0x00001000 0xFEF14000 EXP CFG * - * 0xC8000000 0x00013000 0xffbeb000 On-Chip Peripherals + * 0x60000000 0x00004000 0xFEF15000 QMgr */ /* * Queue Manager */ -#define IXP4XX_QMGR_BASE_PHYS (0x60000000) -#define IXP4XX_QMGR_REGION_SIZE (0x00004000) +#define IXP4XX_QMGR_BASE_PHYS 0x60000000 +#define IXP4XX_QMGR_BASE_VIRT IOMEM(0xFEF15000) +#define IXP4XX_QMGR_REGION_SIZE 0x00004000 /* - * Expansion BUS Configuration registers + * Peripheral space, including debug UART. Must be section-aligned so that + * it can be used with the low-level debug code. */ -#define IXP4XX_EXP_CFG_BASE_PHYS (0xC4000000) -#define IXP4XX_EXP_CFG_BASE_VIRT IOMEM(0xFFBFE000) -#define IXP4XX_EXP_CFG_REGION_SIZE (0x00001000) +#define IXP4XX_PERIPHERAL_BASE_PHYS 0xC8000000 +#define IXP4XX_PERIPHERAL_BASE_VIRT IOMEM(0xFEF00000) +#define IXP4XX_PERIPHERAL_REGION_SIZE 0x00013000 /* * PCI Config registers */ -#define IXP4XX_PCI_CFG_BASE_PHYS (0xC0000000) -#define IXP4XX_PCI_CFG_BASE_VIRT IOMEM(0xFFBFF000) -#define IXP4XX_PCI_CFG_REGION_SIZE (0x00001000) - -/* - * Peripheral space - */ -#define IXP4XX_PERIPHERAL_BASE_PHYS (0xC8000000) -#define IXP4XX_PERIPHERAL_BASE_VIRT IOMEM(0xFFBEB000) -#define IXP4XX_PERIPHERAL_REGION_SIZE (0x00013000) +#define IXP4XX_PCI_CFG_BASE_PHYS 0xC0000000 +#define IXP4XX_PCI_CFG_BASE_VIRT IOMEM(0xFEF13000) +#define IXP4XX_PCI_CFG_REGION_SIZE 0x00001000 /* - * Debug UART - * - * This is basically a remap of UART1 into a region that is section - * aligned so that it * can be used with the low-level debug code. 
+ * Expansion BUS Configuration registers */ -#define IXP4XX_DEBUG_UART_BASE_PHYS (0xC8000000) -#define IXP4XX_DEBUG_UART_BASE_VIRT IOMEM(0xffb00000) -#define IXP4XX_DEBUG_UART_REGION_SIZE (0x00001000) +#define IXP4XX_EXP_CFG_BASE_PHYS 0xC4000000 +#define IXP4XX_EXP_CFG_BASE_VIRT 0xFEF14000 +#define IXP4XX_EXP_CFG_REGION_SIZE 0x00001000 #define IXP4XX_EXP_CS0_OFFSET 0x00 #define IXP4XX_EXP_CS1_OFFSET 0x04 diff --git a/arch/arm/mach-ixp4xx/include/mach/qmgr.h b/arch/arm/mach-ixp4xx/include/mach/qmgr.h index 9e7cad2d54cb..4de8da536dbb 100644 --- a/arch/arm/mach-ixp4xx/include/mach/qmgr.h +++ b/arch/arm/mach-ixp4xx/include/mach/qmgr.h @@ -86,7 +86,7 @@ void qmgr_release_queue(unsigned int queue); static inline void qmgr_put_entry(unsigned int queue, u32 val) { - extern struct qmgr_regs __iomem *qmgr_regs; + struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT; #if DEBUG_QMGR BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ @@ -99,7 +99,7 @@ static inline void qmgr_put_entry(unsigned int queue, u32 val) static inline u32 qmgr_get_entry(unsigned int queue) { u32 val; - extern struct qmgr_regs __iomem *qmgr_regs; + const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT; val = __raw_readl(&qmgr_regs->acc[queue][0]); #if DEBUG_QMGR BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ @@ -112,14 +112,14 @@ static inline u32 qmgr_get_entry(unsigned int queue) static inline int __qmgr_get_stat1(unsigned int queue) { - extern struct qmgr_regs __iomem *qmgr_regs; + const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT; return (__raw_readl(&qmgr_regs->stat1[queue >> 3]) >> ((queue & 7) << 2)) & 0xF; } static inline int __qmgr_get_stat2(unsigned int queue) { - extern struct qmgr_regs __iomem *qmgr_regs; + const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT; BUG_ON(queue >= HALF_QUEUES); return (__raw_readl(&qmgr_regs->stat2[queue >> 4]) >> ((queue & 0xF) << 1)) & 0x3; @@ -145,7 +145,7 @@ static inline int qmgr_stat_empty(unsigned int queue) */ static inline int qmgr_stat_below_low_watermark(unsigned int queue) { - extern struct qmgr_regs __iomem *qmgr_regs; + const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT; if (queue >= HALF_QUEUES) return (__raw_readl(&qmgr_regs->statne_h) >> (queue - HALF_QUEUES)) & 0x01; @@ -172,7 +172,7 @@ static inline int qmgr_stat_above_high_watermark(unsigned int queue) */ static inline int qmgr_stat_full(unsigned int queue) { - extern struct qmgr_regs __iomem *qmgr_regs; + const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT; if (queue >= HALF_QUEUES) return (__raw_readl(&qmgr_regs->statf_h) >> (queue - HALF_QUEUES)) & 0x01; diff --git a/arch/arm/mach-ixp4xx/ixp4xx_npe.c b/arch/arm/mach-ixp4xx/ixp4xx_npe.c index a17ed79207a4..d4eb09a62863 100644 --- a/arch/arm/mach-ixp4xx/ixp4xx_npe.c +++ b/arch/arm/mach-ixp4xx/ixp4xx_npe.c @@ -116,7 +116,11 @@ /* NPE mailbox_status value for reset */ #define RESET_MBOX_STAT 0x0000F0F0 -const char *npe_names[] = { "NPE-A", "NPE-B", "NPE-C" }; +#define NPE_A_FIRMWARE "NPE-A" +#define NPE_B_FIRMWARE "NPE-B" +#define NPE_C_FIRMWARE "NPE-C" + +const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE }; #define print_npe(pri, npe, fmt, ...) 
\ printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__) @@ -724,6 +728,9 @@ module_exit(npe_cleanup_module); MODULE_AUTHOR("Krzysztof Halasa"); MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(NPE_A_FIRMWARE); +MODULE_FIRMWARE(NPE_B_FIRMWARE); +MODULE_FIRMWARE(NPE_C_FIRMWARE); EXPORT_SYMBOL(npe_names); EXPORT_SYMBOL(npe_running); diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c index 852f7c9f87d0..9d1b6b7c394c 100644 --- a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c +++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c @@ -14,7 +14,7 @@ #include #include -struct qmgr_regs __iomem *qmgr_regs; +static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT; static struct resource *mem_res; static spinlock_t qmgr_lock; static u32 used_sram_bitmap[4]; /* 128 16-dword pages */ @@ -293,12 +293,6 @@ static int qmgr_init(void) if (mem_res == NULL) return -EBUSY; - qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE); - if (qmgr_regs == NULL) { - err = -ENOMEM; - goto error_map; - } - /* reset qmgr registers */ for (i = 0; i < 4; i++) { __raw_writel(0x33333333, &qmgr_regs->stat1[i]); @@ -347,8 +341,6 @@ static int qmgr_init(void) error_irq2: free_irq(IRQ_IXP4XX_QM1, NULL); error_irq: - iounmap(qmgr_regs); -error_map: release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE); return err; } @@ -359,7 +351,6 @@ static void qmgr_remove(void) free_irq(IRQ_IXP4XX_QM2, NULL); synchronize_irq(IRQ_IXP4XX_QM1); synchronize_irq(IRQ_IXP4XX_QM2); - iounmap(qmgr_regs); release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE); } @@ -369,7 +360,6 @@ module_exit(qmgr_remove); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Krzysztof Halasa"); -EXPORT_SYMBOL(qmgr_regs); EXPORT_SYMBOL(qmgr_set_irq); EXPORT_SYMBOL(qmgr_enable_irq); EXPORT_SYMBOL(qmgr_disable_irq); diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c index ec544918b12c..74fc5a074fc4 100644 --- a/arch/arm/mach-kirkwood/pcie.c +++ b/arch/arm/mach-kirkwood/pcie.c @@ -207,14 +207,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys) return 1; } +/* + * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it + * is operating as a root complex this needs to be switched to + * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on + * the device. Decoding setup is handled by the orion code. + */ static void __devinit rc_pci_fixup(struct pci_dev *dev) { - /* - * Prevent enumeration of root complex. 
- */ if (dev->bus->parent == NULL && dev->devfn == 0) { int i; + dev->class &= 0xff; + dev->class |= PCI_CLASS_BRIDGE_HOST << 8; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index db98e7021f0d..0abd1c469887 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -473,12 +473,13 @@ int s3c2410_dma_enqueue(enum dma_ch channel, void *id, pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n", chan->number, __func__, buf); - if (chan->end == NULL) + if (chan->end == NULL) { pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n", chan->number, __func__, chan); - - chan->end->next = buf; - chan->end = buf; + } else { + chan->end->next = buf; + chan->end = buf; + } } /* if necessary, update the next buffer field */ diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 6d909faebf28..656a6f291a35 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -392,7 +392,7 @@ __SYSCALL(367, sys_fanotify_init) __SYSCALL(368, compat_sys_fanotify_mark_wrapper) __SYSCALL(369, sys_prlimit64) __SYSCALL(370, sys_name_to_handle_at) -__SYSCALL(371, sys_open_by_handle_at) +__SYSCALL(371, compat_sys_open_by_handle_at) __SYSCALL(372, sys_clock_adjtime) __SYSCALL(373, sys_syncfs) diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h new file mode 100644 index 000000000000..ecead15872a6 --- /dev/null +++ b/arch/c6x/include/asm/setup.h @@ -0,0 +1,33 @@ +/* + * Port on Texas Instruments TMS320C6x architecture + * + * Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated + * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _ASM_C6X_SETUP_H +#define _ASM_C6X_SETUP_H + +#include + +#ifndef __ASSEMBLY__ +extern char c6x_command_line[COMMAND_LINE_SIZE]; + +extern int c6x_add_memory(phys_addr_t start, unsigned long size); + +extern unsigned long ram_start; +extern unsigned long ram_end; + +extern int c6x_num_cores; +extern unsigned int c6x_silicon_rev; +extern unsigned int c6x_devstat; +extern unsigned char c6x_fuse_mac[6]; + +extern void machine_init(unsigned long dt_ptr); +extern void time_init(void); + +#endif /* !__ASSEMBLY__ */ +#endif /* _ASM_C6X_SETUP_H */ diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild index c312b424c433..e9bc2b2b8147 100644 --- a/arch/c6x/include/uapi/asm/Kbuild +++ b/arch/c6x/include/uapi/asm/Kbuild @@ -1,6 +1,8 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generic-y += kvm_para.h + header-y += byteorder.h header-y += kvm_para.h header-y += ptrace.h diff --git a/arch/c6x/include/uapi/asm/kvm_para.h b/arch/c6x/include/uapi/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/arch/c6x/include/uapi/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/c6x/include/uapi/asm/setup.h b/arch/c6x/include/uapi/asm/setup.h index a01e31896fa9..ad9ac97a8dad 100644 --- a/arch/c6x/include/uapi/asm/setup.h +++ b/arch/c6x/include/uapi/asm/setup.h @@ -1,33 +1,6 @@ -/* - * Port on Texas Instruments TMS320C6x architecture - * - * Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated - * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef _ASM_C6X_SETUP_H -#define _ASM_C6X_SETUP_H +#ifndef _UAPI_ASM_C6X_SETUP_H +#define _UAPI_ASM_C6X_SETUP_H #define COMMAND_LINE_SIZE 1024 -#ifndef __ASSEMBLY__ -extern char c6x_command_line[COMMAND_LINE_SIZE]; - -extern int c6x_add_memory(phys_addr_t start, unsigned long size); - -extern unsigned long ram_start; -extern unsigned long ram_end; - -extern int c6x_num_cores; -extern unsigned int c6x_silicon_rev; -extern unsigned int c6x_devstat; -extern unsigned char c6x_fuse_mac[6]; - -extern void machine_init(unsigned long dt_ptr); -extern void time_init(void); - -#endif /* !__ASSEMBLY__ */ -#endif /* _ASM_C6X_SETUP_H */ +#endif /* _UAPI_ASM_C6X_SETUP_H */ diff --git a/arch/c6x/kernel/entry.S b/arch/c6x/kernel/entry.S index 5449c36018fe..0ed6157dd256 100644 --- a/arch/c6x/kernel/entry.S +++ b/arch/c6x/kernel/entry.S @@ -277,6 +277,8 @@ work_rescheduled: [A1] BNOP .S1 work_resched,5 work_notifysig: + ;; enable interrupts for do_notify_resume() + UNMASK_INT B2 B .S2 do_notify_resume LDW .D2T1 *+SP(REGS__END+8),A6 ; syscall flag ADDKPC .S2 resume_userspace,B3,1 @@ -427,8 +429,7 @@ ENTRY(ret_from_kernel_execve) ENDPROC(ret_from_kernel_execve) ;; - ;; These are the interrupt handlers, responsible for calling __do_IRQ() - ;; int6 is used for syscalls (see _system_call entry) + ;; These are the interrupt handlers, responsible for calling c6x_do_IRQ() ;; .macro SAVE_ALL_INT SAVE_ALL IRP,ITSR diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 3847e5b9c601..3903e3d11f5a 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -111,7 +111,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) /* It is more difficult to avoid calling this function than to call it and ignore errors. 
*/ - if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1)) + if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1) == -EFAULT) goto badframe; return rval; diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index bd94946a18f3..ef99db994c2f 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -95,7 +95,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t pte, int dirty) { - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + int changed = !pte_same(*ptep, pte); + + if (changed) { + set_pte_at(vma->vm_mm, addr, ptep, pte); + /* + * There could be some standard sized pages in there, + * get them all. + */ + flush_tlb_range(vma, addr, addr + HPAGE_SIZE); + } + return changed; } static inline pte_t huge_ptep_get(pte_t *ptep) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index b1fb7af3c350..cce3782c96c9 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -510,7 +510,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R3000A; __cpu_name[cpu] = "R3000A"; } - break; } else { c->cputype = CPU_R3000; __cpu_name[cpu] = "R3000"; diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index a6c133212003..9b00362f32f6 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -36,6 +36,11 @@ FEXPORT(ret_from_exception) FEXPORT(ret_from_irq) LONG_S s0, TI_REGS($28) FEXPORT(__ret_from_irq) +/* + * We can be coming here from a syscall done in the kernel space, + * e.g. a failed kernel_execve(). + */ +resume_userspace_check: LONG_L t0, PT_STATUS(sp) # returning to kernel mode? andi t0, t0, KU_USER beqz t0, resume_kernel @@ -162,7 +167,7 @@ work_notifysig: # deal with pending signals and move a0, sp li a1, 0 jal do_notify_resume # a2 already loaded - j resume_userspace + j resume_userspace_check FEXPORT(syscall_exit_partial) local_irq_disable # make sure need_resched doesn't diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f6ba8381ee01..86ec03f0e00c 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -397,14 +397,14 @@ EXPORT(sysn32_call_table) PTR sys_timerfd_create PTR compat_sys_timerfd_gettime /* 6285 */ PTR compat_sys_timerfd_settime - PTR sys_signalfd4 + PTR compat_sys_signalfd4 PTR sys_eventfd2 PTR sys_epoll_create1 PTR sys_dup3 /* 6290 */ PTR sys_pipe2 PTR sys_inotify_init1 - PTR sys_preadv - PTR sys_pwritev + PTR compat_sys_preadv + PTR compat_sys_pwritev PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ PTR sys_perf_event_open PTR sys_accept4 diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 4b9b935a070e..88e79ad6f811 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -120,18 +120,11 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, if (cpu_context(cpu, mm) != 0) { unsigned long size, flags; - int huge = is_vm_hugetlb_page(vma); ENTER_CRITICAL(flags); - if (huge) { - start = round_down(start, HPAGE_SIZE); - end = round_up(end, HPAGE_SIZE); - size = (end - start) >> HPAGE_SHIFT; - } else { - start = round_down(start, PAGE_SIZE << 1); - end = round_up(end, PAGE_SIZE << 1); - size = (end - start) >> (PAGE_SHIFT + 1); - } + start = round_down(start, PAGE_SIZE << 1); + end = round_up(end, PAGE_SIZE << 1); + size = (end - start) >> (PAGE_SHIFT + 1); if (size <= current_cpu_data.tlbsize/2) { int oldpid = read_c0_entryhi(); int newpid = 
cpu_asid(cpu, mm); @@ -140,10 +133,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, int idx; write_c0_entryhi(start | newpid); - if (huge) - start += HPAGE_SIZE; - else - start += (PAGE_SIZE << 1); + start += (PAGE_SIZE << 1); mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 30110297f4f9..ddedc8a77861 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -84,7 +84,6 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp; sigset_t set; - stack_t st; /* * Since we stacked the signal on a dword boundary, @@ -104,11 +103,10 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; - if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) - goto badframe; /* It is more difficult to avoid calling this function than to call it and ignore errors. */ - do_sigaltstack(&st, NULL, regs->sp); + if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) + goto badframe; return regs->gpr[11]; diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 3735abd7f8f6..cbf5d59d5d6a 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -60,7 +60,7 @@ ENTRY_SAME(fork_wrapper) ENTRY_SAME(read) ENTRY_SAME(write) - ENTRY_SAME(open) /* 5 */ + ENTRY_COMP(open) /* 5 */ ENTRY_SAME(close) ENTRY_SAME(waitpid) ENTRY_SAME(creat) diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index ad79b846535c..827e094a2f49 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -28,7 +28,7 @@ ENTRY(sys32_open_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int lgfr %r4,%r4 # int - jg sys_open # branch to system call + jg compat_sys_open # branch to system call ENTRY(sys32_close_wrapper) llgfr %r2,%r2 # unsigned int diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index c268bbf8b410..02353bde92d8 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -148,7 +148,6 @@ score_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; sigset_t set; - stack_t st; int sig; /* Always make any pending restarted system calls return -EINTR */ @@ -168,12 +167,10 @@ score_rt_sigreturn(struct pt_regs *regs) else if (sig) force_sig(sig, current); - if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) - goto badframe; - /* It is more difficult to avoid calling this function than to call it and ignore errors. 
*/ - do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]); + if (do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs->regs[0]) == -EFAULT) + goto badframe; regs->is_syscall = 0; __asm__ __volatile__( diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 23853814bd17..d867cd95a622 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -347,7 +347,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3, { struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP; sigset_t set; - stack_t __user st; long long ret; /* Always make any pending restarted system calls return -EINTR */ @@ -365,11 +364,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3, goto badframe; regs->pc -= 4; - if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) - goto badframe; /* It is more difficult to avoid calling this function than to call it and ignore errors. */ - do_sigaltstack(&st, NULL, REF_REG_SP); + if (do_sigaltstack(&frame->uc.uc_stack, NULL, REF_REG_SP) == -EFAULT) + goto badframe; return (int) ret; diff --git a/arch/sparc/boot/piggyback.c b/arch/sparc/boot/piggyback.c index c0a798fcf030..bb7c95161d71 100644 --- a/arch/sparc/boot/piggyback.c +++ b/arch/sparc/boot/piggyback.c @@ -81,18 +81,18 @@ static void usage(void) static int start_line(const char *line) { - if (strcmp(line + 8, " T _start\n") == 0) + if (strcmp(line + 10, " _start\n") == 0) return 1; - else if (strcmp(line + 16, " T _start\n") == 0) + else if (strcmp(line + 18, " _start\n") == 0) return 1; return 0; } static int end_line(const char *line) { - if (strcmp(line + 8, " A _end\n") == 0) + if (strcmp(line + 10, " _end\n") == 0) return 1; - else if (strcmp (line + 16, " A _end\n") == 0) + else if (strcmp (line + 18, " _end\n") == 0) return 1; return 0; } @@ -100,8 +100,8 @@ static int end_line(const char *line) /* * Find address for start and end in System.map. * The file looks like this: - * f0004000 T _start - * f0379f79 A _end + * f0004000 ... _start + * f0379f79 ... _end * 1234567890123456 * ^coloumn 1 * There is support for 64 bit addresses too. 
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index 44025f4ba41f..8475a474273a 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -47,7 +47,7 @@ STUB: sra REG1, 0, REG1; \ sra REG4, 0, REG4 SIGN1(sys32_exit, sparc_exit, %o0) -SIGN1(sys32_exit_group, sys_exit_group, %o0) +SIGN1(sys32_exit_group, sparc_exit_group, %o0) SIGN1(sys32_wait4, compat_sys_wait4, %o2) SIGN1(sys32_creat, sys_creat, %o1) SIGN1(sys32_mknod, sys_mknod, %o1) diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 7f5f65d0b3fd..bf2347794e33 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -118,10 +118,20 @@ ret_from_syscall: ba,pt %xcc, ret_sys_call ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 + .globl sparc_exit_group + .type sparc_exit_group,#function +sparc_exit_group: + sethi %hi(sys_exit_group), %g7 + ba,pt %xcc, 1f + or %g7, %lo(sys_exit_group), %g7 + .size sparc_exit_group,.-sparc_exit_group + .globl sparc_exit .type sparc_exit,#function sparc_exit: - rdpr %pstate, %g2 + sethi %hi(sys_exit), %g7 + or %g7, %lo(sys_exit), %g7 +1: rdpr %pstate, %g2 wrpr %g2, PSTATE_IE, %pstate rdpr %otherwin, %g1 rdpr %cansave, %g3 @@ -129,7 +139,7 @@ sparc_exit: wrpr %g3, 0x0, %cansave wrpr %g0, 0x0, %otherwin wrpr %g2, 0x0, %pstate - ba,pt %xcc, sys_exit + jmpl %g7, %g0 stb %g0, [%g6 + TI_WSAVED] .size sparc_exit,.-sparc_exit diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 1c9af9fa38e9..017b74a63dcb 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -133,7 +133,7 @@ sys_call_table: /*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr /*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall - .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname + .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname /*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask /*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index 3a8ece7d09ca..0d7103c9eff3 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c @@ -32,13 +32,14 @@ void flush_thread(void) "err = %d\n", ret); force_sig(SIGKILL, current); } + get_safe_registers(current_pt_regs()->regs.gp, + current_pt_regs()->regs.fp); __switch_mm(¤t->mm->context.id); } void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) { - get_safe_registers(regs->regs.gp, regs->regs.fp); PT_REGS_IP(regs) = eip; PT_REGS_SP(regs) = esp; current->ptrace &= ~PT_DTRACE; diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 66e5f0ef0523..79fd8a3418f9 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -12,6 +12,7 @@ header-y += mce.h header-y += msr-index.h header-y += msr.h header-y += mtrr.h +header-y += perf_regs.h header-y += posix_types_32.h header-y += posix_types_64.h header-y += posix_types_x32.h @@ -19,8 +20,10 @@ header-y += prctl.h header-y += processor-flags.h header-y += ptrace-abi.h header-y += sigcontext32.h +header-y += svm.h header-y += ucontext.h header-y += vm86.h +header-y += vmx.h header-y += vsyscall.h genhdr-y += unistd_32.h diff --git 
a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 831dbb9c6c02..41ab26ea6564 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -399,14 +399,17 @@ static inline void drop_init_fpu(struct task_struct *tsk) typedef struct { int preload; } fpu_switch_t; /* - * FIXME! We could do a totally lazy restore, but we need to - * add a per-cpu "this was the task that last touched the FPU - * on this CPU" variable, and the task needs to have a "I last - * touched the FPU on this CPU" and check them. + * Must be run with preemption disabled: this clears the fpu_owner_task, + * on this CPU. * - * We don't do that yet, so "fpu_lazy_restore()" always returns - * false, but some day.. + * This will disable any lazy FPU state restore of the current FPU state, + * but if the current thread owns the FPU, it will still be saved by. */ +static inline void __cpu_disable_lazy_restore(unsigned int cpu) +{ + per_cpu(fpu_owner_task, cpu) = NULL; +} + static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu) { return new == this_cpu_read_stable(fpu_owner_task) && diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 957a47aec64e..4dac2f68ed4a 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -292,8 +292,8 @@ default_entry: * be using the global pages. * * NOTE! If we are on a 486 we may have no cr4 at all! - * Specifically, cr4 exists if and only if CPUID exists, - * which in turn exists if and only if EFLAGS.ID exists. + * Specifically, cr4 exists if and only if CPUID exists + * and has flags other than the FPU flag set. */ movl $X86_EFLAGS_ID,%ecx pushl %ecx @@ -308,6 +308,11 @@ default_entry: testl %ecx,%eax jz 6f # No ID flag = no CPUID = no CR4 + movl $1,%eax + cpuid + andl $~1,%edx # Ignore CPUID.FPU + jz 6f # No flags or only CPUID.FPU = no CR4 + movl pa(mmu_cr4_features),%eax movl %eax,%cr4 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 5e0596b0632e..974b67e46dd0 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -1541,6 +1541,13 @@ void syscall_trace_leave(struct pt_regs *regs) { bool step; + /* + * We may come here right after calling schedule_user() + * or do_notify_resume(), in which case we can be in RCU + * user mode. 
+ */ + rcu_user_exit(); + audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c80a33bc528b..f3e2ec878b8c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -68,6 +68,8 @@ #include #include #include +#include +#include #include #include #include @@ -818,6 +820,9 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; + /* the FPU context is blank, nobody can own it */ + __cpu_disable_lazy_restore(cpu); + err = do_boot_cpu(apicid, cpu, tidle); if (err) { pr_debug("do_boot_cpu failed %d\n", err); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 39171cb307ea..bba39bfa1c4b 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -426,8 +426,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) _ASM_EXTABLE(1b, 3b) \ : "=m" ((ctxt)->eflags), "=&r" (_tmp), \ "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \ - : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \ - "a" (*rax), "d" (*rdx)); \ + : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val)); \ } while (0) /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */ diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 89b30f32ba68..ff7bb8a42ed6 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -1961,6 +1961,7 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) { res = loader_verify(lb, dev, rec); if (res) break; + rec = ihex_next_binrec(rec); } release_firmware(fw); if (!res) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index fbd9b2b850ef..c58ea9b80b1a 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -127,12 +127,12 @@ config HW_RANDOM_VIA If unsure, say Y. config HW_RANDOM_IXP4XX - tristate "Intel IXP4xx NPU HW Random Number Generator support" + tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support" depends on HW_RANDOM && ARCH_IXP4XX default HW_RANDOM ---help--- - This driver provides kernel-side support for the Random - Number Generator hardware found on the Intel IXP4xx NPU. + This driver provides kernel-side support for the Pseudo-Random + Number Generator hardware found on the Intel IXP45x/46x NPU. To compile this driver as a module, choose M here: the module will be called ixp4xx-rng. 
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c index 263567f5f392..beec1627db3c 100644 --- a/drivers/char/hw_random/ixp4xx-rng.c +++ b/drivers/char/hw_random/ixp4xx-rng.c @@ -45,6 +45,9 @@ static int __init ixp4xx_rng_init(void) void __iomem * rng_base; int err; + if (!cpu_is_ixp46x()) /* includes IXP455 */ + return -ENOSYS; + rng_base = ioremap(0x70002100, 4); if (!rng_base) return -ENOMEM; @@ -68,5 +71,5 @@ module_init(ixp4xx_rng_init); module_exit(ixp4xx_rng_exit); MODULE_AUTHOR("Deepak Saxena "); -MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx"); +MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 0bb207eaef2f..54a3a6d09819 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd, static const struct file_operations raw_fops = { .read = do_sync_read, - .aio_read = blkdev_aio_read, + .aio_read = generic_file_aio_read, .write = do_sync_write, .aio_write = blkdev_aio_write, .fsync = blkdev_fsync, diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 308c7fb92a60..f6644f59fd9d 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -224,7 +224,7 @@ config CRYPTO_DEV_TALITOS config CRYPTO_DEV_IXP4XX tristate "Driver for IXP4xx crypto hardware acceleration" - depends on ARCH_IXP4XX + depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE select CRYPTO_DES select CRYPTO_ALGAPI select CRYPTO_AUTHENC diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 8f3f74ce8c7f..21180d6cad6e 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -750,12 +750,12 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, } if (cipher_cfg & MOD_AES) { switch (key_len) { - case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break; - case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break; - case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break; - default: - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; + case 16: keylen_cfg = MOD_AES128; break; + case 24: keylen_cfg = MOD_AES192; break; + case 32: keylen_cfg = MOD_AES256; break; + default: + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; } cipher_cfg |= keylen_cfg; } else if (cipher_cfg & MOD_3DES) { diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 90f0b730e9bb..75c0a1a85fc3 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -416,10 +416,18 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, dimm->cschannel = chn; /* Increment csrow location */ - row++; - if (row == tot_csrows) { - row = 0; + if (layers[0].is_virt_csrow) { chn++; + if (chn == tot_channels) { + chn = 0; + row++; + } + } else { + row++; + if (row == tot_csrows) { + row = 0; + chn++; + } } /* Increment dimm location */ diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index a09d0667f72a..9d669cd43618 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c @@ -197,8 +197,8 @@ static const char *ferr_fat_fbd_name[] = { [0] = "Memory Write error on non-redundant retry or " "FBD configuration Write error on retry", }; -#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28)) -#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)) +#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3) +#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 
22)) #define FERR_NF_FBD 0xa0 static const char *ferr_nf_fbd_name[] = { @@ -225,7 +225,7 @@ static const char *ferr_nf_fbd_name[] = { [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", [0] = "Uncorrectable Data ECC on Replay", }; -#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28)) +#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3) #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\ (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\ (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\ @@ -464,7 +464,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci) errnum = find_first_bit(&errors, ARRAY_SIZE(ferr_nf_fbd_name)); specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum); - branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0; + branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0; pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, REDMEMA, &syndrome); diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 3672101023bd..10c8c00d6469 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -816,7 +816,7 @@ static ssize_t i7core_inject_store_##param( \ struct device_attribute *mattr, \ const char *data, size_t count) \ { \ - struct mem_ctl_info *mci = to_mci(dev); \ + struct mem_ctl_info *mci = dev_get_drvdata(dev); \ struct i7core_pvt *pvt; \ long value; \ int rc; \ @@ -845,7 +845,7 @@ static ssize_t i7core_inject_show_##param( \ struct device_attribute *mattr, \ char *data) \ { \ - struct mem_ctl_info *mci = to_mci(dev); \ + struct mem_ctl_info *mci = dev_get_drvdata(dev); \ struct i7core_pvt *pvt; \ \ pvt = mci->pvt_info; \ @@ -1052,7 +1052,7 @@ static ssize_t i7core_show_counter_##param( \ struct device_attribute *mattr, \ char *data) \ { \ - struct mem_ctl_info *mci = to_mci(dev); \ + struct mem_ctl_info *mci = dev_get_drvdata(dev); \ struct i7core_pvt *pvt = mci->pvt_info; \ \ edac_dbg(1, "\n"); \ diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 069e26c11c4f..a98020409fa9 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c @@ -370,10 +370,6 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) static void i82975x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, void __iomem *mch_window) { - static const char *labels[4] = { - "DIMM A1", "DIMM A2", - "DIMM B1", "DIMM B2" - }; struct csrow_info *csrow; unsigned long last_cumul_size; u8 value; @@ -423,9 +419,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, dimm = mci->csrows[index]->channels[chan]->dimm; dimm->nr_pages = nr_pages / csrow->nr_channels; - strncpy(csrow->channels[chan]->dimm->label, - labels[(index >> 1) + (chan * 2)], - EDAC_MC_LABEL_LEN); + + snprintf(csrow->channels[chan]->dimm->label, EDAC_MC_LABEL_LEN, "DIMM %c%d", + (chan == 0) ? 'A' : 'B', + index); dimm->grain = 1 << 7; /* 128Byte cache-line resolution */ dimm->dtype = i82975x_dram_type(mch_window, index); dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 241ad1eeec64..f2df06c603f7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -226,6 +226,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) * already updated or not by exynos_drm_encoder_dpms function. */ exynos_encoder->updated = true; + + /* + * In case of setcrtc, there is no way to update encoder's dpms + * so update it here. 
+ */ + exynos_encoder->dpms = DRM_MODE_DPMS_ON; } static void exynos_drm_encoder_disable(struct drm_encoder *encoder) @@ -507,6 +513,6 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data) * because the setting for disabling the overlay will be updated * at vsync. */ - if (overlay_ops->wait_for_vblank) + if (overlay_ops && overlay_ops->wait_for_vblank) overlay_ops->wait_for_vblank(manager->dev); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 67eb6ba56edf..e7466c4414cb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -87,7 +87,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; fbi->screen_base = buffer->kvaddr + offset; - fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset); + fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) + + offset); fbi->screen_size = size; fbi->fix.smem_len = size; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 130a2b510d4a..e08478f19f1a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -61,11 +61,11 @@ struct fimd_driver_data { unsigned int timing_base; }; -struct fimd_driver_data exynos4_fimd_driver_data = { +static struct fimd_driver_data exynos4_fimd_driver_data = { .timing_base = 0x0, }; -struct fimd_driver_data exynos5_fimd_driver_data = { +static struct fimd_driver_data exynos5_fimd_driver_data = { .timing_base = 0x20000, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 60b877a388c2..862ca1eb2102 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -204,7 +204,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, return ret; plane->crtc = crtc; - plane->fb = crtc->fb; exynos_plane_commit(plane); exynos_plane_dpms(plane, DRM_MODE_DPMS_ON); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 107f09befe92..9b285da4449b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1796,7 +1796,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) */ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; gfp = mapping_gfp_mask(mapping); - gfp |= __GFP_NORETRY | __GFP_NOWARN; + gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; gfp &= ~(__GFP_IO | __GFP_WAIT); for_each_sg(st->sgl, sg, page_count, i) { page = shmem_read_mapping_page_gfp(mapping, i, gfp); @@ -1809,7 +1809,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) * our own buffer, now let the real VM do its job and * go down in flames if truly OOM. 
*/ - gfp &= ~(__GFP_NORETRY | __GFP_NOWARN); + gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD); gfp |= __GFP_IO | __GFP_WAIT; i915_gem_shrink_all(dev_priv); @@ -1817,7 +1817,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (IS_ERR(page)) goto err_pages; - gfp |= __GFP_NORETRY | __GFP_NOWARN; + gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; gfp &= ~(__GFP_IO | __GFP_WAIT); } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 0ed6baff4b0c..56846ed5ee55 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -499,12 +499,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) edp = find_section(bdb, BDB_EDP); if (!edp) { - if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { - DRM_DEBUG_KMS("No eDP BDB found but eDP panel " - "supported, assume %dbpp panel color " - "depth.\n", - dev_priv->edp.bpp); - } + if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) + DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); return; } @@ -657,9 +653,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) dev_priv->lvds_use_ssc = 1; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); - - /* eDP data */ - dev_priv->edp.bpp = 18; } static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4154bcd7a070..b426d44a2b05 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3845,7 +3845,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, /* Use VBT settings if we have an eDP panel */ unsigned int edp_bpc = dev_priv->edp.bpp / 3; - if (edp_bpc < display_bpc) { + if (edp_bpc && edp_bpc < display_bpc) { DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); display_bpc = edp_bpc; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 72f41aaa71ff..442968f8b201 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2373,15 +2373,9 @@ int intel_enable_rc6(const struct drm_device *dev) if (i915_enable_rc6 >= 0) return i915_enable_rc6; - if (INTEL_INFO(dev)->gen == 5) { -#ifdef CONFIG_INTEL_IOMMU - /* Disable rc6 on ilk if VT-d is on. 
*/ - if (intel_iommu_gfx_mapped) - return false; -#endif - DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n"); - return INTEL_RC6_ENABLE; - } + /* Disable RC6 on Ironlake */ + if (INTEL_INFO(dev)->gen == 5) + return 0; if (IS_HASWELL(dev)) { DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index c600fb06e25e..a6ac0b416964 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2201,7 +2201,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } - intel_sdvo->base.cloneable = true; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (intel_sdvo->is_hdmi) @@ -2232,7 +2231,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; - intel_sdvo->base.cloneable = false; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); @@ -2275,8 +2273,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; } - intel_sdvo->base.cloneable = true; - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); return true; @@ -2307,9 +2303,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; } - /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */ - intel_sdvo->base.cloneable = false; - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; @@ -2721,6 +2714,16 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) goto err_output; } + /* + * Cloning SDVO with anything is often impossible, since the SDVO + * encoder can request a special input timing mode. And even if that's + * not the case we have evidence that cloning a plain unscaled mode with + * VGA doesn't really work. Furthermore the cloning flags are way too + * simplistic anyway to express such constraints, so just give up on + * cloning for SDVO encoders. + */ + intel_sdvo->base.cloneable = false; + /* Only enable the hotplug irq if we need it, to work around noisy * hotplug lines. 
*/ diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 3bce0299f64a..24d932f53203 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1696,42 +1696,22 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) return ATOM_PPLL2; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; - } else if (ASIC_IS_AVIVO(rdev)) { - /* in DP mode, the DP ref clock can come from either PPLL - * depending on the asic: - * DCE3: PPLL1 or PPLL2 - */ - if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { - /* use the same PPLL for all DP monitors */ - pll = radeon_get_shared_dp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } else { - /* use the same PPLL for all monitors with the same clock */ - pll = radeon_get_shared_nondp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } - /* all other cases */ - pll_in_use = radeon_get_pll_use_mask(crtc); - /* the order shouldn't matter here, but we probably - * need this until we have atomic modeset - */ - if (rdev->flags & RADEON_IS_IGP) { - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; - } else { - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - } - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; } else { /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */ + /* some atombios (observed in some DCE2/DCE3) code have a bug, + * the matching btw pll and crtc is done through + * PCLK_CRTC[1|2]_CNTL (0x480/0x484) but atombios code use the + * pll (1 or 2) to select which register to write. ie if using + * pll1 it will use PCLK_CRTC1_CNTL (0x480) and if using pll2 + * it will use PCLK_CRTC2_CNTL (0x484), it then use crtc id to + * choose which value to write. Which is reverse order from + * register logic. So only case that works is when pllid is + * same as crtcid or when both pll and crtc are enabled and + * both use same clock. 
+ * + * So just return crtc id as if crtc and pll were hard linked + * together even if they aren't + */ return radeon_crtc->crtc_id; } } diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c index 443ad64b7f2a..d88d9be1d1b7 100644 --- a/drivers/input/matrix-keymap.c +++ b/drivers/input/matrix-keymap.c @@ -23,6 +23,7 @@ #include #include #include +#include #include static bool matrix_keypad_map_key(struct input_dev *input_dev, @@ -161,3 +162,5 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, return 0; } EXPORT_SYMBOL(matrix_keypad_build_keymap); + +MODULE_LICENSE("GPL"); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 636bae0405e8..a0f73092176e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -963,7 +963,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) struct r1conf *conf = mddev->private; struct bio *bio; - if (from_schedule) { + if (from_schedule || current->bio_list) { spin_lock_irq(&conf->device_lock); bio_list_merge(&conf->pending_bio_list, &plug->pending); conf->pending_count += plug->pending_cnt; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0d5d0ff2c0f7..c9acbd717131 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1069,7 +1069,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) struct r10conf *conf = mddev->private; struct bio *bio; - if (from_schedule) { + if (from_schedule || current->bio_list) { spin_lock_irq(&conf->device_lock); bio_list_merge(&conf->pending_bio_list, &plug->pending); conf->pending_count += plug->pending_cnt; diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index 047f0f0434ec..c267c57c76fd 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -657,8 +657,7 @@ static int gsc_m2m_release(struct file *file) pr_debug("pid: %d, state: 0x%lx, refcnt= %d", task_pid_nr(current), gsc->state, gsc->m2m.refcnt); - if (mutex_lock_interruptible(&gsc->lock)) - return -ERESTARTSYS; + mutex_lock(&gsc->lock); v4l2_m2m_ctx_release(ctx->m2m_ctx); gsc_ctrls_delete(ctx); @@ -732,6 +731,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc) gsc->vdev.ioctl_ops = &gsc_m2m_ioctl_ops; gsc->vdev.release = video_device_release_empty; gsc->vdev.lock = &gsc->lock; + gsc->vdev.vfl_dir = VFL_DIR_M2M; snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m", GSC_MODULE_NAME, gsc->id); diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.h b/drivers/media/platform/exynos-gsc/gsc-regs.h index 533e9947a925..4678f9a6a4fd 100644 --- a/drivers/media/platform/exynos-gsc/gsc-regs.h +++ b/drivers/media/platform/exynos-gsc/gsc-regs.h @@ -40,10 +40,10 @@ #define GSC_IN_ROT_YFLIP (2 << 16) #define GSC_IN_ROT_XFLIP (1 << 16) #define GSC_IN_RGB_TYPE_MASK (3 << 14) -#define GSC_IN_RGB_HD_WIDE (3 << 14) -#define GSC_IN_RGB_HD_NARROW (2 << 14) -#define GSC_IN_RGB_SD_WIDE (1 << 14) -#define GSC_IN_RGB_SD_NARROW (0 << 14) +#define GSC_IN_RGB_HD_NARROW (3 << 14) +#define GSC_IN_RGB_HD_WIDE (2 << 14) +#define GSC_IN_RGB_SD_NARROW (1 << 14) +#define GSC_IN_RGB_SD_WIDE (0 << 14) #define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13) #define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13) #define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13) @@ -85,10 +85,10 @@ #define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24) #define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24) #define GSC_OUT_RGB_TYPE_MASK (3 << 10) -#define GSC_OUT_RGB_HD_NARROW (3 << 10) -#define GSC_OUT_RGB_HD_WIDE (2 << 10) -#define 
GSC_OUT_RGB_SD_NARROW (1 << 10) -#define GSC_OUT_RGB_SD_WIDE (0 << 10) +#define GSC_OUT_RGB_HD_WIDE (3 << 10) +#define GSC_OUT_RGB_HD_NARROW (2 << 10) +#define GSC_OUT_RGB_SD_WIDE (1 << 10) +#define GSC_OUT_RGB_SD_NARROW (0 << 10) #define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9) #define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9) #define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9) diff --git a/drivers/media/platform/s5p-fimc/fimc-capture.c b/drivers/media/platform/s5p-fimc/fimc-capture.c index 0d0aca500876..fdb6740248a7 100644 --- a/drivers/media/platform/s5p-fimc/fimc-capture.c +++ b/drivers/media/platform/s5p-fimc/fimc-capture.c @@ -556,8 +556,7 @@ static int fimc_capture_close(struct file *file) dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state); - if (mutex_lock_interruptible(&fimc->lock)) - return -ERESTARTSYS; + mutex_lock(&fimc->lock); if (--fimc->vid_cap.refcnt == 0) { clear_bit(ST_CAPT_BUSY, &fimc->state); @@ -1783,9 +1782,13 @@ static int fimc_capture_subdev_registered(struct v4l2_subdev *sd) if (ret) return ret; + fimc->pipeline_ops = v4l2_get_subdev_hostdata(sd); + ret = fimc_register_capture_device(fimc, sd->v4l2_dev); - if (ret) + if (ret) { fimc_unregister_m2m_device(fimc); + fimc->pipeline_ops = NULL; + } return ret; } @@ -1802,6 +1805,7 @@ static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd) if (video_is_registered(&fimc->vid_cap.vfd)) { video_unregister_device(&fimc->vid_cap.vfd); media_entity_cleanup(&fimc->vid_cap.vfd.entity); + fimc->pipeline_ops = NULL; } kfree(fimc->vid_cap.ctx); fimc->vid_cap.ctx = NULL; diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c index 9db246bed841..1b309a72f09f 100644 --- a/drivers/media/platform/s5p-fimc/fimc-lite.c +++ b/drivers/media/platform/s5p-fimc/fimc-lite.c @@ -491,8 +491,7 @@ static int fimc_lite_close(struct file *file) struct fimc_lite *fimc = video_drvdata(file); int ret; - if (mutex_lock_interruptible(&fimc->lock)) - return -ERESTARTSYS; + mutex_lock(&fimc->lock); if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) { clear_bit(ST_FLITE_IN_USE, &fimc->state); @@ -1263,10 +1262,12 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd) return ret; video_set_drvdata(vfd, fimc); + fimc->pipeline_ops = v4l2_get_subdev_hostdata(sd); ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); if (ret < 0) { media_entity_cleanup(&vfd->entity); + fimc->pipeline_ops = NULL; return ret; } @@ -1285,6 +1286,7 @@ static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd) if (video_is_registered(&fimc->vfd)) { video_unregister_device(&fimc->vfd); media_entity_cleanup(&fimc->vfd.entity); + fimc->pipeline_ops = NULL; } } diff --git a/drivers/media/platform/s5p-fimc/fimc-m2m.c b/drivers/media/platform/s5p-fimc/fimc-m2m.c index 26bcf4bc4209..1d21da4bd24b 100644 --- a/drivers/media/platform/s5p-fimc/fimc-m2m.c +++ b/drivers/media/platform/s5p-fimc/fimc-m2m.c @@ -728,8 +728,7 @@ static int fimc_m2m_release(struct file *file) dbg("pid: %d, state: 0x%lx, refcnt= %d", task_pid_nr(current), fimc->state, fimc->m2m.refcnt); - if (mutex_lock_interruptible(&fimc->lock)) - return -ERESTARTSYS; + mutex_lock(&fimc->lock); v4l2_m2m_ctx_release(ctx->m2m_ctx); fimc_ctrls_delete(ctx); diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c index 9bd5dd416220..1bd5678cfeb9 100644 --- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c +++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c @@ -352,6 +352,7 @@ static int 
fimc_register_callback(struct device *dev, void *p) sd = &fimc->vid_cap.subdev; sd->grp_id = FIMC_GROUP_ID; + v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops); ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (ret) { @@ -360,7 +361,6 @@ static int fimc_register_callback(struct device *dev, void *p) return ret; } - fimc->pipeline_ops = &fimc_pipeline_ops; fmd->fimc[fimc->id] = fimc; return 0; } @@ -375,6 +375,7 @@ static int fimc_lite_register_callback(struct device *dev, void *p) return 0; fimc->subdev.grp_id = FLITE_GROUP_ID; + v4l2_set_subdev_hostdata(&fimc->subdev, (void *)&fimc_pipeline_ops); ret = v4l2_device_register_subdev(&fmd->v4l2_dev, &fimc->subdev); if (ret) { @@ -384,7 +385,6 @@ static int fimc_lite_register_callback(struct device *dev, void *p) return ret; } - fimc->pipeline_ops = &fimc_pipeline_ops; fmd->fimc_lite[fimc->index] = fimc; return 0; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 130f4ac8649e..3afe879d54d7 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -381,11 +381,8 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream, dev); if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC && - s5p_mfc_hw_call(dev->mfc_ops, - get_dec_frame_type, dev) == - S5P_FIMV_DECODE_FRAME_P_FRAME - && ctx->consumed_stream + STUFF_BYTE < - src_buf->b->v4l2_planes[0].bytesused) { + ctx->consumed_stream + STUFF_BYTE < + src_buf->b->v4l2_planes[0].bytesused) { /* Run MFC again on the same buffer */ mfc_debug(2, "Running again the same buffer\n"); ctx->after_packed_pb = 1; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index 50b5bee3c44e..3a8cfd9fc1bd 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -1762,7 +1762,7 @@ int s5p_mfc_get_dspl_y_adr_v6(struct s5p_mfc_dev *dev) int s5p_mfc_get_dec_y_adr_v6(struct s5p_mfc_dev *dev) { - return mfc_read(dev, S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6); + return mfc_read(dev, S5P_FIMV_D_DECODED_LUMA_ADDR_V6); } int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev) diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index a54dd5d7a5f9..c9ec725884e5 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -373,18 +373,25 @@ static struct sdhci_ops sdhci_s3c_ops = { static void sdhci_s3c_notify_change(struct platform_device *dev, int state) { struct sdhci_host *host = platform_get_drvdata(dev); + struct sdhci_s3c *sc = sdhci_priv(host); unsigned long flags; if (host) { spin_lock_irqsave(&host->lock, flags); if (state) { dev_dbg(&dev->dev, "card inserted.\n"); +#ifdef CONFIG_PM_RUNTIME + clk_prepare_enable(sc->clk_io); +#endif host->flags &= ~SDHCI_DEVICE_DEAD; host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; } else { dev_dbg(&dev->dev, "card removed.\n"); host->flags |= SDHCI_DEVICE_DEAD; host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; +#ifdef CONFIG_PM_RUNTIME + clk_disable_unprepare(sc->clk_io); +#endif } tasklet_schedule(&host->card_tasklet); spin_unlock_irqrestore(&host->lock, flags); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index d25bc97dc5c6..7eaee3eeb6b2 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1104,7 +1104,6 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) { struct sh_mmcif_host *host = dev_id; 
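A recurring change in the exynos-gsc and s5p-fimc hunks above is replacing mutex_lock_interruptible() with mutex_lock() in the drivers' release() handlers. A release handler runs exactly once and nobody retries it, so letting the lock attempt fail (returning -ERESTARTSYS) leaks the context and its refcount. The userspace C sketch below only illustrates that idea, with invented names and a pthreads mutex standing in for the kernel mutex; it is not the driver code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static void *ctx;
static int refcnt = 1;

/*
 * Cleanup runs exactly once.  The old kernel code used an interruptible
 * lock here and bailed out on failure, so refcnt was never dropped and
 * ctx was never freed -- nothing ever calls release() a second time.
 */
static int release(void)
{
	pthread_mutex_lock(&dev_lock);	/* may block, but cannot fail */
	free(ctx);
	ctx = NULL;
	refcnt--;
	pthread_mutex_unlock(&dev_lock);
	return 0;
}

int main(void)
{
	ctx = malloc(64);
	release();
	printf("refcnt=%d ctx=%p\n", refcnt, ctx);
	return 0;
}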
struct mmc_request *mrq = host->mrq; - struct mmc_data *data = mrq->data; cancel_delayed_work_sync(&host->timeout_work); @@ -1152,13 +1151,14 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) case MMCIF_WAIT_FOR_READ_END: case MMCIF_WAIT_FOR_WRITE_END: if (host->sd_error) - data->error = sh_mmcif_error_manage(host); + mrq->data->error = sh_mmcif_error_manage(host); break; default: BUG(); } if (host->wait_for != MMCIF_WAIT_FOR_STOP) { + struct mmc_data *data = mrq->data; if (!mrq->cmd->error && data && !data->error) data->bytes_xfered = data->blocks * data->blksz; @@ -1231,10 +1231,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) host->sd_error = true; dev_dbg(&host->pd->dev, "int err state = %08x\n", state); } - if (host->state == STATE_IDLE) { - dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state); - return IRQ_HANDLED; - } if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { if (!host->dma_active) return IRQ_WAKE_THREAD; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index da7b44998b40..2144f611196e 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -498,7 +498,7 @@ out: * @ubi: UBI device description object * * This function returns a physical eraseblock in case of success and a - * negative error code in case of failure. Might sleep. + * negative error code in case of failure. */ static int __wl_get_peb(struct ubi_device *ubi) { @@ -540,13 +540,6 @@ retry: * ubi_wl_get_peb() after removing e from the pool. */ prot_queue_add(ubi, e); #endif - err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, - ubi->peb_size - ubi->vid_hdr_aloffset); - if (err) { - ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum); - return err; - } - return e->pnum; } @@ -679,17 +672,30 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) #else static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) { - return find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); + struct ubi_wl_entry *e; + + e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); + self_check_in_wl_tree(ubi, e, &ubi->free); + rb_erase(&e->u.rb, &ubi->free); + + return e; } int ubi_wl_get_peb(struct ubi_device *ubi) { - int peb; + int peb, err; spin_lock(&ubi->wl_lock); peb = __wl_get_peb(ubi); spin_unlock(&ubi->wl_lock); + err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, + ubi->peb_size - ubi->vid_hdr_aloffset); + if (err) { + ubi_err("new PEB %d does not contain all 0xFF bytes", peb); + return err; + } + return peb; } #endif diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5f5b69f37d2e..a7d47350ea4b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3459,6 +3459,28 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) /*-------------------------- Device entry points ----------------------------*/ +static void bond_work_init_all(struct bonding *bond) +{ + INIT_DELAYED_WORK(&bond->mcast_work, + bond_resend_igmp_join_requests_delayed); + INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); + INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); + if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) + INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon); + else + INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); + INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); +} + +static void bond_work_cancel_all(struct bonding *bond) +{ + cancel_delayed_work_sync(&bond->mii_work); + cancel_delayed_work_sync(&bond->arp_work); + 
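In the ubi/wl.c hunk above, the all-0xFF self-check is moved out of __wl_get_peb() (which runs under ubi->wl_lock, a spinlock) into ubi_wl_get_peb() after the lock has been dropped, because the check reads flash and may sleep. Below is a minimal userspace analogue of that pattern, with an invented PEB counter and a pthread spinlock; it is only a sketch of "do the slow work outside the spinlock", not the UBI code.

/* build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_spinlock_t wl_lock;
static int next_peb = 42;

static int slow_check(int peb)		/* stands in for ubi_self_check_all_ff() */
{
	printf("checking PEB %d outside the lock\n", peb);
	usleep(1000);			/* may sleep; must not run under a spinlock */
	return 0;
}

static int get_peb(void)
{
	int peb;

	pthread_spin_lock(&wl_lock);
	peb = next_peb++;		/* only quick bookkeeping under the lock */
	pthread_spin_unlock(&wl_lock);

	return slow_check(peb) ? -1 : peb;
}

int main(void)
{
	pthread_spin_init(&wl_lock, PTHREAD_PROCESS_PRIVATE);
	printf("got PEB %d\n", get_peb());
	return 0;
}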
cancel_delayed_work_sync(&bond->alb_work); + cancel_delayed_work_sync(&bond->ad_work); + cancel_delayed_work_sync(&bond->mcast_work); +} + static int bond_open(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); @@ -3481,41 +3503,27 @@ static int bond_open(struct net_device *bond_dev) } read_unlock(&bond->lock); - INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); + bond_work_init_all(bond); if (bond_is_lb(bond)) { /* bond_alb_initialize must be called before the timer * is started. */ - if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) { - /* something went wrong - fail the open operation */ + if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) return -ENOMEM; - } - - INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); queue_delayed_work(bond->wq, &bond->alb_work, 0); } - if (bond->params.miimon) { /* link check interval, in milliseconds. */ - INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); + if (bond->params.miimon) /* link check interval, in milliseconds. */ queue_delayed_work(bond->wq, &bond->mii_work, 0); - } if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ - if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) - INIT_DELAYED_WORK(&bond->arp_work, - bond_activebackup_arp_mon); - else - INIT_DELAYED_WORK(&bond->arp_work, - bond_loadbalance_arp_mon); - queue_delayed_work(bond->wq, &bond->arp_work, 0); if (bond->params.arp_validate) bond->recv_probe = bond_arp_rcv; } if (bond->params.mode == BOND_MODE_8023AD) { - INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); queue_delayed_work(bond->wq, &bond->ad_work, 0); /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; @@ -3530,34 +3538,10 @@ static int bond_close(struct net_device *bond_dev) struct bonding *bond = netdev_priv(bond_dev); write_lock_bh(&bond->lock); - bond->send_peer_notif = 0; - write_unlock_bh(&bond->lock); - if (bond->params.miimon) { /* link check interval, in milliseconds. */ - cancel_delayed_work_sync(&bond->mii_work); - } - - if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ - cancel_delayed_work_sync(&bond->arp_work); - } - - switch (bond->params.mode) { - case BOND_MODE_8023AD: - cancel_delayed_work_sync(&bond->ad_work); - break; - case BOND_MODE_TLB: - case BOND_MODE_ALB: - cancel_delayed_work_sync(&bond->alb_work); - break; - default: - break; - } - - if (delayed_work_pending(&bond->mcast_work)) - cancel_delayed_work_sync(&bond->mcast_work); - + bond_work_cancel_all(bond); if (bond_is_lb(bond)) { /* Must be called only after all * slaves have been released @@ -4436,26 +4420,6 @@ static void bond_setup(struct net_device *bond_dev) bond_dev->features |= bond_dev->hw_features; } -static void bond_work_cancel_all(struct bonding *bond) -{ - if (bond->params.miimon && delayed_work_pending(&bond->mii_work)) - cancel_delayed_work_sync(&bond->mii_work); - - if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work)) - cancel_delayed_work_sync(&bond->arp_work); - - if (bond->params.mode == BOND_MODE_ALB && - delayed_work_pending(&bond->alb_work)) - cancel_delayed_work_sync(&bond->alb_work); - - if (bond->params.mode == BOND_MODE_8023AD && - delayed_work_pending(&bond->ad_work)) - cancel_delayed_work_sync(&bond->ad_work); - - if (delayed_work_pending(&bond->mcast_work)) - cancel_delayed_work_sync(&bond->mcast_work); -} - /* * Destroy a bonding device. * Must be under rtnl_lock when this function is called. 
@@ -4706,12 +4670,13 @@ static int bond_check_params(struct bond_params *params) arp_ip_count++) { /* not complete check, but should be good enough to catch mistakes */ - if (!isdigit(arp_ip_target[arp_ip_count][0])) { + __be32 ip = in_aton(arp_ip_target[arp_ip_count]); + if (!isdigit(arp_ip_target[arp_ip_count][0]) || + ip == 0 || ip == htonl(INADDR_BROADCAST)) { pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", arp_ip_target[arp_ip_count]); arp_interval = 0; } else { - __be32 ip = in_aton(arp_ip_target[arp_ip_count]); arp_target[arp_ip_count] = ip; } } diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index ef8d2a080d17..1877ed7ca086 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -513,6 +513,8 @@ static ssize_t bonding_store_arp_interval(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no arp_interval value specified.\n", bond->dev->name); @@ -539,10 +541,6 @@ static ssize_t bonding_store_arp_interval(struct device *d, pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", bond->dev->name, bond->dev->name); bond->params.miimon = 0; - if (delayed_work_pending(&bond->mii_work)) { - cancel_delayed_work(&bond->mii_work); - flush_workqueue(bond->wq); - } } if (!bond->params.arp_targets[0]) { pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", @@ -554,19 +552,12 @@ static ssize_t bonding_store_arp_interval(struct device *d, * timer will get fired off when the open function * is called. */ - if (!delayed_work_pending(&bond->arp_work)) { - if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) - INIT_DELAYED_WORK(&bond->arp_work, - bond_activebackup_arp_mon); - else - INIT_DELAYED_WORK(&bond->arp_work, - bond_loadbalance_arp_mon); - - queue_delayed_work(bond->wq, &bond->arp_work, 0); - } + cancel_delayed_work_sync(&bond->mii_work); + queue_delayed_work(bond->wq, &bond->arp_work, 0); } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR, @@ -962,6 +953,8 @@ static ssize_t bonding_store_miimon(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no miimon value specified.\n", bond->dev->name); @@ -993,10 +986,6 @@ static ssize_t bonding_store_miimon(struct device *d, bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; } - if (delayed_work_pending(&bond->arp_work)) { - cancel_delayed_work(&bond->arp_work); - flush_workqueue(bond->wq); - } } if (bond->dev->flags & IFF_UP) { @@ -1005,15 +994,12 @@ static ssize_t bonding_store_miimon(struct device *d, * timer will get fired off when the open function * is called. 
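The bond_check_params() hunk above now converts each arp_ip_target up front with in_aton() and rejects 0.0.0.0 and 255.255.255.255 in addition to the old first-character digit test. A rough userspace analogue is shown below; it uses inet_aton(), which is stricter than the kernel's in_aton(), so it is only an approximation of the check, not the bonding code.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static int valid_arp_target(const char *s)
{
	struct in_addr a;

	if (!inet_aton(s, &a))		/* not a dotted quad at all */
		return 0;
	if (a.s_addr == 0 || a.s_addr == htonl(INADDR_BROADCAST))
		return 0;		/* 0.0.0.0 and 255.255.255.255 are useless targets */
	return 1;
}

int main(void)
{
	const char *tests[] = { "192.168.1.1", "0.0.0.0", "255.255.255.255", "bogus" };

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%-16s -> %s\n", tests[i],
		       valid_arp_target(tests[i]) ? "ok" : "rejected");
	return 0;
}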
*/ - if (!delayed_work_pending(&bond->mii_work)) { - INIT_DELAYED_WORK(&bond->mii_work, - bond_mii_monitor); - queue_delayed_work(bond->wq, - &bond->mii_work, 0); - } + cancel_delayed_work_sync(&bond->arp_work); + queue_delayed_work(bond->wq, &bond->mii_work, 0); } } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, @@ -1582,6 +1568,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, goto out; } + read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (!bond_is_active_slave(slave)) { if (new_value) @@ -1590,6 +1577,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, slave->inactive = 1; } } + read_unlock(&bond->lock); out: return ret; } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 86f26a1ede4c..25723d8ee201 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -519,8 +519,10 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, mc->pdev->dev.can.state = new_state; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { + struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); + peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts->hwtstamp = timeval_to_ktime(tv); } netif_rx(skb); @@ -605,6 +607,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) struct sk_buff *skb; struct can_frame *cf; struct timeval tv; + struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(mc->netdev, &cf); if (!skb) @@ -652,7 +655,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) /* convert timestamp into kernel time */ peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); /* push the skb */ netif_rx(skb); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index e1626d92511a..30d79bfa5b10 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -532,6 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, struct can_frame *can_frame; struct sk_buff *skb; struct timeval tv; + struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(netdev, &can_frame); if (!skb) @@ -549,7 +550,8 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, memcpy(can_frame->data, rx->data, can_frame->can_dlc); peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; @@ -570,6 +572,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, u8 err_mask = 0; struct sk_buff *skb; struct timeval tv; + struct skb_shared_hwtstamps *hwts; /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) @@ -664,7 +667,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, dev->can.state = new_state; peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 
b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 5d36795877cb..b799ab12a291 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -237,7 +237,7 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev, if (err) return err; - memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate)); + memcpy(priv->maxrate, tmp, sizeof(priv->maxrate)); return 0; } diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index b01f83a044c4..609125a249d9 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1060,17 +1060,22 @@ static int cp_init_rings (struct cp_private *cp) static int cp_alloc_rings (struct cp_private *cp) { + struct device *d = &cp->pdev->dev; void *mem; + int rc; - mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES, - &cp->ring_dma, GFP_KERNEL); + mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); if (!mem) return -ENOMEM; cp->rx_ring = mem; cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; - return cp_init_rings(cp); + rc = cp_init_rings(cp); + if (rc < 0) + dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); + + return rc; } static void cp_clean_rings (struct cp_private *cp) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d44cca327588..ad86660fb8f9 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1794,10 +1794,12 @@ static void team_setup(struct net_device *dev) dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_GRO; - dev->hw_features = NETIF_F_HW_VLAN_TX | + dev->hw_features = TEAM_VLAN_FEATURES | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); dev->features |= dev->hw_features; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 3b566fa0f8e6..1ea91f4237f0 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -385,6 +385,7 @@ static const struct usb_device_id products[] = { }, /* 3. Combined interface devices matching on interface number */ + {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index e9a3da588e95..760776b3d66c 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -1365,7 +1365,7 @@ static int __devinit hss_init_one(struct platform_device *pdev) platform_set_drvdata(pdev, port); - netdev_info(dev, "HSS-%i\n", port->id); + netdev_info(dev, "initialized\n"); return 0; err_free_netdev: diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 10896393e5a0..2830ea290502 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1012,12 +1012,12 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv, * As a consequence, it's not as complicated as it sounds, just add * any lower rates to the ACK rate bitmap. 
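The en_dcb_nl.c change near the top of this hunk replaces sizeof(*priv->maxrate) with sizeof(priv->maxrate): maxrate is an array member, so dereferencing it yields a single element and the old memcpy copied only the first entry. The standalone C illustration below uses an assumed struct layout (not the real mlx4 one) just to show the difference between the two sizeof expressions.

#include <stdio.h>
#include <string.h>

struct en_priv {			/* assumed stand-in for the mlx4 private struct */
	unsigned long maxrate[8];
};

int main(void)
{
	struct en_priv priv;
	unsigned long tmp[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	printf("sizeof(priv.maxrate)  = %zu bytes (whole array)\n", sizeof(priv.maxrate));
	printf("sizeof(*priv.maxrate) = %zu bytes (one element only)\n", sizeof(*priv.maxrate));

	/* the buggy version copied only one element's worth of bytes */
	memcpy(priv.maxrate, tmp, sizeof(priv.maxrate));
	printf("maxrate[7] after the full copy: %lu\n", priv.maxrate[7]);
	return 0;
}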
*/ - if (IWL_RATE_11M_INDEX < lowest_present_ofdm) - ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; - if (IWL_RATE_5M_INDEX < lowest_present_ofdm) - ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; - if (IWL_RATE_2M_INDEX < lowest_present_ofdm) - ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_11M_INDEX < lowest_present_cck) + cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_5M_INDEX < lowest_present_cck) + cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_2M_INDEX < lowest_present_cck) + cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; /* 1M already there or needed so always add */ cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE; diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index e7a4780e93db..9e198e590675 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -120,15 +120,11 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, return vq; } -static void rproc_virtio_del_vqs(struct virtio_device *vdev) +static void __rproc_virtio_del_vqs(struct virtio_device *vdev) { struct virtqueue *vq, *n; - struct rproc *rproc = vdev_to_rproc(vdev); struct rproc_vring *rvring; - /* power down the remote processor before deleting vqs */ - rproc_shutdown(rproc); - list_for_each_entry_safe(vq, n, &vdev->vqs, list) { rvring = vq->priv; rvring->vq = NULL; @@ -137,6 +133,16 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev) } } +static void rproc_virtio_del_vqs(struct virtio_device *vdev) +{ + struct rproc *rproc = vdev_to_rproc(vdev); + + /* power down the remote processor before deleting vqs */ + rproc_shutdown(rproc); + + __rproc_virtio_del_vqs(vdev); +} + static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], @@ -163,7 +169,7 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, return 0; error: - rproc_virtio_del_vqs(vdev); + __rproc_virtio_del_vqs(vdev); return ret; } diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c index 7a82337e4dee..073108dcf9e7 100644 --- a/drivers/rtc/rtc-tps65910.c +++ b/drivers/rtc/rtc-tps65910.c @@ -288,11 +288,11 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev) static int __devexit tps65910_rtc_remove(struct platform_device *pdev) { /* leave rtc running, but disable irqs */ - struct rtc_device *rtc = platform_get_drvdata(pdev); + struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev); - tps65910_rtc_alarm_irq_enable(&rtc->dev, 0); + tps65910_rtc_alarm_irq_enable(&pdev->dev, 0); - rtc_device_unregister(rtc); + rtc_device_unregister(tps_rtc->rtc); return 0; } diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 16b7a72a70c4..3b2365c8eab2 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1276,7 +1276,7 @@ struct megasas_evt_detail { } __attribute__ ((packed)); struct megasas_aen_event { - struct work_struct hotplug_work; + struct delayed_work hotplug_work; struct megasas_instance *instance; }; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d2c5366aff7f..e4f2baacf1e1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2060,9 +2060,9 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) } else { ev->instance = instance; instance->ev = ev; - 
INIT_WORK(&ev->hotplug_work, megasas_aen_polling); - schedule_delayed_work( - (struct delayed_work *)&ev->hotplug_work, 0); + INIT_DELAYED_WORK(&ev->hotplug_work, + megasas_aen_polling); + schedule_delayed_work(&ev->hotplug_work, 0); } } } @@ -4352,8 +4352,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) /* cancel the delayed work if this work still in queue */ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; - cancel_delayed_work_sync( - (struct delayed_work *)&ev->hotplug_work); + cancel_delayed_work_sync(&ev->hotplug_work); instance->ev = NULL; } @@ -4545,8 +4544,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) /* cancel the delayed work if this work still in queue*/ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; - cancel_delayed_work_sync( - (struct delayed_work *)&ev->hotplug_work); + cancel_delayed_work_sync(&ev->hotplug_work); instance->ev = NULL; } @@ -5190,7 +5188,7 @@ static void megasas_aen_polling(struct work_struct *work) { struct megasas_aen_event *ev = - container_of(work, struct megasas_aen_event, hotplug_work); + container_of(work, struct megasas_aen_event, hotplug_work.work); struct megasas_instance *instance = ev->instance; union megasas_evt_class_locale class_locale; struct Scsi_Host *host; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9097155e9ebe..dcecbfb17243 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1819,8 +1819,10 @@ void target_execute_cmd(struct se_cmd *cmd) /* * If the received CDB has aleady been aborted stop processing it here. */ - if (transport_check_aborted_status(cmd, 1)) + if (transport_check_aborted_status(cmd, 1)) { + complete(&cmd->t_transport_stop_comp); return; + } /* * Determine if IOCTL context caller in requesting the stopping of this @@ -3067,7 +3069,7 @@ void transport_send_task_abort(struct se_cmd *cmd) unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { + if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 99ac2cb08b43..dedaf81d8f36 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1076,7 +1076,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, } _iov = iov + ret; size = reg->memory_size - addr + reg->guest_phys_addr; - _iov->iov_len = min((u64)len, size); + _iov->iov_len = min((u64)len - s, size); _iov->iov_base = (void __user *)(unsigned long) (reg->userspace_addr + addr - reg->guest_phys_addr); s += size; diff --git a/fs/block_dev.c b/fs/block_dev.c index 1a1e5e3b1eaf..ab3a456f6650 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -70,19 +70,6 @@ static void bdev_inode_switch_bdi(struct inode *inode, spin_unlock(&dst->wb.list_lock); } -sector_t blkdev_max_block(struct block_device *bdev) -{ - sector_t retval = ~((sector_t)0); - loff_t sz = i_size_read(bdev->bd_inode); - - if (sz) { - unsigned int size = block_size(bdev); - unsigned int sizebits = blksize_bits(size); - retval = (sz >> sizebits); - } - return retval; -} - /* Kill _all_ buffers and pagecache , dirty or not.. 
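In the vhost translate_desc() fix above, each scatter entry must be capped by the bytes still outstanding (len - s), not by the total len; otherwise a request that spans several memory regions can be handed more space than was asked for. The simplified, self-contained sketch below uses an invented region table and none of the real vhost structures.

#include <stdio.h>

struct region { unsigned long long base, size; };

int main(void)
{
	struct region regions[] = { { 0x1000, 100 }, { 0x2000, 100 }, { 0x3000, 100 } };
	unsigned long long len = 150, done = 0;

	for (unsigned i = 0; i < 3 && done < len; i++) {
		unsigned long long avail = regions[i].size;
		/* cap by what is still outstanding (len - done); the buggy
		 * variant capped by the total len and would hand out
		 * 100 + 100 = 200 bytes for this 150-byte request */
		unsigned long long take = (len - done) < avail ? len - done : avail;

		printf("region %u @0x%llx: %llu bytes\n", i, regions[i].base, take);
		done += take;
	}
	printf("total %llu of %llu bytes mapped\n", done, len);
	return 0;
}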
*/ void kill_bdev(struct block_device *bdev) { @@ -116,8 +103,6 @@ EXPORT_SYMBOL(invalidate_bdev); int set_blocksize(struct block_device *bdev, int size) { - struct address_space *mapping; - /* Size must be a power of two, and between 512 and PAGE_SIZE */ if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) return -EINVAL; @@ -126,19 +111,6 @@ int set_blocksize(struct block_device *bdev, int size) if (size < bdev_logical_block_size(bdev)) return -EINVAL; - /* Prevent starting I/O or mapping the device */ - percpu_down_write(&bdev->bd_block_size_semaphore); - - /* Check that the block device is not memory mapped */ - mapping = bdev->bd_inode->i_mapping; - mutex_lock(&mapping->i_mmap_mutex); - if (mapping_mapped(mapping)) { - mutex_unlock(&mapping->i_mmap_mutex); - percpu_up_write(&bdev->bd_block_size_semaphore); - return -EBUSY; - } - mutex_unlock(&mapping->i_mmap_mutex); - /* Don't change the size if it is same as current */ if (bdev->bd_block_size != size) { sync_blockdev(bdev); @@ -146,9 +118,6 @@ int set_blocksize(struct block_device *bdev, int size) bdev->bd_inode->i_blkbits = blksize_bits(size); kill_bdev(bdev); } - - percpu_up_write(&bdev->bd_block_size_semaphore); - return 0; } @@ -181,52 +150,12 @@ static int blkdev_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create) { - if (iblock >= blkdev_max_block(I_BDEV(inode))) { - if (create) - return -EIO; - - /* - * for reads, we're just trying to fill a partial page. - * return a hole, they will have to call get_block again - * before they can fill it, and they will get -EIO at that - * time - */ - return 0; - } bh->b_bdev = I_BDEV(inode); bh->b_blocknr = iblock; set_buffer_mapped(bh); return 0; } -static int -blkdev_get_blocks(struct inode *inode, sector_t iblock, - struct buffer_head *bh, int create) -{ - sector_t end_block = blkdev_max_block(I_BDEV(inode)); - unsigned long max_blocks = bh->b_size >> inode->i_blkbits; - - if ((iblock + max_blocks) > end_block) { - max_blocks = end_block - iblock; - if ((long)max_blocks <= 0) { - if (create) - return -EIO; /* write fully beyond EOF */ - /* - * It is a read which is fully beyond EOF. 
We return - * a !buffer_mapped buffer - */ - max_blocks = 0; - } - } - - bh->b_bdev = I_BDEV(inode); - bh->b_blocknr = iblock; - bh->b_size = max_blocks << inode->i_blkbits; - if (max_blocks) - set_buffer_mapped(bh); - return 0; -} - static ssize_t blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) @@ -235,7 +164,7 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, struct inode *inode = file->f_mapping->host; return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset, - nr_segs, blkdev_get_blocks, NULL, NULL, 0); + nr_segs, blkdev_get_block, NULL, NULL, 0); } int __sync_blockdev(struct block_device *bdev, int wait) @@ -459,12 +388,6 @@ static struct inode *bdev_alloc_inode(struct super_block *sb) struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); if (!ei) return NULL; - - if (unlikely(percpu_init_rwsem(&ei->bdev.bd_block_size_semaphore))) { - kmem_cache_free(bdev_cachep, ei); - return NULL; - } - return &ei->vfs_inode; } @@ -473,8 +396,6 @@ static void bdev_i_callback(struct rcu_head *head) struct inode *inode = container_of(head, struct inode, i_rcu); struct bdev_inode *bdi = BDEV_I(inode); - percpu_free_rwsem(&bdi->bdev.bd_block_size_semaphore); - kmem_cache_free(bdev_cachep, bdi); } @@ -1593,22 +1514,6 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) return blkdev_ioctl(bdev, mode, cmd, arg); } -ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) -{ - ssize_t ret; - struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host); - - percpu_down_read(&bdev->bd_block_size_semaphore); - - ret = generic_file_aio_read(iocb, iov, nr_segs, pos); - - percpu_up_read(&bdev->bd_block_size_semaphore); - - return ret; -} -EXPORT_SYMBOL_GPL(blkdev_aio_read); - /* * Write data to the block device. Only intended for the block device itself * and the raw driver which basically is a fake block device. 
@@ -1620,16 +1525,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct file *file = iocb->ki_filp; - struct block_device *bdev = I_BDEV(file->f_mapping->host); struct blk_plug plug; ssize_t ret; BUG_ON(iocb->ki_pos != pos); blk_start_plug(&plug); - - percpu_down_read(&bdev->bd_block_size_semaphore); - ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); if (ret > 0 || ret == -EIOCBQUEUED) { ssize_t err; @@ -1638,62 +1539,27 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, if (err < 0 && ret > 0) ret = err; } - - percpu_up_read(&bdev->bd_block_size_semaphore); - blk_finish_plug(&plug); - return ret; } EXPORT_SYMBOL_GPL(blkdev_aio_write); -static int blkdev_mmap(struct file *file, struct vm_area_struct *vma) -{ - int ret; - struct block_device *bdev = I_BDEV(file->f_mapping->host); - - percpu_down_read(&bdev->bd_block_size_semaphore); - - ret = generic_file_mmap(file, vma); - - percpu_up_read(&bdev->bd_block_size_semaphore); - - return ret; -} - -static ssize_t blkdev_splice_read(struct file *file, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, - unsigned int flags) -{ - ssize_t ret; - struct block_device *bdev = I_BDEV(file->f_mapping->host); - - percpu_down_read(&bdev->bd_block_size_semaphore); - - ret = generic_file_splice_read(file, ppos, pipe, len, flags); - - percpu_up_read(&bdev->bd_block_size_semaphore); - - return ret; -} - -static ssize_t blkdev_splice_write(struct pipe_inode_info *pipe, - struct file *file, loff_t *ppos, size_t len, - unsigned int flags) +static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) { - ssize_t ret; - struct block_device *bdev = I_BDEV(file->f_mapping->host); - - percpu_down_read(&bdev->bd_block_size_semaphore); - - ret = generic_file_splice_write(pipe, file, ppos, len, flags); + struct file *file = iocb->ki_filp; + struct inode *bd_inode = file->f_mapping->host; + loff_t size = i_size_read(bd_inode); - percpu_up_read(&bdev->bd_block_size_semaphore); + if (pos >= size) + return 0; - return ret; + size -= pos; + if (size < INT_MAX) + nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size); + return generic_file_aio_read(iocb, iov, nr_segs, pos); } - /* * Try to release a page associated with block device when the system * is under memory pressure. 
@@ -1724,16 +1590,16 @@ const struct file_operations def_blk_fops = { .llseek = block_llseek, .read = do_sync_read, .write = do_sync_write, - .aio_read = blkdev_aio_read, + .aio_read = blkdev_aio_read, .aio_write = blkdev_aio_write, - .mmap = blkdev_mmap, + .mmap = generic_file_mmap, .fsync = blkdev_fsync, .unlocked_ioctl = block_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_blkdev_ioctl, #endif - .splice_read = blkdev_splice_read, - .splice_write = blkdev_splice_write, + .splice_read = generic_file_splice_read, + .splice_write = generic_file_splice_write, }; int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg) diff --git a/fs/buffer.c b/fs/buffer.c index b5f044283edb..ec0aca8ba6bf 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -911,6 +911,18 @@ link_dev_buffers(struct page *page, struct buffer_head *head) attach_page_buffers(page, head); } +static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size) +{ + sector_t retval = ~((sector_t)0); + loff_t sz = i_size_read(bdev->bd_inode); + + if (sz) { + unsigned int sizebits = blksize_bits(size); + retval = (sz >> sizebits); + } + return retval; +} + /* * Initialise the state of a blockdev page's buffers. */ @@ -921,7 +933,7 @@ init_page_buffers(struct page *page, struct block_device *bdev, struct buffer_head *head = page_buffers(page); struct buffer_head *bh = head; int uptodate = PageUptodate(page); - sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode)); + sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size); do { if (!buffer_mapped(bh)) { @@ -1552,6 +1564,28 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block) } EXPORT_SYMBOL(unmap_underlying_metadata); +/* + * Size is a power-of-two in the range 512..PAGE_SIZE, + * and the case we care about most is PAGE_SIZE. + * + * So this *could* possibly be written with those + * constraints in mind (relevant mostly if some + * architecture has a slow bit-scan instruction) + */ +static inline int block_size_bits(unsigned int blocksize) +{ + return ilog2(blocksize); +} + +static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state) +{ + BUG_ON(!PageLocked(page)); + + if (!page_has_buffers(page)) + create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state); + return page_buffers(page); +} + /* * NOTE! All mapped/uptodate combinations are valid: * @@ -1589,19 +1623,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page, sector_t block; sector_t last_block; struct buffer_head *bh, *head; - const unsigned blocksize = 1 << inode->i_blkbits; + unsigned int blocksize, bbits; int nr_underway = 0; int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); - BUG_ON(!PageLocked(page)); - - last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; - - if (!page_has_buffers(page)) { - create_empty_buffers(page, blocksize, + head = create_page_buffers(page, inode, (1 << BH_Dirty)|(1 << BH_Uptodate)); - } /* * Be very careful. We have no exclusion from __set_page_dirty_buffers @@ -1613,9 +1641,12 @@ static int __block_write_full_page(struct inode *inode, struct page *page, * handle that here by just cleaning them. 
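blkdev_max_block() is rebuilt above in fs/buffer.c to take the block size explicitly: the resulting end_block is just the device size shifted down by log2(blocksize), i.e. the number of whole blocks that fit. A tiny standalone version of that arithmetic follows, using a loop instead of ilog2() and made-up sizes; it is an illustration, not the kernel helper.

#include <stdio.h>
#include <stdint.h>

static unsigned blksize_bits(unsigned size)	/* ilog2 of a power-of-two size */
{
	unsigned bits = 0;

	while (size > 1) {
		size >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	uint64_t dev_bytes = 1000204886016ULL;	/* ~1 TB disk, not block aligned */
	unsigned blocksize = 4096;
	uint64_t end_block = dev_bytes >> blksize_bits(blocksize);

	printf("blocksize %u -> %u bits, %llu whole blocks fit\n",
	       blocksize, blksize_bits(blocksize), (unsigned long long)end_block);
	return 0;
}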
*/ - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - head = page_buffers(page); bh = head; + blocksize = bh->b_size; + bbits = block_size_bits(blocksize); + + block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + last_block = (i_size_read(inode) - 1) >> bbits; /* * Get all the dirty buffers mapped to disk addresses and @@ -1806,12 +1837,10 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len, BUG_ON(to > PAGE_CACHE_SIZE); BUG_ON(from > to); - blocksize = 1 << inode->i_blkbits; - if (!page_has_buffers(page)) - create_empty_buffers(page, blocksize, 0); - head = page_buffers(page); + head = create_page_buffers(page, inode, 0); + blocksize = head->b_size; + bbits = block_size_bits(blocksize); - bbits = inode->i_blkbits; block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); for(bh = head, block_start = 0; bh != head || !block_start; @@ -1881,11 +1910,11 @@ static int __block_commit_write(struct inode *inode, struct page *page, unsigned blocksize; struct buffer_head *bh, *head; - blocksize = 1 << inode->i_blkbits; + bh = head = page_buffers(page); + blocksize = bh->b_size; - for(bh = head = page_buffers(page), block_start = 0; - bh != head || !block_start; - block_start=block_end, bh = bh->b_this_page) { + block_start = 0; + do { block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (!buffer_uptodate(bh)) @@ -1895,7 +1924,10 @@ static int __block_commit_write(struct inode *inode, struct page *page, mark_buffer_dirty(bh); } clear_buffer_new(bh); - } + + block_start = block_end; + bh = bh->b_this_page; + } while (bh != head); /* * If this is a partial write which happened to make all buffers @@ -2020,7 +2052,6 @@ EXPORT_SYMBOL(generic_write_end); int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, unsigned long from) { - struct inode *inode = page->mapping->host; unsigned block_start, block_end, blocksize; unsigned to; struct buffer_head *bh, *head; @@ -2029,13 +2060,13 @@ int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, if (!page_has_buffers(page)) return 0; - blocksize = 1 << inode->i_blkbits; + head = page_buffers(page); + blocksize = head->b_size; to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count); to = from + to; if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) return 0; - head = page_buffers(page); bh = head; block_start = 0; do { @@ -2068,18 +2099,16 @@ int block_read_full_page(struct page *page, get_block_t *get_block) struct inode *inode = page->mapping->host; sector_t iblock, lblock; struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; - unsigned int blocksize; + unsigned int blocksize, bbits; int nr, i; int fully_mapped = 1; - BUG_ON(!PageLocked(page)); - blocksize = 1 << inode->i_blkbits; - if (!page_has_buffers(page)) - create_empty_buffers(page, blocksize, 0); - head = page_buffers(page); + head = create_page_buffers(page, inode, 0); + blocksize = head->b_size; + bbits = block_size_bits(blocksize); - iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits; + iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + lblock = (i_size_read(inode)+blocksize-1) >> bbits; bh = head; nr = 0; i = 0; @@ -2864,6 +2893,55 @@ static void end_bio_bh_io_sync(struct bio *bio, int err) bio_put(bio); } +/* + * This allows us to do IO even on the odd last sectors + * of a device, even if the bh block size is some multiple + * of the physical sector size. 
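The guard_bh_eod() helper introduced just below truncates a buffer-head I/O that would run past the end of the device and, for reads, zeroes the part of the buffer that can no longer be filled. Here is a self-contained userspace sketch of the same clamping, with an invented struct io standing in for the bio/buffer_head pair; it mirrors the idea, not the kernel code.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct io {
	uint64_t sector;	/* start sector, 512-byte units */
	uint32_t bytes;		/* requested length */
	char buf[4096];
};

static void guard_eod(struct io *io, uint64_t dev_bytes, int is_read)
{
	uint64_t maxsector = dev_bytes >> 9;

	if (!maxsector || io->sector >= maxsector)
		return;				/* fully out of range: let it fail as EIO */

	maxsector -= io->sector;
	if ((io->bytes >> 9) <= maxsector)
		return;				/* fits entirely before end of device */

	uint32_t bytes = maxsector << 9;	/* truncate to the device size */
	if (is_read)				/* reads: clear what we cannot fill */
		memset(io->buf + bytes, 0, io->bytes - bytes);
	io->bytes = bytes;
}

int main(void)
{
	struct io io = { .sector = 7, .bytes = 4096 };

	memset(io.buf, 0xff, sizeof(io.buf));
	guard_eod(&io, 8 * 512, 1);		/* 8-sector device, read at sector 7 */
	printf("clamped to %u bytes, buf[512] = %d\n", io.bytes, io.buf[512]);
	return 0;
}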
+ * + * We'll just truncate the bio to the size of the device, + * and clear the end of the buffer head manually. + * + * Truly out-of-range accesses will turn into actual IO + * errors, this only handles the "we need to be able to + * do IO at the final sector" case. + */ +static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh) +{ + sector_t maxsector; + unsigned bytes; + + maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; + if (!maxsector) + return; + + /* + * If the *whole* IO is past the end of the device, + * let it through, and the IO layer will turn it into + * an EIO. + */ + if (unlikely(bio->bi_sector >= maxsector)) + return; + + maxsector -= bio->bi_sector; + bytes = bio->bi_size; + if (likely((bytes >> 9) <= maxsector)) + return; + + /* Uhhuh. We've got a bh that straddles the device size! */ + bytes = maxsector << 9; + + /* Truncate the bio.. */ + bio->bi_size = bytes; + bio->bi_io_vec[0].bv_len = bytes; + + /* ..and clear the end of the buffer for reads */ + if ((rw & RW_MASK) == READ) { + void *kaddr = kmap_atomic(bh->b_page); + memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes); + kunmap_atomic(kaddr); + } +} + int submit_bh(int rw, struct buffer_head * bh) { struct bio *bio; @@ -2900,6 +2978,9 @@ int submit_bh(int rw, struct buffer_head * bh) bio->bi_end_io = end_bio_bh_io_sync; bio->bi_private = bh; + /* Take care of bh's that straddle the end of the device */ + guard_bh_eod(rw, bio, bh); + bio_get(bio); submit_bio(rw, bio); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index edb25b4bbb95..70b6f4c3a0c1 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1794,7 +1794,6 @@ static int cifs_writepages(struct address_space *mapping, struct TCP_Server_Info *server; struct page *page; int rc = 0; - loff_t isize = i_size_read(mapping->host); /* * If wsize is smaller than the page cache size, default to writing @@ -1899,7 +1898,7 @@ retry: */ set_page_writeback(page); - if (page_offset(page) >= isize) { + if (page_offset(page) >= i_size_read(mapping->host)) { done = true; unlock_page(page); end_page_writeback(page); @@ -1932,7 +1931,8 @@ retry: wdata->offset = page_offset(wdata->pages[0]); wdata->pagesz = PAGE_CACHE_SIZE; wdata->tailsz = - min(isize - page_offset(wdata->pages[nr_pages - 1]), + min(i_size_read(mapping->host) - + page_offset(wdata->pages[nr_pages - 1]), (loff_t)PAGE_CACHE_SIZE); wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index f9b5d3d6cf33..1c576e871366 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -86,14 +86,17 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, dentry = d_lookup(parent, name); if (dentry) { + int err; inode = dentry->d_inode; /* update inode in place if i_ino didn't change */ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { cifs_fattr_to_inode(inode, fattr); return dentry; } - d_drop(dentry); + err = d_invalidate(dentry); dput(dentry); + if (err) + return NULL; } dentry = d_alloc(parent, name); diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 56cc4be87807..34cea2798333 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -766,7 +766,6 @@ smb_set_file_info(struct inode *inode, const char *full_path, struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = NULL; struct cifs_tcon *tcon; - FILE_BASIC_INFO info_buf; /* if the file is already open for write, just use that fileid */ open_file = find_writable_file(cinode, true); @@ -817,7 +816,7 @@ smb_set_file_info(struct inode 
*inode, const char *full_path, netpid = current->tgid; set_via_filehandle: - rc = CIFSSMBSetFileInfo(xid, tcon, &info_buf, netfid, netpid); + rc = CIFSSMBSetFileInfo(xid, tcon, buf, netfid, netpid); if (!rc) cinode->cifsAttrs = le32_to_cpu(buf->Attributes); diff --git a/fs/direct-io.c b/fs/direct-io.c index f86c720dba0e..cf5b44b10c67 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -540,6 +540,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, sector_t fs_endblk; /* Into file, in filesystem-sized blocks */ unsigned long fs_count; /* Number of filesystem-sized blocks */ int create; + unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; /* * If there was a memory error and we've overwritten all the @@ -554,7 +555,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, fs_count = fs_endblk - fs_startblk + 1; map_bh->b_state = 0; - map_bh->b_size = fs_count << dio->inode->i_blkbits; + map_bh->b_size = fs_count << i_blkbits; /* * For writes inside i_size on a DIO_SKIP_HOLES filesystem we @@ -1053,7 +1054,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, int seg; size_t size; unsigned long addr; - unsigned blkbits = inode->i_blkbits; + unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); + unsigned blkbits = i_blkbits; unsigned blocksize_mask = (1 << blkbits) - 1; ssize_t retval = -EINVAL; loff_t end = offset; @@ -1149,7 +1151,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, dio->inode = inode; dio->rw = rw; sdio.blkbits = blkbits; - sdio.blkfactor = inode->i_blkbits - blkbits; + sdio.blkfactor = i_blkbits - blkbits; sdio.block_in_file = offset >> blkbits; sdio.get_block = get_block; diff --git a/fs/file.c b/fs/file.c index 7cb71b992603..eff23162485f 100644 --- a/fs/file.c +++ b/fs/file.c @@ -994,16 +994,18 @@ int iterate_fd(struct files_struct *files, unsigned n, const void *p) { struct fdtable *fdt; - struct file *file; int res = 0; if (!files) return 0; spin_lock(&files->file_lock); - fdt = files_fdtable(files); - while (!res && n < fdt->max_fds) { - file = rcu_dereference_check_fdtable(files, fdt->fd[n++]); - if (file) - res = f(p, file, n); + for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { + struct file *file; + file = rcu_dereference_check_fdtable(files, fdt->fd[n]); + if (!file) + continue; + res = f(p, file, n); + if (res) + break; } spin_unlock(&files->file_lock); return res; diff --git a/fs/namei.c b/fs/namei.c index 937f9d50c84b..5f4cdf3ad913 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2131,6 +2131,11 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) if (!len) return ERR_PTR(-EACCES); + if (unlikely(name[0] == '.')) { + if (len < 2 || (len == 2 && name[1] == '.')) + return ERR_PTR(-EACCES); + } + while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ce8cb926526b..b9e66b7e0c14 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -450,7 +450,8 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) nfs_refresh_inode(dentry->d_inode, entry->fattr); goto out; } else { - d_drop(dentry); + if (d_invalidate(dentry) != 0) + goto out; dput(dentry); } } @@ -1100,6 +1101,8 @@ out_set_verifier: out_zap_parent: nfs_zap_caches(dir); out_bad: + nfs_free_fattr(fattr); + nfs_free_fhandle(fhandle); nfs_mark_for_revalidate(dir); if (inode && S_ISDIR(inode->i_mode)) { /* Purge readdir caches. 
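The fs/namei.c hunk above makes lookup_one_len() refuse the names "." and ".." outright, presumably because a single-component lookup helper is not expected to be handed the dot entries that full path walking treats specially. The check works on a length-counted name rather than a NUL-terminated string; a standalone rendering of the same test:

#include <stdio.h>

static int reject_dots(const char *name, int len)
{
	/* mirrors: name[0] == '.' && (len < 2 || (len == 2 && name[1] == '.')) */
	if (name[0] == '.' && (len < 2 || (len == 2 && name[1] == '.')))
		return 1;	/* "." or ".." */
	return 0;
}

int main(void)
{
	printf(". -> %d, .. -> %d, .hidden -> %d, foo -> %d\n",
	       reject_dots(".", 1), reject_dots("..", 2),
	       reject_dots(".hidden", 7), reject_dots("foo", 3));
	return 0;
}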
*/ @@ -1112,8 +1115,6 @@ out_zap_parent: shrink_dcache_parent(dentry); } d_drop(dentry); - nfs_free_fattr(fattr); - nfs_free_fhandle(fhandle); dput(parent); dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n", __func__, dentry->d_parent->d_name.name, diff --git a/include/linux/fs.h b/include/linux/fs.h index b33cfc97b9ca..75fe9a134803 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -462,8 +462,6 @@ struct block_device { int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; - /* A semaphore that prevents I/O while block size is being changed */ - struct percpu_rw_semaphore bd_block_size_semaphore; }; /* @@ -2049,7 +2047,6 @@ extern void unregister_blkdev(unsigned int, const char *); extern struct block_device *bdget(dev_t); extern struct block_device *bdgrab(struct block_device *bdev); extern void bd_set_size(struct block_device *, loff_t size); -extern sector_t blkdev_max_block(struct block_device *bdev); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern void invalidate_bdev(struct block_device *); @@ -2379,8 +2376,6 @@ extern int generic_segment_checks(const struct iovec *iov, unsigned long *nr_segs, size_t *count, int access_flags); /* fs/block_dev.c */ -extern ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos); extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 6ae9c631a1be..0464c85e63fd 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -1,35 +1,8 @@ #ifndef _LINUX_HW_BREAKPOINT_H #define _LINUX_HW_BREAKPOINT_H -enum { - HW_BREAKPOINT_LEN_1 = 1, - HW_BREAKPOINT_LEN_2 = 2, - HW_BREAKPOINT_LEN_4 = 4, - HW_BREAKPOINT_LEN_8 = 8, -}; - -enum { - HW_BREAKPOINT_EMPTY = 0, - HW_BREAKPOINT_R = 1, - HW_BREAKPOINT_W = 2, - HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W, - HW_BREAKPOINT_X = 4, - HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X, -}; - -enum bp_type_idx { - TYPE_INST = 0, -#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS - TYPE_DATA = 0, -#else - TYPE_DATA = 1, -#endif - TYPE_MAX -}; - -#ifdef __KERNEL__ - #include +#include #ifdef CONFIG_HAVE_HW_BREAKPOINT @@ -151,6 +124,4 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) } #endif /* CONFIG_HAVE_HW_BREAKPOINT */ -#endif /* __KERNEL__ */ - #endif /* _LINUX_HW_BREAKPOINT_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index a123b13b70fd..7d8dfc7392f1 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -701,6 +701,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #define COMPACTION_BUILD 0 #endif +/* This helps us to avoid #ifdef CONFIG_SYMBOL_PREFIX */ +#ifdef CONFIG_SYMBOL_PREFIX +#define SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX +#else +#define SYMBOL_PREFIX "" +#endif + /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ #ifdef CONFIG_FTRACE_MCOUNT_RECORD # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index e5ccb9ddd90e..dbd212723b74 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -82,16 +82,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) __mpol_put(pol); } -extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol); 
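The kernel.h hunk above defines SYMBOL_PREFIX either to CONFIG_SYMBOL_PREFIX or to the empty string, so call sites can rely on plain string-literal concatenation instead of repeating #ifdef CONFIG_SYMBOL_PREFIX; the modsign_pubkey.c asm() block later in this diff uses it exactly that way. A minimal demonstration of the concatenation (the "_" prefix mentioned in the comment is just an example value):

#include <stdio.h>

#define SYMBOL_PREFIX ""	/* would be e.g. "_" on CONFIG_SYMBOL_PREFIX architectures */

int main(void)
{
	/* adjacent string literals are glued together by the compiler, so the
	 * prefix costs nothing at the call site */
	const char *label = SYMBOL_PREFIX "modsign_certificate_list:";

	printf("%s\n", label);
	return 0;
}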
-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol) -{ - if (!frompol) - return frompol; - return __mpol_cond_copy(tompol, frompol); -} - extern struct mempolicy *__mpol_dup(struct mempolicy *pol); static inline struct mempolicy *mpol_dup(struct mempolicy *pol) { @@ -215,12 +205,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) { } -static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to, - struct mempolicy *from) -{ - return from; -} - static inline void mpol_get(struct mempolicy *pol) { } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f8eda0276f03..a848ffc327f4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1488,6 +1488,9 @@ struct napi_gro_cb { /* Used in ipv6_gro_receive() */ int proto; + + /* used in skb_gro_receive() slow path */ + struct sk_buff *last; }; #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 250a4acddb2b..bd1e86071e57 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -13,7 +13,7 @@ struct percpu_rw_semaphore { }; #define light_mb() barrier() -#define heavy_mb() synchronize_sched() +#define heavy_mb() synchronize_sched_expedited() static inline void percpu_down_read(struct percpu_rw_semaphore *p) { @@ -51,7 +51,7 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p) { mutex_lock(&p->mtx); p->locked = true; - synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */ + synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */ while (__percpu_count(p->counters)) msleep(1); heavy_mb(); /* C, between read of p->counter and write to data, paired with B */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 6feeccd83dd7..4af45e33105d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -525,6 +525,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk, extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, int nonagle); extern bool tcp_may_send_now(struct sock *sk); +extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *); extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); extern void tcp_retransmit_timer(struct sock *sk); extern void tcp_xmit_retransmit_queue(struct sock *); diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index e194387ef784..19e765fbfef7 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -415,3 +415,4 @@ header-y += wireless.h header-y += x25.h header-y += xattr.h header-y += xfrm.h +header-y += hw_breakpoint.h diff --git a/include/uapi/linux/hw_breakpoint.h b/include/uapi/linux/hw_breakpoint.h new file mode 100644 index 000000000000..b04000a2296a --- /dev/null +++ b/include/uapi/linux/hw_breakpoint.h @@ -0,0 +1,30 @@ +#ifndef _UAPI_LINUX_HW_BREAKPOINT_H +#define _UAPI_LINUX_HW_BREAKPOINT_H + +enum { + HW_BREAKPOINT_LEN_1 = 1, + HW_BREAKPOINT_LEN_2 = 2, + HW_BREAKPOINT_LEN_4 = 4, + HW_BREAKPOINT_LEN_8 = 8, +}; + +enum { + HW_BREAKPOINT_EMPTY = 0, + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W, + HW_BREAKPOINT_X = 4, + HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X, +}; + +enum bp_type_idx { + TYPE_INST = 0, +#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS + TYPE_DATA = 0, +#else + TYPE_DATA = 1, +#endif + TYPE_MAX +}; + +#endif /* _UAPI_LINUX_HW_BREAKPOINT_H */ diff 
--git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 9a7b487c6fe2..fe8a916507ed 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -111,14 +111,16 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) * Count the number of breakpoints of the same type and same task. * The given event must be not on the list. */ -static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) +static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) { struct task_struct *tsk = bp->hw.bp_target; struct perf_event *iter; int count = 0; list_for_each_entry(iter, &bp_task_head, hw.bp_list) { - if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type) + if (iter->hw.bp_target == tsk && + find_slot_idx(iter) == type && + cpu == iter->cpu) count += hw_breakpoint_weight(iter); } @@ -141,7 +143,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, if (!tsk) slots->pinned += max_task_bp_pinned(cpu, type); else - slots->pinned += task_bp_pinned(bp, type); + slots->pinned += task_bp_pinned(cpu, bp, type); slots->flexible = per_cpu(nr_bp_flexible[type], cpu); return; @@ -154,7 +156,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, if (!tsk) nr += max_task_bp_pinned(cpu, type); else - nr += task_bp_pinned(bp, type); + nr += task_bp_pinned(cpu, bp, type); if (nr > slots->pinned) slots->pinned = nr; @@ -188,7 +190,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, int old_idx = 0; int idx = 0; - old_count = task_bp_pinned(bp, type); + old_count = task_bp_pinned(cpu, bp, type); old_idx = old_count - 1; idx = old_idx + weight; diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c index 4646eb2c3820..767e559dfb10 100644 --- a/kernel/modsign_pubkey.c +++ b/kernel/modsign_pubkey.c @@ -21,10 +21,10 @@ struct key *modsign_keyring; extern __initdata const u8 modsign_certificate_list[]; extern __initdata const u8 modsign_certificate_list_end[]; asm(".section .init.data,\"aw\"\n" - "modsign_certificate_list:\n" + SYMBOL_PREFIX "modsign_certificate_list:\n" ".incbin \"signing_key.x509\"\n" ".incbin \"extra_certificates\"\n" - "modsign_certificate_list_end:" + SYMBOL_PREFIX "modsign_certificate_list_end:" ); /* diff --git a/kernel/module_signing.c b/kernel/module_signing.c index ea1b1df5dbb0..f2970bddc5ea 100644 --- a/kernel/module_signing.c +++ b/kernel/module_signing.c @@ -27,13 +27,13 @@ * - Information block */ struct module_signature { - enum pkey_algo algo : 8; /* Public-key crypto algorithm */ - enum pkey_hash_algo hash : 8; /* Digest algorithm */ - enum pkey_id_type id_type : 8; /* Key identifier type */ - u8 signer_len; /* Length of signer's name */ - u8 key_id_len; /* Length of key identifier */ - u8 __pad[3]; - __be32 sig_len; /* Length of signature data */ + u8 algo; /* Public-key crypto algorithm [enum pkey_algo] */ + u8 hash; /* Digest algorithm [enum pkey_hash_algo] */ + u8 id_type; /* Key identifier type [enum pkey_id_type] */ + u8 signer_len; /* Length of signer's name */ + u8 key_id_len; /* Length of key identifier */ + u8 __pad[3]; + __be32 sig_len; /* Length of signature data */ }; /* diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index 0984a21076a3..15f60d01198b 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c @@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) p->signal->autogroup = autogroup_kref_get(ag); - if 
(!ACCESS_ONCE(sysctl_sched_autogroup_enabled)) - goto out; - t = p; do { sched_move_task(t); } while_each_thread(p, t); -out: unlock_task_sighand(p, &flags); autogroup_kref_put(prev); } diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h index 8bd047142816..443232ebbb53 100644 --- a/kernel/sched/auto_group.h +++ b/kernel/sched/auto_group.h @@ -4,11 +4,6 @@ #include struct autogroup { - /* - * reference doesn't mean how many thread attach to this - * autogroup now. It just stands for the number of task - * could use this autogroup. - */ struct kref kref; struct task_group *tg; struct rw_semaphore lock; diff --git a/kernel/watchdog.c b/kernel/watchdog.c index dd4b80a9f1a9..c8c21be11ab4 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -368,6 +368,9 @@ static void watchdog_disable(unsigned int cpu) { struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); + if (!watchdog_enabled) + return; + watchdog_set_prio(SCHED_NORMAL, 0); hrtimer_cancel(hrtimer); /* disable the perf event */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 042d221d33cc..1dae900df798 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1361,8 +1361,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, WARN_ON_ONCE(timer->function != delayed_work_timer_fn || timer->data != (unsigned long)dwork); - BUG_ON(timer_pending(timer)); - BUG_ON(!list_empty(&work->entry)); + WARN_ON_ONCE(timer_pending(timer)); + WARN_ON_ONCE(!list_empty(&work->entry)); + + /* + * If @delay is 0, queue @dwork->work immediately. This is for + * both optimization and correctness. The earliest @timer can + * expire is on the closest next tick and delayed_work users depend + * on that there's no such delay when @delay is 0. + */ + if (!delay) { + __queue_work(cpu, wq, &dwork->work); + return; + } timer_stats_timer_set_start_info(&dwork->timer); @@ -1417,9 +1428,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, bool ret = false; unsigned long flags; - if (!delay) - return queue_work_on(cpu, wq, &dwork->work); - /* read the comment in __queue_work() */ local_irq_save(flags); @@ -2407,8 +2415,10 @@ static int rescuer_thread(void *__wq) repeat: set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); return 0; + } /* * See whether any cpu is asking for help. 
Unbounded diff --git a/lib/Makefile b/lib/Makefile index 821a16229111..a08b791200f3 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -163,7 +163,7 @@ $(obj)/crc32table.h: $(obj)/gen_crc32table # obj-$(CONFIG_OID_REGISTRY) += oid_registry.o -$(obj)/oid_registry.c: $(obj)/oid_registry_data.c +$(obj)/oid_registry.o: $(obj)/oid_registry_data.c $(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \ $(src)/build_OID_registry diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index de2c8b5a715b..5293d2433029 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -91,7 +91,7 @@ next_tag: /* Extract the length */ len = data[dp++]; - if (len < 0x7f) { + if (len <= 0x7f) { dp += len; goto next_tag; } diff --git a/mm/compaction.c b/mm/compaction.c index 9eef55838fca..694eaabaaebd 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -713,7 +713,15 @@ static void isolate_freepages(struct zone *zone, /* Found a block suitable for isolating free pages from */ isolated = 0; - end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); + + /* + * As pfn may not start aligned, pfn+pageblock_nr_page + * may cross a MAX_ORDER_NR_PAGES boundary and miss + * a pfn_valid check. Ensure isolate_freepages_block() + * only scans within a pageblock + */ + end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); + end_pfn = min(end_pfn, zone_end_pfn); isolated = isolate_freepages_block(cc, pfn, end_pfn, freelist, false); nr_freepages += isolated; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 6c5899b9034a..8b20278be6a6 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1476,9 +1476,17 @@ int soft_offline_page(struct page *page, int flags) { int ret; unsigned long pfn = page_to_pfn(page); + struct page *hpage = compound_trans_head(page); if (PageHuge(page)) return soft_offline_huge_page(page, flags); + if (PageTransHuge(hpage)) { + if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) { + pr_info("soft offline: %#lx: failed to split THP\n", + pfn); + return -EBUSY; + } + } ret = get_any_page(page, pfn, flags); if (ret < 0) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index d04a8a54c294..4ea600da8940 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2037,28 +2037,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) return new; } -/* - * If *frompol needs [has] an extra ref, copy *frompol to *tompol , - * eliminate the * MPOL_F_* flags that require conditional ref and - * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly - * after return. Use the returned value. - * - * Allows use of a mempolicy for, e.g., multiple allocations with a single - * policy lookup, even if the policy needs/has extra ref on lookup. - * shmem_readahead needs this. 
- */ -struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol) -{ - if (!mpol_needs_cond_ref(frompol)) - return frompol; - - *tompol = *frompol; - tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */ - __mpol_put(frompol); - return tompol; -} - /* Slow path of a mempolicy comparison */ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 92871579cbee..7e208f0ad68c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) } } - return 1UL << order; + return 1UL << alloc_order; } /* diff --git a/mm/shmem.c b/mm/shmem.c index 89341b658bd0..50c5b8f3a359 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -910,25 +910,29 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { - struct mempolicy mpol, *spol; struct vm_area_struct pvma; - - spol = mpol_cond_copy(&mpol, - mpol_shared_policy_lookup(&info->policy, index)); + struct page *page; /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; /* Bias interleave by inode number to distribute better across nodes */ pvma.vm_pgoff = index + info->vfs_inode.i_ino; pvma.vm_ops = NULL; - pvma.vm_policy = spol; - return swapin_readahead(swap, gfp, &pvma, 0); + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); + + page = swapin_readahead(swap, gfp, &pvma, 0); + + /* Drop reference taken by mpol_shared_policy_lookup() */ + mpol_cond_put(pvma.vm_policy); + + return page; } static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; + struct page *page; /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; @@ -937,10 +941,12 @@ static struct page *shmem_alloc_page(gfp_t gfp, pvma.vm_ops = NULL; pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); - /* - * alloc_page_vma() will drop the shared policy reference - */ - return alloc_page_vma(gfp, &pvma, 0); + page = alloc_page_vma(gfp, &pvma, 0); + + /* Drop reference taken by mpol_shared_policy_lookup() */ + mpol_cond_put(pvma.vm_policy); + + return page; } #else /* !CONFIG_NUMA */ #ifdef CONFIG_TMPFS diff --git a/mm/sparse.c b/mm/sparse.c index fac95f2888f2..a83de2f72b30 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -617,7 +617,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) { return; /* XXX: Not implemented yet */ } -static void free_map_bootmem(struct page *page, unsigned long nr_pages) +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) { } #else @@ -658,10 +658,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) get_order(sizeof(struct page) * nr_pages)); } -static void free_map_bootmem(struct page *page, unsigned long nr_pages) +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) { unsigned long maps_section_nr, removing_section_nr, i; unsigned long magic; + struct page *page = virt_to_page(memmap); for (i = 0; i < nr_pages; i++, page++) { magic = (unsigned long) page->lru.next; @@ -710,13 +711,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap) */ if (memmap) { - struct page *memmap_page; - memmap_page = virt_to_page(memmap); - nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) >> PAGE_SHIFT; - 
free_map_bootmem(memmap_page, nr_pages); + free_map_bootmem(memmap, nr_pages); } } diff --git a/mm/vmscan.c b/mm/vmscan.c index cbf84e152f04..b7ed37675644 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2414,6 +2414,19 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc) } while (memcg); } +static bool zone_balanced(struct zone *zone, int order, + unsigned long balance_gap, int classzone_idx) +{ + if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) + + balance_gap, classzone_idx, 0)) + return false; + + if (COMPACTION_BUILD && order && !compaction_suitable(zone, order)) + return false; + + return true; +} + /* * pgdat_balanced is used when checking if a node is balanced for high-order * allocations. Only zones that meet watermarks and are in a zone allowed @@ -2492,8 +2505,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, continue; } - if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), - i, 0)) + if (!zone_balanced(zone, order, 0, i)) all_zones_ok = false; else balanced += zone->present_pages; @@ -2602,8 +2614,7 @@ loop_again: break; } - if (!zone_watermark_ok_safe(zone, order, - high_wmark_pages(zone), 0, 0)) { + if (!zone_balanced(zone, order, 0, 0)) { end_zone = i; break; } else { @@ -2679,9 +2690,8 @@ loop_again: testorder = 0; if ((buffer_heads_over_limit && is_highmem_idx(i)) || - !zone_watermark_ok_safe(zone, testorder, - high_wmark_pages(zone) + balance_gap, - end_zone, 0)) { + !zone_balanced(zone, testorder, + balance_gap, end_zone)) { shrink_zone(zone, &sc); reclaim_state->reclaimed_slab = 0; @@ -2708,8 +2718,7 @@ loop_again: continue; } - if (!zone_watermark_ok_safe(zone, testorder, - high_wmark_pages(zone), end_zone, 0)) { + if (!zone_balanced(zone, testorder, 0, end_zone)) { all_zones_ok = 0; /* * We are still under min water mark. This @@ -2814,29 +2823,10 @@ out: if (!populated_zone(zone)) continue; - if (zone->all_unreclaimable && - sc.priority != DEF_PRIORITY) - continue; - - /* Would compaction fail due to lack of free memory? */ - if (COMPACTION_BUILD && - compaction_suitable(zone, order) == COMPACT_SKIPPED) - goto loop_again; - - /* Confirm the zone is balanced for order-0 */ - if (!zone_watermark_ok(zone, 0, - high_wmark_pages(zone), 0, 0)) { - order = sc.order = 0; - goto loop_again; - } - /* Check if the memory needs to be defragmented. 
*/ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), *classzone_idx, 0)) zones_need_compaction = 0; - - /* If balanced, clear the congested flag */ - zone_clear_flag(zone, ZONE_CONGESTED); } if (zones_need_compaction) diff --git a/net/can/bcm.c b/net/can/bcm.c index 6f747582718e..969b7cdff59d 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->sk = sk; op->ifindex = ifindex; + /* ifindex for timeout events w/o previous frame reception */ + op->rx_ifindex = ifindex; + /* initialize uninitialized (kzalloc) structure */ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); op->timer.function = bcm_rx_timeout_handler; diff --git a/net/core/dev.c b/net/core/dev.c index c0946cb2b354..e5942bf45a6d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3451,6 +3451,8 @@ static int napi_gro_complete(struct sk_buff *skb) struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; int err = -ENOENT; + BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); + if (NAPI_GRO_CB(skb)->count == 1) { skb_shinfo(skb)->gso_size = 0; goto out; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4007c1437fda..3f0636cd76cd 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3004,7 +3004,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) skb_shinfo(nskb)->gso_size = pinfo->gso_size; pinfo->gso_size = 0; skb_header_release(p); - nskb->prev = p; + NAPI_GRO_CB(nskb)->last = p; nskb->data_len += p->len; nskb->truesize += p->truesize; @@ -3030,8 +3030,8 @@ merge: __skb_pull(skb, offset); - p->prev->next = skb; - p->prev = skb; + NAPI_GRO_CB(p)->last->next = skb; + NAPI_GRO_CB(p)->last = skb; skb_header_release(skb); done: diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index f2eccd531746..17ff9fd7cdda 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -257,7 +257,8 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1); rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit); - inet_putpeer(peer); + if (peer) + inet_putpeer(peer); } out: return rc; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 0c34bfabc11f..e23e16dc501d 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -44,6 +44,10 @@ struct inet_diag_entry { u16 dport; u16 family; u16 userlocks; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr saddr_storage; /* for IPv4-mapped-IPv6 addresses */ + struct in6_addr daddr_storage; /* for IPv4-mapped-IPv6 addresses */ +#endif }; static DEFINE_MUTEX(inet_diag_table_mutex); @@ -428,25 +432,31 @@ static int inet_diag_bc_run(const struct nlattr *_bc, break; } - if (cond->prefix_len == 0) - break; - if (op->code == INET_DIAG_BC_S_COND) addr = entry->saddr; else addr = entry->daddr; + if (cond->family != AF_UNSPEC && + cond->family != entry->family) { + if (entry->family == AF_INET6 && + cond->family == AF_INET) { + if (addr[0] == 0 && addr[1] == 0 && + addr[2] == htonl(0xffff) && + bitstring_match(addr + 3, + cond->addr, + cond->prefix_len)) + break; + } + yes = 0; + break; + } + + if (cond->prefix_len == 0) + break; if (bitstring_match(addr, cond->addr, cond->prefix_len)) break; - if (entry->family == AF_INET6 && - cond->family == AF_INET) { - if (addr[0] == 0 && addr[1] == 0 && - addr[2] == htonl(0xffff) && - bitstring_match(addr + 3, cond->addr, - cond->prefix_len)) - break; - } yes = 0; break; } @@ -509,6 +519,55 @@ static int 
valid_cc(const void *bc, int len, int cc) return 0; } +/* Validate an inet_diag_hostcond. */ +static bool valid_hostcond(const struct inet_diag_bc_op *op, int len, + int *min_len) +{ + int addr_len; + struct inet_diag_hostcond *cond; + + /* Check hostcond space. */ + *min_len += sizeof(struct inet_diag_hostcond); + if (len < *min_len) + return false; + cond = (struct inet_diag_hostcond *)(op + 1); + + /* Check address family and address length. */ + switch (cond->family) { + case AF_UNSPEC: + addr_len = 0; + break; + case AF_INET: + addr_len = sizeof(struct in_addr); + break; + case AF_INET6: + addr_len = sizeof(struct in6_addr); + break; + default: + return false; + } + *min_len += addr_len; + if (len < *min_len) + return false; + + /* Check prefix length (in bits) vs address length (in bytes). */ + if (cond->prefix_len > 8 * addr_len) + return false; + + return true; +} + +/* Validate a port comparison operator. */ +static inline bool valid_port_comparison(const struct inet_diag_bc_op *op, + int len, int *min_len) +{ + /* Port comparisons put the port in a follow-on inet_diag_bc_op. */ + *min_len += sizeof(struct inet_diag_bc_op); + if (len < *min_len) + return false; + return true; +} + static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) { const void *bc = bytecode; @@ -516,29 +575,39 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) while (len > 0) { const struct inet_diag_bc_op *op = bc; + int min_len = sizeof(struct inet_diag_bc_op); //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len); switch (op->code) { - case INET_DIAG_BC_AUTO: case INET_DIAG_BC_S_COND: case INET_DIAG_BC_D_COND: + if (!valid_hostcond(bc, len, &min_len)) + return -EINVAL; + break; case INET_DIAG_BC_S_GE: case INET_DIAG_BC_S_LE: case INET_DIAG_BC_D_GE: case INET_DIAG_BC_D_LE: - case INET_DIAG_BC_JMP: - if (op->no < 4 || op->no > len + 4 || op->no & 3) - return -EINVAL; - if (op->no < len && - !valid_cc(bytecode, bytecode_len, len - op->no)) + if (!valid_port_comparison(bc, len, &min_len)) return -EINVAL; break; + case INET_DIAG_BC_AUTO: + case INET_DIAG_BC_JMP: case INET_DIAG_BC_NOP: break; default: return -EINVAL; } - if (op->yes < 4 || op->yes > len + 4 || op->yes & 3) + + if (op->code != INET_DIAG_BC_NOP) { + if (op->no < min_len || op->no > len + 4 || op->no & 3) + return -EINVAL; + if (op->no < len && + !valid_cc(bytecode, bytecode_len, len - op->no)) + return -EINVAL; + } + + if (op->yes < min_len || op->yes > len + 4 || op->yes & 3) return -EINVAL; bc += op->yes; len -= op->yes; @@ -596,6 +665,36 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); } +/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses + * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6. 
+ */ +static inline void inet_diag_req_addrs(const struct sock *sk, + const struct request_sock *req, + struct inet_diag_entry *entry) +{ + struct inet_request_sock *ireq = inet_rsk(req); + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + if (req->rsk_ops->family == AF_INET6) { + entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32; + entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32; + } else if (req->rsk_ops->family == AF_INET) { + ipv6_addr_set_v4mapped(ireq->loc_addr, + &entry->saddr_storage); + ipv6_addr_set_v4mapped(ireq->rmt_addr, + &entry->daddr_storage); + entry->saddr = entry->saddr_storage.s6_addr32; + entry->daddr = entry->daddr_storage.s6_addr32; + } + } else +#endif + { + entry->saddr = &ireq->loc_addr; + entry->daddr = &ireq->rmt_addr; + } +} + static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, struct request_sock *req, struct user_namespace *user_ns, @@ -637,8 +736,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, r->idiag_inode = 0; #if IS_ENABLED(CONFIG_IPV6) if (r->idiag_family == AF_INET6) { - *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr; - *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr; + struct inet_diag_entry entry; + inet_diag_req_addrs(sk, req, &entry); + memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr)); + memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr)); } #endif @@ -691,18 +792,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, continue; if (bc) { - entry.saddr = -#if IS_ENABLED(CONFIG_IPV6) - (entry.family == AF_INET6) ? - inet6_rsk(req)->loc_addr.s6_addr32 : -#endif - &ireq->loc_addr; - entry.daddr = -#if IS_ENABLED(CONFIG_IPV6) - (entry.family == AF_INET6) ? - inet6_rsk(req)->rmt_addr.s6_addr32 : -#endif - &ireq->rmt_addr; + inet_diag_req_addrs(sk, req, &entry); entry.dport = ntohs(ireq->rmt_port); if (!inet_diag_bc_run(bc, &entry)) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 448e68546827..8d5cc75dac88 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -707,28 +707,27 @@ EXPORT_SYMBOL(ip_defrag); struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) { - const struct iphdr *iph; + struct iphdr iph; u32 len; if (skb->protocol != htons(ETH_P_IP)) return skb; - if (!pskb_may_pull(skb, sizeof(struct iphdr))) + if (!skb_copy_bits(skb, 0, &iph, sizeof(iph))) return skb; - iph = ip_hdr(skb); - if (iph->ihl < 5 || iph->version != 4) + if (iph.ihl < 5 || iph.version != 4) return skb; - if (!pskb_may_pull(skb, iph->ihl*4)) - return skb; - iph = ip_hdr(skb); - len = ntohs(iph->tot_len); - if (skb->len < len || len < (iph->ihl * 4)) + + len = ntohs(iph.tot_len); + if (skb->len < len || len < (iph.ihl * 4)) return skb; - if (ip_is_fragment(ip_hdr(skb))) { + if (ip_is_fragment(&iph)) { skb = skb_share_check(skb, GFP_ATOMIC); if (skb) { + if (!pskb_may_pull(skb, iph.ihl*4)) + return skb; if (pskb_trim_rcsum(skb, len)) return skb; memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 6168c4dc58b1..3eab2b2ffd34 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1318,6 +1318,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi if (get_user(v, (u32 __user *)optval)) return -EFAULT; + /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */ + if (v != RT_TABLE_DEFAULT && v >= 1000000000) + return -EINVAL; + rtnl_lock(); ret = 0; if (sk == rtnl_dereference(mrt->mroute_sk)) { diff --git a/net/ipv4/tcp.c 
b/net/ipv4/tcp.c index 083092e3aed6..e457c7ab2e28 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -830,8 +830,8 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) return mss_now; } -static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, - size_t psize, int flags) +static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, + size_t size, int flags) { struct tcp_sock *tp = tcp_sk(sk); int mss_now, size_goal; @@ -858,12 +858,9 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto out_err; - while (psize > 0) { + while (size > 0) { struct sk_buff *skb = tcp_write_queue_tail(sk); - struct page *page = pages[poffset / PAGE_SIZE]; int copy, i; - int offset = poffset % PAGE_SIZE; - int size = min_t(size_t, psize, PAGE_SIZE - offset); bool can_coalesce; if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { @@ -912,8 +909,8 @@ new_segment: TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; copied += copy; - poffset += copy; - if (!(psize -= copy)) + offset += copy; + if (!(size -= copy)) goto out; if (skb->len < size_goal || (flags & MSG_OOB)) @@ -960,7 +957,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, flags); lock_sock(sk); - res = do_tcp_sendpages(sk, &page, offset, size, flags); + res = do_tcp_sendpages(sk, page, offset, size, flags); release_sock(sk); return res; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 609ff98aeb47..181fc8234a52 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5645,7 +5645,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); if (data) { /* Retransmit unacked data in SYN */ - tcp_retransmit_skb(sk, data); + tcp_for_write_queue_from(data, sk) { + if (data == tcp_send_head(sk) || + __tcp_retransmit_skb(sk, data)) + break; + } tcp_rearm_rto(sk); return true; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2798706cb063..948ac275b9b5 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2309,12 +2309,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, * state updates are done by the caller. Returns non-zero if an * error occurred which prevented the send. */ -int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) +int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); unsigned int cur_mss; - int err; /* Inconslusive MTU probe */ if (icsk->icsk_mtup.probe_size) { @@ -2387,11 +2386,17 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); - err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : - -ENOBUFS; + return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : + -ENOBUFS; } else { - err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); + return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); } +} + +int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + int err = __tcp_retransmit_skb(sk, skb); if (err == 0) { /* Update global TCP statistics. 
*/ diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 1002e3396f72..ae43c62f9045 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -441,6 +441,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); if (lsap == NULL) { IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__); + __irttp_close_tsap(self); return NULL; } diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 83608ac16780..2c84185dfdb0 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -458,8 +458,6 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata) list_move_tail(&roc->list, &tmp_list); roc->abort = true; } - - ieee80211_start_next_roc(local); mutex_unlock(&local->mtx); list_for_each_entry_safe(roc, tmp, &tmp_list, list) { diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index b9a63381e349..45a101439bc5 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -793,7 +793,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING, - .len = IPSET_MAXNAMELEN - 1 }, + .len = IFNAMSIZ - 1 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 98c70630ad06..733cbf49ed1f 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -702,15 +702,11 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, /* We only match on the lower 8 bits of the opcode. */ if (ntohs(arp->ar_op) <= 0xff) key->ip.proto = ntohs(arp->ar_op); - - if (key->ip.proto == ARPOP_REQUEST - || key->ip.proto == ARPOP_REPLY) { - memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); - memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); - memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); - memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); - key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); - } + memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); + memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); + memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); + memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); + key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); } } else if (key->eth.type == htons(ETH_P_IPV6)) { int nh_len; /* IPv6 Header + Extensions */ diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 3c1e58ba714b..a9033481fa5e 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -158,7 +158,7 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb) if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", - ovs_dp_name(vport->dp), + netdev_vport->dev->name, packet_length(skb), mtu); goto error; } diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 7c2df9c33df3..69ce21e3716f 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -183,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, msg = sctp_datamsg_new(GFP_KERNEL); if (!msg) - return NULL; + return ERR_PTR(-ENOMEM); /* Note: Calculate this outside of the loop, so that all fragments * have the same expiration. 
@@ -280,11 +280,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0); - if (!chunk) + if (!chunk) { + err = -ENOMEM; goto errout; + } + err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov); if (err < 0) - goto errout; + goto errout_chunk_free; offset += len; @@ -315,8 +318,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0); - if (!chunk) + if (!chunk) { + err = -ENOMEM; goto errout; + } err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov); @@ -324,7 +329,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr - (__u8 *)chunk->skb->data); if (err < 0) - goto errout; + goto errout_chunk_free; sctp_datamsg_assign(msg, chunk); list_add_tail(&chunk->frag_list, &msg->chunks); @@ -332,6 +337,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, return msg; +errout_chunk_free: + sctp_chunk_free(chunk); + errout: list_for_each_safe(pos, temp, &msg->chunks) { list_del_init(pos); @@ -339,7 +347,7 @@ errout: sctp_chunk_free(chunk); } sctp_datamsg_put(msg); - return NULL; + return ERR_PTR(err); } /* Check whether this message has expired. */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a60d1f8b41c5..406d957d08fb 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1915,8 +1915,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, /* Break the message into multiple chunks of maximum size. */ datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); - if (!datamsg) { - err = -ENOMEM; + if (IS_ERR(datamsg)) { + err = PTR_ERR(datamsg); goto out_free; } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 953c21e4af97..206cf5238fd3 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -331,7 +331,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) * 1/8, rto_alpha would be expressed as 3. 
*/ tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) - + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta); + + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) + (rtt >> net->sctp.rto_alpha); } else { diff --git a/tools/Makefile b/tools/Makefile index 3ae43947a171..1f9a529fe544 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -31,44 +31,44 @@ help: @echo ' clean: a summary clean target to clean _all_ folders' cpupower: FORCE - $(QUIET_SUBDIR0)power/$@/ $(QUIET_SUBDIR1) + $(call descend,power/$@) firewire lguest perf usb virtio vm: FORCE - $(QUIET_SUBDIR0)$@/ $(QUIET_SUBDIR1) + $(call descend,$@) selftests: FORCE - $(QUIET_SUBDIR0)testing/$@/ $(QUIET_SUBDIR1) + $(call descend,testing/$@) turbostat x86_energy_perf_policy: FORCE - $(QUIET_SUBDIR0)power/x86/$@/ $(QUIET_SUBDIR1) + $(call descend,power/x86/$@) cpupower_install: - $(QUIET_SUBDIR0)power/$(@:_install=)/ $(QUIET_SUBDIR1) install + $(call descend,power/$(@:_install=),install) firewire_install lguest_install perf_install usb_install virtio_install vm_install: - $(QUIET_SUBDIR0)$(@:_install=)/ $(QUIET_SUBDIR1) install + $(call descend,$(@:_install=),install) selftests_install: - $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) install + $(call descend,testing/$(@:_clean=),install) turbostat_install x86_energy_perf_policy_install: - $(QUIET_SUBDIR0)power/x86/$(@:_install=)/ $(QUIET_SUBDIR1) install + $(call descend,power/x86/$(@:_install=),install) install: cpupower_install firewire_install lguest_install perf_install \ selftests_install turbostat_install usb_install virtio_install \ vm_install x86_energy_perf_policy_install cpupower_clean: - $(QUIET_SUBDIR0)power/cpupower/ $(QUIET_SUBDIR1) clean + $(call descend,power/cpupower,clean) firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean: - $(QUIET_SUBDIR0)$(@:_clean=)/ $(QUIET_SUBDIR1) clean + $(call descend,$(@:_clean=),clean) selftests_clean: - $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) clean + $(call descend,testing/$(@:_clean=),clean) turbostat_clean x86_energy_perf_policy_clean: - $(QUIET_SUBDIR0)power/x86/$(@:_clean=)/ $(QUIET_SUBDIR1) clean + $(call descend,power/x86/$(@:_clean=),clean) clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \ turbostat_clean usb_clean virtio_clean vm_clean \ diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 00deed4d6159..0a619af5be43 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -169,7 +169,34 @@ endif ### --- END CONFIGURATION SECTION --- -BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(shell pwd))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +#$(info Determined 'srctree' to be $(srctree)) +endif + +ifneq ($(objtree),) +#$(info Determined 'objtree' to be $(objtree)) +endif + +ifneq ($(OUTPUT),) +#$(info Determined 'OUTPUT' to be $(OUTPUT)) +endif + +BASIC_CFLAGS = \ + -Iutil/include \ + -Iarch/$(ARCH)/include \ + $(if $(objtree),-I$(objtree)/arch/$(ARCH)/include/generated/uapi) \ + -I$(srctree)/arch/$(ARCH)/include/uapi \ + -I$(srctree)/arch/$(ARCH)/include \ + $(if $(objtree),-I$(objtree)/include/generated/uapi) \ + -I$(srctree)/include/uapi \ + -I$(srctree)/include \ + -I$(OUTPUT)util \ + -Iutil \ + -I. 
\ + -I$(TRACE_EVENT_DIR) \ + -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE BASIC_LDFLAGS = # Guard against environment variables diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h index 46fc9f15c6b3..7fcdcdbee917 100644 --- a/tools/perf/arch/x86/include/perf_regs.h +++ b/tools/perf/arch/x86/include/perf_regs.h @@ -3,7 +3,7 @@ #include #include "../../util/types.h" -#include "../../../../../arch/x86/include/asm/perf_regs.h" +#include #ifndef ARCH_X86_64 #define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1) diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 260abc535b5b..283b4397e397 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c @@ -22,9 +22,10 @@ #include #include -#include "../../arch/x86/include/asm/svm.h" -#include "../../arch/x86/include/asm/vmx.h" -#include "../../arch/x86/include/asm/kvm.h" +#if defined(__i386__) || defined(__x86_64__) +#include +#include +#include struct event_key { #define INVALID_KEY (~0ULL) @@ -58,7 +59,7 @@ struct kvm_event_key { }; -struct perf_kvm; +struct perf_kvm_stat; struct kvm_events_ops { bool (*is_begin_event)(struct perf_evsel *evsel, @@ -66,7 +67,7 @@ struct kvm_events_ops { struct event_key *key); bool (*is_end_event)(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key); - void (*decode_key)(struct perf_kvm *kvm, struct event_key *key, + void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key, char decode[20]); const char *name; }; @@ -79,7 +80,7 @@ struct exit_reasons_table { #define EVENTS_BITS 12 #define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS) -struct perf_kvm { +struct perf_kvm_stat { struct perf_tool tool; struct perf_session *session; @@ -146,7 +147,7 @@ static struct exit_reasons_table svm_exit_reasons[] = { SVM_EXIT_REASONS }; -static const char *get_exit_reason(struct perf_kvm *kvm, u64 exit_code) +static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code) { int i = kvm->exit_reasons_size; struct exit_reasons_table *tbl = kvm->exit_reasons; @@ -162,7 +163,7 @@ static const char *get_exit_reason(struct perf_kvm *kvm, u64 exit_code) return "UNKNOWN"; } -static void exit_event_decode_key(struct perf_kvm *kvm, +static void exit_event_decode_key(struct perf_kvm_stat *kvm, struct event_key *key, char decode[20]) { @@ -228,7 +229,7 @@ static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample, return false; } -static void mmio_event_decode_key(struct perf_kvm *kvm __maybe_unused, +static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, struct event_key *key, char decode[20]) { @@ -271,7 +272,7 @@ static bool ioport_event_end(struct perf_evsel *evsel, return kvm_entry_event(evsel); } -static void ioport_event_decode_key(struct perf_kvm *kvm __maybe_unused, +static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, struct event_key *key, char decode[20]) { @@ -286,7 +287,7 @@ static struct kvm_events_ops ioport_events = { .name = "IO Port Access" }; -static bool register_kvm_events_ops(struct perf_kvm *kvm) +static bool register_kvm_events_ops(struct perf_kvm_stat *kvm) { bool ret = true; @@ -311,7 +312,7 @@ struct vcpu_event_record { }; -static void init_kvm_event_record(struct perf_kvm *kvm) +static void init_kvm_event_record(struct perf_kvm_stat *kvm) { int i; @@ -360,7 +361,7 @@ static struct kvm_event *kvm_alloc_init_event(struct event_key *key) return event; } -static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm, 
+static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm, struct event_key *key) { struct kvm_event *event; @@ -381,7 +382,7 @@ static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm, return event; } -static bool handle_begin_event(struct perf_kvm *kvm, +static bool handle_begin_event(struct perf_kvm_stat *kvm, struct vcpu_event_record *vcpu_record, struct event_key *key, u64 timestamp) { @@ -425,7 +426,7 @@ static bool update_kvm_event(struct kvm_event *event, int vcpu_id, return true; } -static bool handle_end_event(struct perf_kvm *kvm, +static bool handle_end_event(struct perf_kvm_stat *kvm, struct vcpu_event_record *vcpu_record, struct event_key *key, u64 timestamp) @@ -486,7 +487,7 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread, return thread->priv; } -static bool handle_kvm_event(struct perf_kvm *kvm, +static bool handle_kvm_event(struct perf_kvm_stat *kvm, struct thread *thread, struct perf_evsel *evsel, struct perf_sample *sample) @@ -541,7 +542,7 @@ static struct kvm_event_key keys[] = { { NULL, NULL } }; -static bool select_key(struct perf_kvm *kvm) +static bool select_key(struct perf_kvm_stat *kvm) { int i; @@ -577,7 +578,8 @@ static void insert_to_result(struct rb_root *result, struct kvm_event *event, rb_insert_color(&event->rb, result); } -static void update_total_count(struct perf_kvm *kvm, struct kvm_event *event) +static void +update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event) { int vcpu = kvm->trace_vcpu; @@ -590,7 +592,7 @@ static bool event_is_valid(struct kvm_event *event, int vcpu) return !!get_event_count(event, vcpu); } -static void sort_result(struct perf_kvm *kvm) +static void sort_result(struct perf_kvm_stat *kvm) { unsigned int i; int vcpu = kvm->trace_vcpu; @@ -627,7 +629,7 @@ static void print_vcpu_info(int vcpu) pr_info("VCPU %d:\n\n", vcpu); } -static void print_result(struct perf_kvm *kvm) +static void print_result(struct perf_kvm_stat *kvm) { char decode[20]; struct kvm_event *event; @@ -670,7 +672,8 @@ static int process_sample_event(struct perf_tool *tool, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, sample->tid); - struct perf_kvm *kvm = container_of(tool, struct perf_kvm, tool); + struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, + tool); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -701,7 +704,7 @@ static int get_cpu_isa(struct perf_session *session) return isa; } -static int read_events(struct perf_kvm *kvm) +static int read_events(struct perf_kvm_stat *kvm) { int ret; @@ -750,7 +753,7 @@ static bool verify_vcpu(int vcpu) return true; } -static int kvm_events_report_vcpu(struct perf_kvm *kvm) +static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm) { int ret = -EINVAL; int vcpu = kvm->trace_vcpu; @@ -798,7 +801,8 @@ static const char * const record_args[] = { _p; \ }) -static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv) +static int +kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv) { unsigned int rec_argc, i, j; const char **rec_argv; @@ -821,7 +825,8 @@ static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -static int kvm_events_report(struct perf_kvm *kvm, int argc, const char **argv) +static int +kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv) { const struct option kvm_events_report_options[] = { OPT_STRING(0, "event", &kvm->report_event, 
"report event", @@ -864,24 +869,37 @@ static void print_kvm_stat_usage(void) printf("\nOtherwise, it is the alias of 'perf stat':\n"); } -static int kvm_cmd_stat(struct perf_kvm *kvm, int argc, const char **argv) +static int kvm_cmd_stat(const char *file_name, int argc, const char **argv) { + struct perf_kvm_stat kvm = { + .file_name = file_name, + + .trace_vcpu = -1, + .report_event = "vmexit", + .sort_key = "sample", + + .exit_reasons = svm_exit_reasons, + .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons), + .exit_reasons_isa = "SVM", + }; + if (argc == 1) { print_kvm_stat_usage(); goto perf_stat; } if (!strncmp(argv[1], "rec", 3)) - return kvm_events_record(kvm, argc - 1, argv + 1); + return kvm_events_record(&kvm, argc - 1, argv + 1); if (!strncmp(argv[1], "rep", 3)) - return kvm_events_report(kvm, argc - 1 , argv + 1); + return kvm_events_report(&kvm, argc - 1 , argv + 1); perf_stat: return cmd_stat(argc, argv, NULL); } +#endif -static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv) +static int __cmd_record(const char *file_name, int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; @@ -890,7 +908,7 @@ static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv) rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("record"); rec_argv[i++] = strdup("-o"); - rec_argv[i++] = strdup(kvm->file_name); + rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; @@ -899,7 +917,7 @@ static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv) +static int __cmd_report(const char *file_name, int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; @@ -908,7 +926,7 @@ static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv) rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("report"); rec_argv[i++] = strdup("-i"); - rec_argv[i++] = strdup(kvm->file_name); + rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; @@ -917,7 +935,8 @@ static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv) return cmd_report(i, rec_argv, NULL); } -static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv) +static int +__cmd_buildid_list(const char *file_name, int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; @@ -926,7 +945,7 @@ static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv) rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("buildid-list"); rec_argv[i++] = strdup("-i"); - rec_argv[i++] = strdup(kvm->file_name); + rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; @@ -937,20 +956,12 @@ static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv) int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) { - struct perf_kvm kvm = { - .trace_vcpu = -1, - .report_event = "vmexit", - .sort_key = "sample", - - .exit_reasons = svm_exit_reasons, - .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons), - .exit_reasons_isa = "SVM", - }; + const char *file_name; const struct option kvm_options[] = { - OPT_STRING('i', "input", &kvm.file_name, "file", + OPT_STRING('i', "input", &file_name, "file", "Input file name"), - OPT_STRING('o', "output", &kvm.file_name, "file", + OPT_STRING('o', "output", &file_name, 
"file", "Output file name"), OPT_BOOLEAN(0, "guest", &perf_guest, "Collect guest os data"), @@ -985,32 +996,34 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) if (!perf_host) perf_guest = 1; - if (!kvm.file_name) { + if (!file_name) { if (perf_host && !perf_guest) - kvm.file_name = strdup("perf.data.host"); + file_name = strdup("perf.data.host"); else if (!perf_host && perf_guest) - kvm.file_name = strdup("perf.data.guest"); + file_name = strdup("perf.data.guest"); else - kvm.file_name = strdup("perf.data.kvm"); + file_name = strdup("perf.data.kvm"); - if (!kvm.file_name) { + if (!file_name) { pr_err("Failed to allocate memory for filename\n"); return -ENOMEM; } } if (!strncmp(argv[0], "rec", 3)) - return __cmd_record(&kvm, argc, argv); + return __cmd_record(file_name, argc, argv); else if (!strncmp(argv[0], "rep", 3)) - return __cmd_report(&kvm, argc, argv); + return __cmd_report(file_name, argc, argv); else if (!strncmp(argv[0], "diff", 4)) return cmd_diff(argc, argv, NULL); else if (!strncmp(argv[0], "top", 3)) return cmd_top(argc, argv, NULL); else if (!strncmp(argv[0], "buildid-list", 12)) - return __cmd_buildid_list(&kvm, argc, argv); + return __cmd_buildid_list(file_name, argc, argv); +#if defined(__i386__) || defined(__x86_64__) else if (!strncmp(argv[0], "stat", 4)) - return kvm_cmd_stat(&kvm, argc, argv); + return kvm_cmd_stat(file_name, argc, argv); +#endif else usage_with_options(kvm_usage, kvm_options); diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 484f26cc0c00..5acd6e8e658b 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -15,7 +15,7 @@ #include "util/thread_map.h" #include "util/pmu.h" #include "event-parse.h" -#include "../../include/linux/hw_breakpoint.h" +#include #include diff --git a/tools/perf/perf.h b/tools/perf/perf.h index c50985eaec41..238f923f2218 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -5,8 +5,9 @@ struct winsize; void get_term_dimensions(struct winsize *ws); +#include + #if defined(__i386__) -#include "../../arch/x86/include/asm/unistd.h" #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #define CPUINFO_PROC "model name" @@ -16,7 +17,6 @@ void get_term_dimensions(struct winsize *ws); #endif #if defined(__x86_64__) -#include "../../arch/x86/include/asm/unistd.h" #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); #define CPUINFO_PROC "model name" @@ -26,20 +26,17 @@ void get_term_dimensions(struct winsize *ws); #endif #ifdef __powerpc__ -#include "../../arch/powerpc/include/asm/unistd.h" #define rmb() asm volatile ("sync" ::: "memory") #define cpu_relax() asm volatile ("" ::: "memory"); #define CPUINFO_PROC "cpu" #endif #ifdef __s390__ -#include "../../arch/s390/include/asm/unistd.h" #define rmb() asm volatile("bcr 15,0" ::: "memory") #define cpu_relax() asm volatile("" ::: "memory"); #endif #ifdef __sh__ -#include "../../arch/sh/include/asm/unistd.h" #if defined(__SH4A__) || defined(__SH5__) # define rmb() asm volatile("synco" ::: "memory") #else @@ -50,35 +47,30 @@ void get_term_dimensions(struct winsize *ws); #endif #ifdef __hppa__ -#include "../../arch/parisc/include/asm/unistd.h" #define rmb() asm volatile("" ::: "memory") #define cpu_relax() asm volatile("" ::: "memory"); #define CPUINFO_PROC "cpu" #endif #ifdef __sparc__ -#include "../../arch/sparc/include/uapi/asm/unistd.h" #define rmb() asm volatile("":::"memory") #define 
cpu_relax() asm volatile("":::"memory") #define CPUINFO_PROC "cpu" #endif #ifdef __alpha__ -#include "../../arch/alpha/include/asm/unistd.h" #define rmb() asm volatile("mb" ::: "memory") #define cpu_relax() asm volatile("" ::: "memory") #define CPUINFO_PROC "cpu model" #endif #ifdef __ia64__ -#include "../../arch/ia64/include/asm/unistd.h" #define rmb() asm volatile ("mf" ::: "memory") #define cpu_relax() asm volatile ("hint @pause" ::: "memory") #define CPUINFO_PROC "model name" #endif #ifdef __arm__ -#include "../../arch/arm/include/asm/unistd.h" /* * Use the __kuser_memory_barrier helper in the CPU helper page. See * arch/arm/kernel/entry-armv.S in the kernel source for details. @@ -89,13 +81,11 @@ void get_term_dimensions(struct winsize *ws); #endif #ifdef __aarch64__ -#include "../../arch/arm64/include/asm/unistd.h" #define rmb() asm volatile("dmb ld" ::: "memory") #define cpu_relax() asm volatile("yield" ::: "memory") #endif #ifdef __mips__ -#include "../../arch/mips/include/asm/unistd.h" #define rmb() asm volatile( \ ".set mips2\n\t" \ "sync\n\t" \ @@ -112,7 +102,7 @@ void get_term_dimensions(struct winsize *ws); #include #include -#include "../../include/uapi/linux/perf_event.h" +#include #include "util/types.h" #include diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 618d41140abd..d144d464ce39 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -18,8 +18,8 @@ #include "cpumap.h" #include "thread_map.h" #include "target.h" -#include "../../../include/linux/hw_breakpoint.h" -#include "../../../include/uapi/linux/perf_event.h" +#include +#include #include "perf_regs.h" #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 6f94d6dea00f..d99b476ef37c 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -3,7 +3,8 @@ #include #include -#include "../../../include/uapi/linux/perf_event.h" +#include +#include #include "types.h" #include "xyarray.h" #include "cgroup.h" diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 7daad237dea5..566b84c695c8 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1378,6 +1378,8 @@ static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused, str = tmp + 1; fprintf(fp, "# node%u cpu list : %s\n", c, str); + + str += strlen(str) + 1; } return; error: diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 879d215cdac9..9bc00783f24f 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -1,7 +1,7 @@ #ifndef __PERF_HEADER_H #define __PERF_HEADER_H -#include "../../../include/uapi/linux/perf_event.h" +#include #include #include #include "types.h" diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c index 516ecd9ddd6e..6ef213b35ecd 100644 --- a/tools/perf/util/parse-events-test.c +++ b/tools/perf/util/parse-events-test.c @@ -3,7 +3,7 @@ #include "evsel.h" #include "evlist.h" #include "sysfs.h" -#include "../../../include/linux/hw_breakpoint.h" +#include #define TEST_ASSERT_VAL(text, cond) \ do { \ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 75c7b0fca6d9..6b6d03e93c3d 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1,4 +1,4 @@ -#include "../../../include/linux/hw_breakpoint.h" +#include #include "util.h" #include "../perf.h" #include "evlist.h" diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 
839230ceb18b..2820c407adb2 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -7,7 +7,7 @@ #include #include #include "types.h" -#include "../../../include/uapi/linux/perf_event.h" +#include #include "types.h" struct list_head; diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 39f3abac7744..fdeb8ac7c5d2 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -2,7 +2,7 @@ #define __PMU_H #include -#include "../../../include/uapi/linux/perf_event.h" +#include enum { PERF_PMU_FORMAT_VALUE_CONFIG, diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index dd6426163ba6..0eae00ad5fe7 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -7,7 +7,7 @@ #include "symbol.h" #include "thread.h" #include -#include "../../../include/uapi/linux/perf_event.h" +#include struct sample_queue; struct ip_callchain; diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c index 2eeb51baf077..cfa906882e2c 100644 --- a/tools/perf/util/strbuf.c +++ b/tools/perf/util/strbuf.c @@ -90,17 +90,17 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...) if (!strbuf_avail(sb)) strbuf_grow(sb, 64); va_start(ap, fmt); - len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); va_end(ap); if (len < 0) - die("your vscnprintf is broken"); + die("your vsnprintf is broken"); if (len > strbuf_avail(sb)) { strbuf_grow(sb, len); va_start(ap, fmt); - len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); va_end(ap); if (len > strbuf_avail(sb)) { - die("this should not happen, your snprintf is broken"); + die("this should not happen, your vsnprintf is broken"); } } strbuf_setlen(sb, sb->len + len); diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index 96ce80a3743b..2964b96aa55f 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include @@ -1,8 +1,11 @@ -ifeq ("$(origin O)", "command line") +ifeq ($(origin O), command line) dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),) ABSOLUTE_O := $(shell cd $(O) ; pwd) - OUTPUT := $(ABSOLUTE_O)/ + OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/) COMMAND_O := O=$(ABSOLUTE_O) +ifeq ($(objtree),) + objtree := $(O) +endif endif ifneq ($(OUTPUT),) @@ -41,7 +44,16 @@ else NO_SUBDIR = : endif -QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir +# +# Define a callable command for descending to a new directory +# +# Call by doing: $(call descend,directory[,target]) +# +descend = \ + +mkdir -p $(OUTPUT)$(1) && \ + $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2) + +QUIET_SUBDIR0 = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir QUIET_SUBDIR1 = ifneq ($(findstring $(MAKEFLAGS),s),s) @@ -56,5 +68,10 @@ ifndef V $(MAKE) $(PRINT_DIR) -C $$subdir QUIET_FLEX = @echo ' ' FLEX $@; QUIET_BISON = @echo ' ' BISON $@; + + descend = \ + @echo ' ' DESCEND $(1); \ + mkdir -p $(OUTPUT)$(1) && \ + $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2) endif endif
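
The strbuf_addf() hunk above (tools/perf/util/strbuf.c) switches from vscnprintf() to vsnprintf() because the grow-and-retry logic depends on the return-value semantics of the standard vsnprintf(): when the destination is too small, it reports the full length the formatted string would have needed, so the caller can enlarge the buffer and format again. The kernel-style vscnprintf() it replaces reports only the number of characters actually stored, so the retry branch could never fire and output was silently truncated. Below is a minimal, self-contained sketch of that grow-and-retry pattern in plain C; the function and variable names (append_fmt, buf, len, cap) are illustrative only and are not taken from perf's strbuf implementation.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Append formatted text to a heap buffer, growing it on demand.
 * *buf points to *cap bytes (or is NULL with *cap == 0), *len is the
 * current string length.  Mirrors the retry logic that vsnprintf()'s
 * return value makes possible.
 */
static int append_fmt(char **buf, size_t *len, size_t *cap, const char *fmt, ...)
{
	va_list ap;
	int need;

	va_start(ap, fmt);
	/* vsnprintf() returns the length required, even when it truncates. */
	need = vsnprintf(*buf + *len, *cap - *len, fmt, ap);
	va_end(ap);
	if (need < 0)
		return -1;

	if ((size_t)need >= *cap - *len) {
		/* Not enough room: grow the buffer and format again. */
		size_t newcap = *len + (size_t)need + 1;
		char *tmp = realloc(*buf, newcap);

		if (!tmp)
			return -1;
		*buf = tmp;
		*cap = newcap;

		va_start(ap, fmt);
		need = vsnprintf(*buf + *len, *cap - *len, fmt, ap);
		va_end(ap);
		if (need < 0)
			return -1;
	}
	*len += (size_t)need;
	return 0;
}

Had the first call used a helper with vscnprintf()-style semantics (returning only the stored length), the "grow and retry" branch above would never be taken and the appended text would be cut short, which is the behaviour the patch corrects.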