VERSION = 3
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
NAME = Terrified Chipmunk
# *DOCUMENTATION*
# Clear a bunch of variables before executing the submake
tools/: FORCE
- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/
+ $(Q)mkdir -p $(objtree)/tools
+ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/
tools/%: FORCE
- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/ $*
+ $(Q)mkdir -p $(objtree)/tools
+ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/ $*
# Single targets
# ---------------------------------------------------------------------------
select CPU_FEROCEON
select GENERIC_CLOCKEVENTS
select PCI
+ select PCI_QUIRKS
select PLAT_ORION_LEGACY
help
Support for the following Marvell Kirkwood series SoCs:
.set_mode = sp804_set_mode,
.set_next_event = sp804_set_next_event,
.rating = 300,
- .cpumask = cpu_all_mask,
};
static struct irqaction sp804_timer_irq = {
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
evt->name = name;
evt->irq = irq;
+ evt->cpumask = cpu_possible_mask;
setup_irq(irq, &sp804_timer_irq);
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
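
As a quick sanity check of the reload arithmetic above, here is a user-space sketch with an assumed 1 MHz timer clock and HZ=100 (both values illustrative, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned long rate = 1000000;	/* assumed 1 MHz timer clock */
	unsigned long hz = 100;		/* assumed tick rate */

	/* 1000000 / 100 = 10000 timer ticks per jiffy */
	printf("clkevt_reload = %lu\n", DIV_ROUND_CLOSEST(rate, hz));
	return 0;
}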
static inline int irq_to_pmu(int irq)
{
- if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
+ if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
return irq - IRQ_DOVE_PMU_START;
return -EINVAL;
int pin = irq_to_pmu(d->irq);
u32 u;
+ /*
+ * The PMU mask register is not RW0C: it is RW. This means that
+ * the bits take whatever value is written to them; if you write
+ * a '1', you will set the interrupt.
+ *
+ * Unfortunately this means there is NO race free way to clear
+ * these interrupts.
+ *
+ * So, let's structure the code so that the window is as small as
+ * possible.
+ */
u = ~(1 << (pin & 31));
- writel(u, PMU_INTERRUPT_CAUSE);
+ u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
+ writel_relaxed(u, PMU_INTERRUPT_CAUSE);
}
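
A minimal user-space sketch of the read-modify-write ack above; cause_reg and ack_pin are illustrative stand-ins, not kernel API. The comment marks the residual window the patch narrows but cannot close:

#include <stdint.h>
#include <stdio.h>

static uint32_t cause_reg = 0x5;	/* pretend bits 0 and 2 are pending */

static void ack_pin(unsigned int pin)
{
	uint32_t u = ~(1u << (pin & 31));

	u &= cause_reg;			/* read: snapshot the pending bits */
	/* an interrupt raised by hardware here would be lost */
	cause_reg = u;			/* write back everything but our bit */
}

int main(void)
{
	ack_pin(0);
	printf("cause after ack: 0x%x\n", cause_reg);	/* 0x4: bit 2 survives */
	return 0;
}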
static struct irq_chip pmu_irq_chip = {
* Enable the IO window to be way up high, at 0xfffffc00
*/
local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01);
+ local_write_config(0x40, 4, 0x000080FF); /* No TRDY time limit */
} else {
printk("PCI: IXP4xx is target - No bus scan performed\n");
}
.pfn = __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
.length = IXP4XX_PCI_CFG_REGION_SIZE,
.type = MT_DEVICE
- },
-#ifdef CONFIG_DEBUG_LL
- { /* Debug UART mapping */
- .virtual = (unsigned long)IXP4XX_DEBUG_UART_BASE_VIRT,
- .pfn = __phys_to_pfn(IXP4XX_DEBUG_UART_BASE_PHYS),
- .length = IXP4XX_DEBUG_UART_REGION_SIZE,
+ }, { /* Queue Manager */
+ .virtual = (unsigned long)IXP4XX_QMGR_BASE_VIRT,
+ .pfn = __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS),
+ .length = IXP4XX_QMGR_REGION_SIZE,
.type = MT_DEVICE
- }
-#endif
+ },
};
void __init ixp4xx_map_io(void)
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/pci.h>
+#include <asm/system_info.h>
#define SLOT_ETHA 0x0B /* IDSEL = AD21 */
#define SLOT_ETHB 0x0C /* IDSEL = AD20 */
};
-static struct platform_device *device_tab[6] __initdata = {
+static struct platform_device *device_tab[7] __initdata = {
&device_flash, /* index 0 */
};
#else
mov \rp, #0
#endif
- orr \rv, \rp, #0xff000000 @ virtual
- orr \rv, \rv, #0x00b00000
+ orr \rv, \rp, #0xfe000000 @ virtual
+ orr \rv, \rv, #0x00f00000
orr \rp, \rp, #0xc8000000 @ physical
.endm
*
* 0x50000000 0x10000000 ioremap'd EXP BUS
*
- * 0x60000000 0x00004000 ioremap'd QMgr
+ * 0xC8000000 0x00013000 0xFEF00000 On-Chip Peripherals
*
- * 0xC0000000 0x00001000 0xffbff000 PCI CFG
+ * 0xC0000000 0x00001000 0xFEF13000 PCI CFG
*
- * 0xC4000000 0x00001000 0xffbfe000 EXP CFG
+ * 0xC4000000 0x00001000 0xFEF14000 EXP CFG
*
- * 0xC8000000 0x00013000 0xffbeb000 On-Chip Peripherals
+ * 0x60000000 0x00004000 0xFEF15000 QMgr
*/
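
A worked example of the fixed mapping documented above (the 0x3000 offset is hypothetical): under a static map, a virtual address is just the physical address displaced by a constant.

#include <stdio.h>

#define PERIPHERAL_BASE_PHYS	0xC8000000u	/* from the table above */
#define PERIPHERAL_BASE_VIRT	0xFEF00000u

int main(void)
{
	unsigned long phys = 0xC8000000u + 0x3000;	/* hypothetical register block */
	unsigned long virt = PERIPHERAL_BASE_VIRT + (phys - PERIPHERAL_BASE_PHYS);

	printf("phys 0x%08lx -> virt 0x%08lx\n", phys, virt);	/* 0xfef03000 */
	return 0;
}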
/*
* Queue Manager
*/
-#define IXP4XX_QMGR_BASE_PHYS (0x60000000)
-#define IXP4XX_QMGR_REGION_SIZE (0x00004000)
+#define IXP4XX_QMGR_BASE_PHYS 0x60000000
+#define IXP4XX_QMGR_BASE_VIRT IOMEM(0xFEF15000)
+#define IXP4XX_QMGR_REGION_SIZE 0x00004000
/*
- * Expansion BUS Configuration registers
+ * Peripheral space, including debug UART. Must be section-aligned so that
+ * it can be used with the low-level debug code.
*/
-#define IXP4XX_EXP_CFG_BASE_PHYS (0xC4000000)
-#define IXP4XX_EXP_CFG_BASE_VIRT IOMEM(0xFFBFE000)
-#define IXP4XX_EXP_CFG_REGION_SIZE (0x00001000)
+#define IXP4XX_PERIPHERAL_BASE_PHYS 0xC8000000
+#define IXP4XX_PERIPHERAL_BASE_VIRT IOMEM(0xFEF00000)
+#define IXP4XX_PERIPHERAL_REGION_SIZE 0x00013000
/*
* PCI Config registers
*/
-#define IXP4XX_PCI_CFG_BASE_PHYS (0xC0000000)
-#define IXP4XX_PCI_CFG_BASE_VIRT IOMEM(0xFFBFF000)
-#define IXP4XX_PCI_CFG_REGION_SIZE (0x00001000)
-
-/*
- * Peripheral space
- */
-#define IXP4XX_PERIPHERAL_BASE_PHYS (0xC8000000)
-#define IXP4XX_PERIPHERAL_BASE_VIRT IOMEM(0xFFBEB000)
-#define IXP4XX_PERIPHERAL_REGION_SIZE (0x00013000)
+#define IXP4XX_PCI_CFG_BASE_PHYS 0xC0000000
+#define IXP4XX_PCI_CFG_BASE_VIRT IOMEM(0xFEF13000)
+#define IXP4XX_PCI_CFG_REGION_SIZE 0x00001000
/*
- * Debug UART
- *
- * This is basically a remap of UART1 into a region that is section
- * aligned so that it * can be used with the low-level debug code.
+ * Expansion BUS Configuration registers
*/
-#define IXP4XX_DEBUG_UART_BASE_PHYS (0xC8000000)
-#define IXP4XX_DEBUG_UART_BASE_VIRT IOMEM(0xffb00000)
-#define IXP4XX_DEBUG_UART_REGION_SIZE (0x00001000)
+#define IXP4XX_EXP_CFG_BASE_PHYS 0xC4000000
+#define IXP4XX_EXP_CFG_BASE_VIRT 0xFEF14000
+#define IXP4XX_EXP_CFG_REGION_SIZE 0x00001000
#define IXP4XX_EXP_CS0_OFFSET 0x00
#define IXP4XX_EXP_CS1_OFFSET 0x04
static inline void qmgr_put_entry(unsigned int queue, u32 val)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
#if DEBUG_QMGR
BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
static inline u32 qmgr_get_entry(unsigned int queue)
{
u32 val;
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
val = __raw_readl(&qmgr_regs->acc[queue][0]);
#if DEBUG_QMGR
BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
static inline int __qmgr_get_stat1(unsigned int queue)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
>> ((queue & 7) << 2)) & 0xF;
}
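
The index arithmetic in __qmgr_get_stat1() is easier to see with concrete numbers: each stat1 word packs eight 4-bit fields, so queue >> 3 selects the word and (queue & 7) * 4 the nibble. A self-contained sketch with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stat1[2] = { 0x33333333, 0x33333533 };	/* example registers */
	unsigned int queue = 10;	/* word 1, nibble 2 */

	printf("stat = 0x%x\n",
	       (stat1[queue >> 3] >> ((queue & 7) << 2)) & 0xF);	/* 0x5 */
	return 0;
}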
static inline int __qmgr_get_stat2(unsigned int queue)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
BUG_ON(queue >= HALF_QUEUES);
return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
>> ((queue & 0xF) << 1)) & 0x3;
*/
static inline int qmgr_stat_below_low_watermark(unsigned int queue)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
if (queue >= HALF_QUEUES)
return (__raw_readl(&qmgr_regs->statne_h) >>
(queue - HALF_QUEUES)) & 0x01;
*/
static inline int qmgr_stat_full(unsigned int queue)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
if (queue >= HALF_QUEUES)
return (__raw_readl(&qmgr_regs->statf_h) >>
(queue - HALF_QUEUES)) & 0x01;
/* NPE mailbox_status value for reset */
#define RESET_MBOX_STAT 0x0000F0F0
-const char *npe_names[] = { "NPE-A", "NPE-B", "NPE-C" };
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
#define print_npe(pri, npe, fmt, ...) \
printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
EXPORT_SYMBOL(npe_names);
EXPORT_SYMBOL(npe_running);
#include <linux/module.h>
#include <mach/qmgr.h>
-struct qmgr_regs __iomem *qmgr_regs;
+static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
if (mem_res == NULL)
return -EBUSY;
- qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
- if (qmgr_regs == NULL) {
- err = -ENOMEM;
- goto error_map;
- }
-
/* reset qmgr registers */
for (i = 0; i < 4; i++) {
__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
error_irq2:
free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
- iounmap(qmgr_regs);
-error_map:
release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
return err;
}
free_irq(IRQ_IXP4XX_QM2, NULL);
synchronize_irq(IRQ_IXP4XX_QM1);
synchronize_irq(IRQ_IXP4XX_QM2);
- iounmap(qmgr_regs);
release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");
-EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
return 1;
}
+/*
+ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER. When it
+ * is operating as a root complex, this needs to be switched to
+ * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BARs on
+ * the device. Decoding setup is handled by the orion code.
+ */
static void __devinit rc_pci_fixup(struct pci_dev *dev)
{
- /*
- * Prevent enumeration of root complex.
- */
if (dev->bus->parent == NULL && dev->devfn == 0) {
int i;
+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dev->resource[i].start = 0;
dev->resource[i].end = 0;
pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
chan->number, __func__, buf);
- if (chan->end == NULL)
+ if (chan->end == NULL) {
pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
chan->number, __func__, chan);
-
- chan->end->next = buf;
- chan->end = buf;
+ } else {
+ chan->end->next = buf;
+ chan->end = buf;
+ }
}
/* if necessary, update the next buffer field */
__SYSCALL(368, compat_sys_fanotify_mark_wrapper)
__SYSCALL(369, sys_prlimit64)
__SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, sys_open_by_handle_at)
+__SYSCALL(371, compat_sys_open_by_handle_at)
__SYSCALL(372, sys_clock_adjtime)
__SYSCALL(373, sys_syncfs)
--- /dev/null
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SETUP_H
+#define _ASM_C6X_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#ifndef __ASSEMBLY__
+extern char c6x_command_line[COMMAND_LINE_SIZE];
+
+extern int c6x_add_memory(phys_addr_t start, unsigned long size);
+
+extern unsigned long ram_start;
+extern unsigned long ram_end;
+
+extern int c6x_num_cores;
+extern unsigned int c6x_silicon_rev;
+extern unsigned int c6x_devstat;
+extern unsigned char c6x_fuse_mac[6];
+
+extern void machine_init(unsigned long dt_ptr);
+extern void time_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_C6X_SETUP_H */
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += kvm_para.h
+
header-y += byteorder.h
header-y += kvm_para.h
header-y += ptrace.h
+++ /dev/null
-#include <asm-generic/kvm_para.h>
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_SETUP_H
-#define _ASM_C6X_SETUP_H
+#ifndef _UAPI_ASM_C6X_SETUP_H
+#define _UAPI_ASM_C6X_SETUP_H
#define COMMAND_LINE_SIZE 1024
-#ifndef __ASSEMBLY__
-extern char c6x_command_line[COMMAND_LINE_SIZE];
-
-extern int c6x_add_memory(phys_addr_t start, unsigned long size);
-
-extern unsigned long ram_start;
-extern unsigned long ram_end;
-
-extern int c6x_num_cores;
-extern unsigned int c6x_silicon_rev;
-extern unsigned int c6x_devstat;
-extern unsigned char c6x_fuse_mac[6];
-
-extern void machine_init(unsigned long dt_ptr);
-extern void time_init(void);
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_C6X_SETUP_H */
+#endif /* _UAPI_ASM_C6X_SETUP_H */
[A1] BNOP .S1 work_resched,5
work_notifysig:
+ ;; enable interrupts for do_notify_resume()
+ UNMASK_INT B2
B .S2 do_notify_resume
LDW .D2T1 *+SP(REGS__END+8),A6 ; syscall flag
ADDKPC .S2 resume_userspace,B3,1
ENDPROC(ret_from_kernel_execve)
;;
- ;; These are the interrupt handlers, responsible for calling __do_IRQ()
- ;; int6 is used for syscalls (see _system_call entry)
+ ;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
;;
.macro SAVE_ALL_INT
SAVE_ALL IRP,ITSR
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1))
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1) == -EFAULT)
goto badframe;
return rval;
{
struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
sigset_t set;
- stack_t st;
/*
* Since we stacked the signal on a dword boundary,
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
- if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
- goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- do_sigaltstack(&st, NULL, regs->sp);
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
+ goto badframe;
return regs->gpr[11];
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
- ENTRY_SAME(open) /* 5 */
+ ENTRY_COMP(open) /* 5 */
ENTRY_SAME(close)
ENTRY_SAME(waitpid)
ENTRY_SAME(creat)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
- jg sys_open # branch to system call
+ jg compat_sys_open # branch to system call
ENTRY(sys32_close_wrapper)
llgfr %r2,%r2 # unsigned int
{
struct rt_sigframe __user *frame;
sigset_t set;
- stack_t st;
int sig;
/* Always make any pending restarted system calls return -EINTR */
else if (sig)
force_sig(sig, current);
- if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
- goto badframe;
-
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]);
+ if (do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs->regs[0]) == -EFAULT)
+ goto badframe;
regs->is_syscall = 0;
__asm__ __volatile__(
{
struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
sigset_t set;
- stack_t __user st;
long long ret;
/* Always make any pending restarted system calls return -EINTR */
goto badframe;
regs->pc -= 4;
- if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
- goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- do_sigaltstack(&st, NULL, REF_REG_SP);
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, REF_REG_SP) == -EFAULT)
+ goto badframe;
return (int) ret;
"err = %d\n", ret);
force_sig(SIGKILL, current);
}
+ get_safe_registers(current_pt_regs()->regs.gp,
+ current_pt_regs()->regs.fp);
__switch_mm(¤t->mm->context.id);
}
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
{
- get_safe_registers(regs->regs.gp, regs->regs.fp);
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
current->ptrace &= ~PT_DTRACE;
header-y += msr-index.h
header-y += msr.h
header-y += mtrr.h
+header-y += perf_regs.h
header-y += posix_types_32.h
header-y += posix_types_64.h
header-y += posix_types_x32.h
header-y += processor-flags.h
header-y += ptrace-abi.h
header-y += sigcontext32.h
+header-y += svm.h
header-y += ucontext.h
header-y += vm86.h
+header-y += vmx.h
header-y += vsyscall.h
genhdr-y += unistd_32.h
typedef struct { int preload; } fpu_switch_t;
/*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
+ * Must be run with preemption disabled: this clears the fpu_owner_task
+ * on this CPU.
*
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, its state will still be saved
+ * at the next context switch.
*/
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+ per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
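
A toy user-space model of the ownership test this helper invalidates; the names and the single-pointer stand-in for the per-cpu fpu_owner_task are illustrative, not the kernel's:

#include <stdio.h>

struct task { int last_cpu; };			/* toy stand-in for task_struct */

static struct task *fpu_owner_task;		/* stand-in for the per-cpu variable */

static int lazy_restore_ok(struct task *next, int cpu)
{
	return next == fpu_owner_task && next->last_cpu == cpu;
}

static void disable_lazy_restore(void)
{
	fpu_owner_task = NULL;	/* a freshly onlined CPU owns no task's FPU state */
}

int main(void)
{
	struct task t = { .last_cpu = 0 };

	fpu_owner_task = &t;
	printf("%d\n", lazy_restore_ok(&t, 0));	/* 1: restore can be skipped */
	disable_lazy_restore();
	printf("%d\n", lazy_restore_ok(&t, 0));	/* 0: full restore needed */
	return 0;
}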
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
return new == this_cpu_read_stable(fpu_owner_task) &&
* be using the global pages.
*
* NOTE! If we are on a 486 we may have no cr4 at all!
- * Specifically, cr4 exists if and only if CPUID exists,
- * which in turn exists if and only if EFLAGS.ID exists.
+ * Specifically, cr4 exists if and only if CPUID exists
+ * and has flags other than the FPU flag set.
*/
movl $X86_EFLAGS_ID,%ecx
pushl %ecx
testl %ecx,%eax
jz 6f # No ID flag = no CPUID = no CR4
+ movl $1,%eax
+ cpuid
+ andl $~1,%edx # Ignore CPUID.FPU
+ jz 6f # No flags or only CPUID.FPU = no CR4
+
movl pa(mmu_cr4_features),%eax
movl %eax,%cr4
{
bool step;
+ /*
+ * We may come here right after calling schedule_user()
+ * or do_notify_resume(), in which case we can be in RCU
+ * user mode.
+ */
+ rcu_user_exit();
+
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+ /* the FPU context is blank, nobody can own it */
+ __cpu_disable_lazy_restore(cpu);
+
err = do_boot_cpu(apicid, cpu, tidle);
if (err) {
pr_debug("do_boot_cpu failed %d\n", err);
_ASM_EXTABLE(1b, 3b) \
: "=m" ((ctxt)->eflags), "=&r" (_tmp), \
"+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
- : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
- "a" (*rax), "d" (*rdx)); \
+ : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val)); \
} while (0)
/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
res = loader_verify(lb, dev, rec);
if (res)
break;
+ rec = ihex_next_binrec(rec);
}
release_firmware(fw);
if (!res)
If unsure, say Y.
config HW_RANDOM_IXP4XX
- tristate "Intel IXP4xx NPU HW Random Number Generator support"
+ tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
depends on HW_RANDOM && ARCH_IXP4XX
default HW_RANDOM
---help---
- This driver provides kernel-side support for the Random
- Number Generator hardware found on the Intel IXP4xx NPU.
+ This driver provides kernel-side support for the Pseudo-Random
+ Number Generator hardware found on the Intel IXP45x/46x NPU.
To compile this driver as a module, choose M here: the
module will be called ixp4xx-rng.
void __iomem * rng_base;
int err;
+ if (!cpu_is_ixp46x()) /* includes IXP455 */
+ return -ENOSYS;
+
rng_base = ioremap(0x70002100, 4);
if (!rng_base)
return -ENOMEM;
module_exit(ixp4xx_rng_exit);
MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
+MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
MODULE_LICENSE("GPL");
static const struct file_operations raw_fops = {
.read = do_sync_read,
- .aio_read = blkdev_aio_read,
+ .aio_read = generic_file_aio_read,
.write = do_sync_write,
.aio_write = blkdev_aio_write,
.fsync = blkdev_fsync,
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
select CRYPTO_DES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
}
if (cipher_cfg & MOD_AES) {
switch (key_len) {
- case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
- case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
- case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
- default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
+ case 16: keylen_cfg = MOD_AES128; break;
+ case 24: keylen_cfg = MOD_AES192; break;
+ case 32: keylen_cfg = MOD_AES256; break;
+ default:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else if (cipher_cfg & MOD_3DES) {
dimm->cschannel = chn;
/* Increment csrow location */
- row++;
- if (row == tot_csrows) {
- row = 0;
+ if (layers[0].is_virt_csrow) {
chn++;
+ if (chn == tot_channels) {
+ chn = 0;
+ row++;
+ }
+ } else {
+ row++;
+ if (row == tot_csrows) {
+ row = 0;
+ chn++;
+ }
}
/* Increment dimm location */
[0] = "Memory Write error on non-redundant retry or "
"FBD configuration Write error on retry",
};
-#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28))
-#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
+#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3)
+#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
#define FERR_NF_FBD 0xa0
static const char *ferr_nf_fbd_name[] = {
[1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
[0] = "Uncorrectable Data ECC on Replay",
};
-#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
+#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
(1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
(1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_nf_fbd_name));
specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
- branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
+ branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
REDMEMA, &syndrome);
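
Why the shift matters in the GET_FBD_*_IDX fix: the old macro returned the raw masked bits, so a comparison like == 2 could never be true. A standalone check with an example register value:

#include <stdint.h>
#include <stdio.h>

#define OLD_GET_FBD_NF_IDX(fbderr)	((fbderr) & (3 << 28))
#define NEW_GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)

int main(void)
{
	uint32_t fbderr = 2u << 28;	/* branch field = 2 in bits 29:28 */

	printf("old: 0x%08x\n", OLD_GET_FBD_NF_IDX(fbderr));	/* 0x20000000 */
	printf("new: %u\n", NEW_GET_FBD_NF_IDX(fbderr));	/* 2 */
	return 0;
}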
struct device_attribute *mattr, \
const char *data, size_t count) \
{ \
- struct mem_ctl_info *mci = to_mci(dev); \
+ struct mem_ctl_info *mci = dev_get_drvdata(dev); \
struct i7core_pvt *pvt; \
long value; \
int rc; \
struct device_attribute *mattr, \
char *data) \
{ \
- struct mem_ctl_info *mci = to_mci(dev); \
+ struct mem_ctl_info *mci = dev_get_drvdata(dev); \
struct i7core_pvt *pvt; \
\
pvt = mci->pvt_info; \
struct device_attribute *mattr, \
char *data) \
{ \
- struct mem_ctl_info *mci = to_mci(dev); \
+ struct mem_ctl_info *mci = dev_get_drvdata(dev); \
struct i7core_pvt *pvt = mci->pvt_info; \
\
edac_dbg(1, "\n"); \
static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev, void __iomem *mch_window)
{
- static const char *labels[4] = {
- "DIMM A1", "DIMM A2",
- "DIMM B1", "DIMM B2"
- };
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
dimm = mci->csrows[index]->channels[chan]->dimm;
dimm->nr_pages = nr_pages / csrow->nr_channels;
- strncpy(csrow->channels[chan]->dimm->label,
- labels[(index >> 1) + (chan * 2)],
- EDAC_MC_LABEL_LEN);
+
+ snprintf(csrow->channels[chan]->dimm->label, EDAC_MC_LABEL_LEN, "DIMM %c%d",
+ (chan == 0) ? 'A' : 'B',
+ index);
dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
dimm->dtype = i82975x_dram_type(mch_window, index);
dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
* already updated or not by exynos_drm_encoder_dpms function.
*/
exynos_encoder->updated = true;
+
+ /*
+ * In case of setcrtc, there is no way to update the encoder's dpms,
+ * so update it here.
+ */
+ exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
* because the setting for disabling the overlay will be updated
* at vsync.
*/
- if (overlay_ops->wait_for_vblank)
+ if (overlay_ops && overlay_ops->wait_for_vblank)
overlay_ops->wait_for_vblank(manager->dev);
}
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+ fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
+ offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
unsigned int timing_base;
};
-struct fimd_driver_data exynos4_fimd_driver_data = {
+static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
};
-struct fimd_driver_data exynos5_fimd_driver_data = {
+static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
};
return ret;
plane->crtc = crtc;
- plane->fb = crtc->fb;
exynos_plane_commit(plane);
exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
- "supported, assume %dbpp panel color "
- "depth.\n",
- dev_priv->edp.bpp);
- }
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
-
- /* eDP data */
- dev_priv->edp.bpp = 18;
}
static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
/* Use VBT settings if we have an eDP panel */
unsigned int edp_bpc = dev_priv->edp.bpp / 3;
- if (edp_bpc < display_bpc) {
+ if (edp_bpc && edp_bpc < display_bpc) {
DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
display_bpc = edp_bpc;
}
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
- if (INTEL_INFO(dev)->gen == 5) {
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable rc6 on ilk if VT-d is on. */
- if (intel_iommu_gfx_mapped)
- return false;
-#endif
- DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
- return INTEL_RC6_ENABLE;
- }
+ /* Disable RC6 on Ironlake */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
if (IS_HASWELL(dev)) {
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
- intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.cloneable = false;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.cloneable = true;
-
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
return true;
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
- intel_sdvo->base.cloneable = false;
-
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
goto err_output;
}
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case, we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = false;
+
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
- } else if (ASIC_IS_AVIVO(rdev)) {
- /* in DP mode, the DP ref clock can come from either PPLL
- * depending on the asic:
- * DCE3: PPLL1 or PPLL2
- */
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
- /* use the same PPLL for all DP monitors */
- pll = radeon_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = radeon_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- /* all other cases */
- pll_in_use = radeon_get_pll_use_mask(crtc);
- /* the order shouldn't matter here, but we probably
- * need this until we have atomic modeset
- */
- if (rdev->flags & RADEON_IS_IGP) {
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- } else {
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- }
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
} else {
/* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+ /* some atombios code (observed on some DCE2/DCE3 parts) has a bug:
+ * the matching between pll and crtc is done through
+ * PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the atombios code uses the
+ * pll id (1 or 2) to select which register to write. I.e. if using
+ * pll1 it will use PCLK_CRTC1_CNTL (0x480) and if using pll2 it will
+ * use PCLK_CRTC2_CNTL (0x484); it then uses the crtc id to choose
+ * which value to write, which is the reverse order from the register
+ * logic. So the only cases that work are when the pll id is the same
+ * as the crtc id, or when both pll and crtc are enabled and both use
+ * the same clock.
+ *
+ * So just return the crtc id as if crtc and pll were hard linked
+ * together, even if they aren't.
+ */
return radeon_crtc->crtc_id;
}
}
struct r1conf *conf = mddev->private;
struct bio *bio;
- if (from_schedule) {
+ if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
struct r10conf *conf = mddev->private;
struct bio *bio;
- if (from_schedule) {
+ if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
task_pid_nr(current), gsc->state, gsc->m2m.refcnt);
- if (mutex_lock_interruptible(&gsc->lock))
- return -ERESTARTSYS;
+ mutex_lock(&gsc->lock);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
gsc_ctrls_delete(ctx);
gsc->vdev.ioctl_ops = &gsc_m2m_ioctl_ops;
gsc->vdev.release = video_device_release_empty;
gsc->vdev.lock = &gsc->lock;
+ gsc->vdev.vfl_dir = VFL_DIR_M2M;
snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
GSC_MODULE_NAME, gsc->id);
#define GSC_IN_ROT_YFLIP (2 << 16)
#define GSC_IN_ROT_XFLIP (1 << 16)
#define GSC_IN_RGB_TYPE_MASK (3 << 14)
-#define GSC_IN_RGB_HD_WIDE (3 << 14)
-#define GSC_IN_RGB_HD_NARROW (2 << 14)
-#define GSC_IN_RGB_SD_WIDE (1 << 14)
-#define GSC_IN_RGB_SD_NARROW (0 << 14)
+#define GSC_IN_RGB_HD_NARROW (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (2 << 14)
+#define GSC_IN_RGB_SD_NARROW (1 << 14)
+#define GSC_IN_RGB_SD_WIDE (0 << 14)
#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
-#define GSC_OUT_RGB_HD_NARROW (3 << 10)
-#define GSC_OUT_RGB_HD_WIDE (2 << 10)
-#define GSC_OUT_RGB_SD_NARROW (1 << 10)
-#define GSC_OUT_RGB_SD_WIDE (0 << 10)
+#define GSC_OUT_RGB_HD_WIDE (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (2 << 10)
+#define GSC_OUT_RGB_SD_WIDE (1 << 10)
+#define GSC_OUT_RGB_SD_NARROW (0 << 10)
#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ mutex_lock(&fimc->lock);
if (--fimc->vid_cap.refcnt == 0) {
clear_bit(ST_CAPT_BUSY, &fimc->state);
if (ret)
return ret;
+ fimc->pipeline_ops = v4l2_get_subdev_hostdata(sd);
+
ret = fimc_register_capture_device(fimc, sd->v4l2_dev);
- if (ret)
+ if (ret) {
fimc_unregister_m2m_device(fimc);
+ fimc->pipeline_ops = NULL;
+ }
return ret;
}
if (video_is_registered(&fimc->vid_cap.vfd)) {
video_unregister_device(&fimc->vid_cap.vfd);
media_entity_cleanup(&fimc->vid_cap.vfd.entity);
+ fimc->pipeline_ops = NULL;
}
kfree(fimc->vid_cap.ctx);
fimc->vid_cap.ctx = NULL;
struct fimc_lite *fimc = video_drvdata(file);
int ret;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ mutex_lock(&fimc->lock);
if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
clear_bit(ST_FLITE_IN_USE, &fimc->state);
return ret;
video_set_drvdata(vfd, fimc);
+ fimc->pipeline_ops = v4l2_get_subdev_hostdata(sd);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
media_entity_cleanup(&vfd->entity);
+ fimc->pipeline_ops = NULL;
return ret;
}
if (video_is_registered(&fimc->vfd)) {
video_unregister_device(&fimc->vfd);
media_entity_cleanup(&fimc->vfd.entity);
+ fimc->pipeline_ops = NULL;
}
}
dbg("pid: %d, state: 0x%lx, refcnt= %d",
task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ mutex_lock(&fimc->lock);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
fimc_ctrls_delete(ctx);
sd = &fimc->vid_cap.subdev;
sd->grp_id = FIMC_GROUP_ID;
+ v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops);
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (ret) {
return ret;
}
- fimc->pipeline_ops = &fimc_pipeline_ops;
fmd->fimc[fimc->id] = fimc;
return 0;
}
return 0;
fimc->subdev.grp_id = FLITE_GROUP_ID;
+ v4l2_set_subdev_hostdata(&fimc->subdev, (void *)&fimc_pipeline_ops);
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, &fimc->subdev);
if (ret) {
return ret;
}
- fimc->pipeline_ops = &fimc_pipeline_ops;
fmd->fimc_lite[fimc->index] = fimc;
return 0;
}
ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
get_consumed_stream, dev);
if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
- s5p_mfc_hw_call(dev->mfc_ops,
- get_dec_frame_type, dev) ==
- S5P_FIMV_DECODE_FRAME_P_FRAME
- && ctx->consumed_stream + STUFF_BYTE <
- src_buf->b->v4l2_planes[0].bytesused) {
+ ctx->consumed_stream + STUFF_BYTE <
+ src_buf->b->v4l2_planes[0].bytesused) {
/* Run MFC again on the same buffer */
mfc_debug(2, "Running again the same buffer\n");
ctx->after_packed_pb = 1;
int s5p_mfc_get_dec_y_adr_v6(struct s5p_mfc_dev *dev)
{
- return mfc_read(dev, S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6);
+ return mfc_read(dev, S5P_FIMV_D_DECODED_LUMA_ADDR_V6);
}
int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
* until the request succeeds or until the allocation size falls below
* the system page size. This attempts to make sure it does not adversely
* impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
*
* Note, this function also makes sure that the allocated buffer is aligned to
* the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
*/
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
- gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
- __GFP_NORETRY | __GFP_NO_KSWAPD;
+ gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
void *kbuf;
/*-------------------------- Device entry points ----------------------------*/
+static void bond_work_init_all(struct bonding *bond)
+{
+ INIT_DELAYED_WORK(&bond->mcast_work,
+ bond_resend_igmp_join_requests_delayed);
+ INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+ INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
+ else
+ INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
+ INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+}
+
+static void bond_work_cancel_all(struct bonding *bond)
+{
+ cancel_delayed_work_sync(&bond->mii_work);
+ cancel_delayed_work_sync(&bond->arp_work);
+ cancel_delayed_work_sync(&bond->alb_work);
+ cancel_delayed_work_sync(&bond->ad_work);
+ cancel_delayed_work_sync(&bond->mcast_work);
+}
+
static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
}
read_unlock(&bond->lock);
- INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
+ bond_work_init_all(bond);
if (bond_is_lb(bond)) {
/* bond_alb_initialize must be called before the timer
* is started.
*/
- if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
- /* something went wrong - fail the open operation */
+ if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
return -ENOMEM;
- }
-
- INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
queue_delayed_work(bond->wq, &bond->alb_work, 0);
}
- if (bond->params.miimon) { /* link check interval, in milliseconds. */
- INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+ if (bond->params.miimon) /* link check interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->mii_work, 0);
- }
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
- INIT_DELAYED_WORK(&bond->arp_work,
- bond_activebackup_arp_mon);
- else
- INIT_DELAYED_WORK(&bond->arp_work,
- bond_loadbalance_arp_mon);
-
queue_delayed_work(bond->wq, &bond->arp_work, 0);
if (bond->params.arp_validate)
bond->recv_probe = bond_arp_rcv;
}
if (bond->params.mode == BOND_MODE_8023AD) {
- INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
queue_delayed_work(bond->wq, &bond->ad_work, 0);
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
struct bonding *bond = netdev_priv(bond_dev);
write_lock_bh(&bond->lock);
-
bond->send_peer_notif = 0;
-
write_unlock_bh(&bond->lock);
- if (bond->params.miimon) { /* link check interval, in milliseconds. */
- cancel_delayed_work_sync(&bond->mii_work);
- }
-
- if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
- cancel_delayed_work_sync(&bond->arp_work);
- }
-
- switch (bond->params.mode) {
- case BOND_MODE_8023AD:
- cancel_delayed_work_sync(&bond->ad_work);
- break;
- case BOND_MODE_TLB:
- case BOND_MODE_ALB:
- cancel_delayed_work_sync(&bond->alb_work);
- break;
- default:
- break;
- }
-
- if (delayed_work_pending(&bond->mcast_work))
- cancel_delayed_work_sync(&bond->mcast_work);
-
+ bond_work_cancel_all(bond);
if (bond_is_lb(bond)) {
/* Must be called only after all
* slaves have been released
bond_dev->features |= bond_dev->hw_features;
}
-static void bond_work_cancel_all(struct bonding *bond)
-{
- if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
- cancel_delayed_work_sync(&bond->mii_work);
-
- if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
- cancel_delayed_work_sync(&bond->arp_work);
-
- if (bond->params.mode == BOND_MODE_ALB &&
- delayed_work_pending(&bond->alb_work))
- cancel_delayed_work_sync(&bond->alb_work);
-
- if (bond->params.mode == BOND_MODE_8023AD &&
- delayed_work_pending(&bond->ad_work))
- cancel_delayed_work_sync(&bond->ad_work);
-
- if (delayed_work_pending(&bond->mcast_work))
- cancel_delayed_work_sync(&bond->mcast_work);
-}
-
/*
* Destroy a bonding device.
* Must be under rtnl_lock when this function is called.
arp_ip_count++) {
/* not complete check, but should be good enough to
catch mistakes */
- if (!isdigit(arp_ip_target[arp_ip_count][0])) {
+ __be32 ip = in_aton(arp_ip_target[arp_ip_count]);
+ if (!isdigit(arp_ip_target[arp_ip_count][0]) ||
+ ip == 0 || ip == htonl(INADDR_BROADCAST)) {
pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
arp_ip_target[arp_ip_count]);
arp_interval = 0;
} else {
- __be32 ip = in_aton(arp_ip_target[arp_ip_count]);
arp_target[arp_ip_count] = ip;
}
}
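
The same sanity check can be exercised in user space, substituting inet_addr() for the kernel's in_aton() (an assumption for illustration only): all-zeros and broadcast targets are now rejected.

#include <arpa/inet.h>
#include <ctype.h>
#include <stdio.h>

static int arp_target_ok(const char *s)
{
	in_addr_t ip = inet_addr(s);	/* user-space stand-in for in_aton() */

	return isdigit((unsigned char)s[0]) &&
	       ip != 0 && ip != htonl(INADDR_BROADCAST);
}

int main(void)
{
	printf("10.0.0.1        -> %d\n", arp_target_ok("10.0.0.1"));		/* 1 */
	printf("0.0.0.0         -> %d\n", arp_target_ok("0.0.0.0"));		/* 0 */
	printf("255.255.255.255 -> %d\n", arp_target_ok("255.255.255.255"));	/* 0 */
	return 0;
}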
int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ if (!rtnl_trylock())
+ return restart_syscall();
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no arp_interval value specified.\n",
bond->dev->name);
pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
- if (delayed_work_pending(&bond->mii_work)) {
- cancel_delayed_work(&bond->mii_work);
- flush_workqueue(bond->wq);
- }
}
if (!bond->params.arp_targets[0]) {
pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
* timer will get fired off when the open function
* is called.
*/
- if (!delayed_work_pending(&bond->arp_work)) {
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
- INIT_DELAYED_WORK(&bond->arp_work,
- bond_activebackup_arp_mon);
- else
- INIT_DELAYED_WORK(&bond->arp_work,
- bond_loadbalance_arp_mon);
-
- queue_delayed_work(bond->wq, &bond->arp_work, 0);
- }
+ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ if (!rtnl_trylock())
+ return restart_syscall();
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no miimon value specified.\n",
bond->dev->name);
bond->params.arp_validate =
BOND_ARP_VALIDATE_NONE;
}
- if (delayed_work_pending(&bond->arp_work)) {
- cancel_delayed_work(&bond->arp_work);
- flush_workqueue(bond->wq);
- }
}
if (bond->dev->flags & IFF_UP) {
* timer will get fired off when the open function
* is called.
*/
- if (!delayed_work_pending(&bond->mii_work)) {
- INIT_DELAYED_WORK(&bond->mii_work,
- bond_mii_monitor);
- queue_delayed_work(bond->wq,
- &bond->mii_work, 0);
- }
+ cancel_delayed_work_sync(&bond->arp_work);
+ queue_delayed_work(bond->wq, &bond->mii_work, 0);
}
}
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
goto out;
}
+ read_lock(&bond->lock);
bond_for_each_slave(bond, slave, i) {
if (!bond_is_active_slave(slave)) {
if (new_value)
slave->inactive = 1;
}
}
+ read_unlock(&bond->lock);
out:
return ret;
}
mc->pdev->dev.can.state = new_state;
if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
+ struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+
peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
- skb->tstamp = timeval_to_ktime(tv);
+ hwts->hwtstamp = timeval_to_ktime(tv);
}
netif_rx(skb);
struct sk_buff *skb;
struct can_frame *cf;
struct timeval tv;
+ struct skb_shared_hwtstamps *hwts;
skb = alloc_can_skb(mc->netdev, &cf);
if (!skb)
/* convert timestamp into kernel time */
peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
- skb->tstamp = timeval_to_ktime(tv);
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = timeval_to_ktime(tv);
/* push the skb */
netif_rx(skb);
struct can_frame *can_frame;
struct sk_buff *skb;
struct timeval tv;
+ struct skb_shared_hwtstamps *hwts;
skb = alloc_can_skb(netdev, &can_frame);
if (!skb)
memcpy(can_frame->data, rx->data, can_frame->can_dlc);
peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
- skb->tstamp = timeval_to_ktime(tv);
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = timeval_to_ktime(tv);
netif_rx(skb);
netdev->stats.rx_packets++;
u8 err_mask = 0;
struct sk_buff *skb;
struct timeval tv;
+ struct skb_shared_hwtstamps *hwts;
/* nothing should be sent while in BUS_OFF state */
if (dev->can.state == CAN_STATE_BUS_OFF)
dev->can.state = new_state;
peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
- skb->tstamp = timeval_to_ktime(tv);
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = timeval_to_ktime(tv);
netif_rx(skb);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
if (err)
return err;
- memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
+ memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
return 0;
}
static int cp_alloc_rings (struct cp_private *cp)
{
+ struct device *d = &cp->pdev->dev;
void *mem;
+ int rc;
- mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
- &cp->ring_dma, GFP_KERNEL);
+ mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
cp->rx_ring = mem;
cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
- return cp_init_rings(cp);
+ rc = cp_init_rings(cp);
+ if (rc < 0)
+ dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+
+ return rc;
}
static void cp_clean_rings (struct cp_private *cp)
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
- dev->hw_features = NETIF_F_HW_VLAN_TX |
+ dev->hw_features = TEAM_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
dev->features |= dev->hw_features;
}
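
The mask expression above keeps NETIF_F_HW_CSUM while clearing the protocol-specific checksum flags. A bitmask sanity check with illustrative flag values (not the kernel's):

#include <stdio.h>

#define F_IP_CSUM	(1u << 0)	/* illustrative values only */
#define F_IPV6_CSUM	(1u << 1)
#define F_HW_CSUM	(1u << 2)
#define F_ALL_CSUM	(F_IP_CSUM | F_IPV6_CSUM | F_HW_CSUM)

int main(void)
{
	unsigned int hw_features = F_ALL_CSUM;

	hw_features &= ~(F_ALL_CSUM & ~F_HW_CSUM);
	printf("0x%x\n", hw_features);	/* only F_HW_CSUM remains */
	return 0;
}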
},
/* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
platform_set_drvdata(pdev, port);
- netdev_info(dev, "HSS-%i\n", port->id);
+ netdev_info(dev, "initialized\n");
return 0;
err_free_netdev:
* As a consequence, it's not as complicated as it sounds, just add
* any lower rates to the ACK rate bitmap.
*/
- if (IWL_RATE_11M_INDEX < lowest_present_ofdm)
- ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
- if (IWL_RATE_5M_INDEX < lowest_present_ofdm)
- ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
- if (IWL_RATE_2M_INDEX < lowest_present_ofdm)
- ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_11M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_5M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_2M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
/* 1M already there or needed so always add */
cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
return vq;
}
-static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
- struct rproc *rproc = vdev_to_rproc(vdev);
struct rproc_vring *rvring;
- /* power down the remote processor before deleting vqs */
- rproc_shutdown(rproc);
-
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
rvring = vq->priv;
rvring->vq = NULL;
}
}
+static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+{
+ struct rproc *rproc = vdev_to_rproc(vdev);
+
+ /* power down the remote processor before deleting vqs */
+ rproc_shutdown(rproc);
+
+ __rproc_virtio_del_vqs(vdev);
+}
+
static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
return 0;
error:
- rproc_virtio_del_vqs(vdev);
+ __rproc_virtio_del_vqs(vdev);
return ret;
}
static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
{
/* leave rtc running, but disable irqs */
- struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);
- tps65910_rtc_alarm_irq_enable(&rtc->dev, 0);
+ tps65910_rtc_alarm_irq_enable(&pdev->dev, 0);
- rtc_device_unregister(rtc);
+ rtc_device_unregister(tps_rtc->rtc);
return 0;
}
/*
 * If the received CDB has already been aborted, stop processing it here.
*/
- if (transport_check_aborted_status(cmd, 1))
+ if (transport_check_aborted_status(cmd, 1)) {
+ complete(&cmd->t_transport_stop_comp);
return;
+ }
/*
 * Determine if IOCTL context caller is requesting the stopping of this
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
}
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
- _iov->iov_len = min((u64)len, size);
+ _iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
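
The arithmetic behind the "- s" fix, with illustrative numbers for a transfer that spans two memory regions: without subtracting the bytes already mapped, the second iov overruns the requested length.

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long long len = 8192;		/* total transfer length */
	unsigned long long size = 65536;	/* bytes left in the second region */
	unsigned long long s = 4096;		/* already mapped via the first region */

	printf("old iov_len: %llu\n", MIN(len, size));		/* 8192: 4096 too many */
	printf("new iov_len: %llu\n", MIN(len - s, size));	/* 4096: the remainder */
	return 0;
}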
spin_unlock(&dst->wb.list_lock);
}
-sector_t blkdev_max_block(struct block_device *bdev)
-{
- sector_t retval = ~((sector_t)0);
- loff_t sz = i_size_read(bdev->bd_inode);
-
- if (sz) {
- unsigned int size = block_size(bdev);
- unsigned int sizebits = blksize_bits(size);
- retval = (sz >> sizebits);
- }
- return retval;
-}
-
/* Kill _all_ buffers and pagecache , dirty or not.. */
void kill_bdev(struct block_device *bdev)
{
int set_blocksize(struct block_device *bdev, int size)
{
- struct address_space *mapping;
-
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
return -EINVAL;
if (size < bdev_logical_block_size(bdev))
return -EINVAL;
- /* Prevent starting I/O or mapping the device */
- percpu_down_write(&bdev->bd_block_size_semaphore);
-
- /* Check that the block device is not memory mapped */
- mapping = bdev->bd_inode->i_mapping;
- mutex_lock(&mapping->i_mmap_mutex);
- if (mapping_mapped(mapping)) {
- mutex_unlock(&mapping->i_mmap_mutex);
- percpu_up_write(&bdev->bd_block_size_semaphore);
- return -EBUSY;
- }
- mutex_unlock(&mapping->i_mmap_mutex);
-
/* Don't change the size if it is same as current */
if (bdev->bd_block_size != size) {
sync_blockdev(bdev);
bdev->bd_inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
}
-
- percpu_up_write(&bdev->bd_block_size_semaphore);
-
return 0;
}
blkdev_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
- if (iblock >= blkdev_max_block(I_BDEV(inode))) {
- if (create)
- return -EIO;
-
- /*
- * for reads, we're just trying to fill a partial page.
- * return a hole, they will have to call get_block again
- * before they can fill it, and they will get -EIO at that
- * time
- */
- return 0;
- }
bh->b_bdev = I_BDEV(inode);
bh->b_blocknr = iblock;
set_buffer_mapped(bh);
return 0;
}
-static int
-blkdev_get_blocks(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create)
-{
- sector_t end_block = blkdev_max_block(I_BDEV(inode));
- unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
-
- if ((iblock + max_blocks) > end_block) {
- max_blocks = end_block - iblock;
- if ((long)max_blocks <= 0) {
- if (create)
- return -EIO; /* write fully beyond EOF */
- /*
- * It is a read which is fully beyond EOF. We return
- * a !buffer_mapped buffer
- */
- max_blocks = 0;
- }
- }
-
- bh->b_bdev = I_BDEV(inode);
- bh->b_blocknr = iblock;
- bh->b_size = max_blocks << inode->i_blkbits;
- if (max_blocks)
- set_buffer_mapped(bh);
- return 0;
-}
-
static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t offset, unsigned long nr_segs)
struct inode *inode = file->f_mapping->host;
return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
- nr_segs, blkdev_get_blocks, NULL, NULL, 0);
+ nr_segs, blkdev_get_block, NULL, NULL, 0);
}
int __sync_blockdev(struct block_device *bdev, int wait)
struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
if (!ei)
return NULL;
-
- if (unlikely(percpu_init_rwsem(&ei->bdev.bd_block_size_semaphore))) {
- kmem_cache_free(bdev_cachep, ei);
- return NULL;
- }
-
return &ei->vfs_inode;
}
struct inode *inode = container_of(head, struct inode, i_rcu);
struct bdev_inode *bdi = BDEV_I(inode);
- percpu_free_rwsem(&bdi->bdev.bd_block_size_semaphore);
-
kmem_cache_free(bdev_cachep, bdi);
}
return blkdev_ioctl(bdev, mode, cmd, arg);
}
-ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
-{
- ssize_t ret;
- struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
-
- percpu_down_read(&bdev->bd_block_size_semaphore);
-
- ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
-
- percpu_up_read(&bdev->bd_block_size_semaphore);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(blkdev_aio_read);
-
/*
* Write data to the block device. Only intended for the block device itself
* and the raw driver which basically is a fake block device.
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
- struct block_device *bdev = I_BDEV(file->f_mapping->host);
struct blk_plug plug;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
blk_start_plug(&plug);
-
- percpu_down_read(&bdev->bd_block_size_semaphore);
-
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
if (err < 0 && ret > 0)
ret = err;
}
-
- percpu_up_read(&bdev->bd_block_size_semaphore);
-
blk_finish_plug(&plug);
-
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_aio_write);
-static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int ret;
- struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
- percpu_down_read(&bdev->bd_block_size_semaphore);
-
- ret = generic_file_mmap(file, vma);
-
- percpu_up_read(&bdev->bd_block_size_semaphore);
-
- return ret;
-}
-
-static ssize_t blkdev_splice_read(struct file *file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
-{
- ssize_t ret;
- struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
- percpu_down_read(&bdev->bd_block_size_semaphore);
-
- ret = generic_file_splice_read(file, ppos, pipe, len, flags);
-
- percpu_up_read(&bdev->bd_block_size_semaphore);
-
- return ret;
-}
-
-static ssize_t blkdev_splice_write(struct pipe_inode_info *pipe,
- struct file *file, loff_t *ppos, size_t len,
- unsigned int flags)
-{
- ssize_t ret;
- struct block_device *bdev = I_BDEV(file->f_mapping->host);
-
- percpu_down_read(&bdev->bd_block_size_semaphore);
-
- ret = generic_file_splice_write(pipe, file, ppos, len, flags);
-
- percpu_up_read(&bdev->bd_block_size_semaphore);
-
- return ret;
-}
-
-
/*
* Try to release a page associated with block device when the system
* is under memory pressure.
.llseek = block_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = blkdev_aio_read,
+ .aio_read = generic_file_aio_read,
.aio_write = blkdev_aio_write,
- .mmap = blkdev_mmap,
+ .mmap = generic_file_mmap,
.fsync = blkdev_fsync,
.unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_blkdev_ioctl,
#endif
- .splice_read = blkdev_splice_read,
- .splice_write = blkdev_splice_write,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
};
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
attach_page_buffers(page, head);
}
+static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
+{
+ sector_t retval = ~((sector_t)0);
+ loff_t sz = i_size_read(bdev->bd_inode);
+
+ if (sz) {
+ unsigned int sizebits = blksize_bits(size);
+ retval = (sz >> sizebits);
+ }
+ return retval;
+}
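
With the block size now passed in, the result depends on the caller rather than on bdev state. A quick standalone check with assumed numbers (1 GiB device, 4 KiB blocks):

#include <stdio.h>

int main(void)
{
	long long sz = 1LL << 30;	/* assume a 1 GiB block device */
	unsigned int size = 4096;	/* caller's block size */
	unsigned int sizebits = __builtin_ctz(size);	/* stands in for blksize_bits() */

	printf("end block: %lld\n", sz >> sizebits);	/* 262144 */
	return 0;
}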
+
/*
* Initialise the state of a blockdev page's buffers.
*/
struct buffer_head *head = page_buffers(page);
struct buffer_head *bh = head;
int uptodate = PageUptodate(page);
- sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
+ sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
do {
if (!buffer_mapped(bh)) {
}
EXPORT_SYMBOL(unmap_underlying_metadata);
+/*
+ * Size is a power-of-two in the range 512..PAGE_SIZE,
+ * and the case we care about most is PAGE_SIZE.
+ *
+ * So this *could* possibly be written with those
+ * constraints in mind (relevant mostly if some
+ * architecture has a slow bit-scan instruction)
+ */
+static inline int block_size_bits(unsigned int blocksize)
+{
+ return ilog2(blocksize);
+}
+
+static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
+{
+ BUG_ON(!PageLocked(page));
+
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
+ return page_buffers(page);
+}
+
/*
* NOTE! All mapped/uptodate combinations are valid:
*
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
- const unsigned blocksize = 1 << inode->i_blkbits;
+ unsigned int blocksize, bbits;
int nr_underway = 0;
int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
WRITE_SYNC : WRITE);
- BUG_ON(!PageLocked(page));
-
- last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
-
- if (!page_has_buffers(page)) {
- create_empty_buffers(page, blocksize,
+ head = create_page_buffers(page, inode,
(1 << BH_Dirty)|(1 << BH_Uptodate));
- }
/*
* Be very careful. We have no exclusion from __set_page_dirty_buffers
* handle that here by just cleaning them.
*/
- block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- head = page_buffers(page);
bh = head;
+ blocksize = bh->b_size;
+ bbits = block_size_bits(blocksize);
+
+ block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+ last_block = (i_size_read(inode) - 1) >> bbits;
/*
* Get all the dirty buffers mapped to disk addresses and
BUG_ON(to > PAGE_CACHE_SIZE);
BUG_ON(from > to);
- blocksize = 1 << inode->i_blkbits;
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
- head = page_buffers(page);
+ head = create_page_buffers(page, inode, 0);
+ blocksize = head->b_size;
+ bbits = block_size_bits(blocksize);
- bbits = inode->i_blkbits;
block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
unsigned blocksize;
struct buffer_head *bh, *head;
- blocksize = 1 << inode->i_blkbits;
+ bh = head = page_buffers(page);
+ blocksize = bh->b_size;
- for(bh = head = page_buffers(page), block_start = 0;
- bh != head || !block_start;
- block_start=block_end, bh = bh->b_this_page) {
+ block_start = 0;
+ do {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (!buffer_uptodate(bh))
mark_buffer_dirty(bh);
}
clear_buffer_new(bh);
- }
+
+ block_start = block_end;
+ bh = bh->b_this_page;
+ } while (bh != head);
/*
* If this is a partial write which happened to make all buffers
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
unsigned long from)
{
- struct inode *inode = page->mapping->host;
unsigned block_start, block_end, blocksize;
unsigned to;
struct buffer_head *bh, *head;
if (!page_has_buffers(page))
return 0;
- blocksize = 1 << inode->i_blkbits;
+ head = page_buffers(page);
+ blocksize = head->b_size;
to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
to = from + to;
if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
return 0;
- head = page_buffers(page);
bh = head;
block_start = 0;
do {
struct inode *inode = page->mapping->host;
sector_t iblock, lblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
- unsigned int blocksize;
+ unsigned int blocksize, bbits;
int nr, i;
int fully_mapped = 1;
- BUG_ON(!PageLocked(page));
- blocksize = 1 << inode->i_blkbits;
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
- head = page_buffers(page);
+ head = create_page_buffers(page, inode, 0);
+ blocksize = head->b_size;
+ bbits = block_size_bits(blocksize);
- iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
+ iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+ lblock = (i_size_read(inode)+blocksize-1) >> bbits;
bh = head;
nr = 0;
i = 0;
struct TCP_Server_Info *server;
struct page *page;
int rc = 0;
- loff_t isize = i_size_read(mapping->host);
/*
* If wsize is smaller than the page cache size, default to writing
*/
set_page_writeback(page);
- if (page_offset(page) >= isize) {
+ if (page_offset(page) >= i_size_read(mapping->host)) {
done = true;
unlock_page(page);
end_page_writeback(page);
wdata->offset = page_offset(wdata->pages[0]);
wdata->pagesz = PAGE_CACHE_SIZE;
wdata->tailsz =
- min(isize - page_offset(wdata->pages[nr_pages - 1]),
+ min(i_size_read(mapping->host) -
+ page_offset(wdata->pages[nr_pages - 1]),
(loff_t)PAGE_CACHE_SIZE);
wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
wdata->tailsz;
dentry = d_lookup(parent, name);
if (dentry) {
+ int err;
inode = dentry->d_inode;
/* update inode in place if i_ino didn't change */
if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
cifs_fattr_to_inode(inode, fattr);
return dentry;
}
- d_drop(dentry);
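+		/*
+		 * Unhashing a busy dentry here would leave it in use but
+		 * unreachable, so try a full invalidation and give up on
+		 * priming this entry if that fails.
+		 */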
+ err = d_invalidate(dentry);
dput(dentry);
+ if (err)
+ return NULL;
}
dentry = d_alloc(parent, name);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
struct cifs_tcon *tcon;
- FILE_BASIC_INFO info_buf;
/* if the file is already open for write, just use that fileid */
open_file = find_writable_file(cinode, true);
netpid = current->tgid;
set_via_filehandle:
- rc = CIFSSMBSetFileInfo(xid, tcon, &info_buf, netfid, netpid);
+ rc = CIFSSMBSetFileInfo(xid, tcon, buf, netfid, netpid);
if (!rc)
cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
sector_t fs_endblk; /* Into file, in filesystem-sized blocks */
unsigned long fs_count; /* Number of filesystem-sized blocks */
int create;
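+	/*
+	 * The dio already carries a snapshot of the block size; don't
+	 * re-read inode->i_blkbits here, it can change under us.
+	 */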
+ unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
/*
* If there was a memory error and we've overwritten all the
fs_count = fs_endblk - fs_startblk + 1;
map_bh->b_state = 0;
- map_bh->b_size = fs_count << dio->inode->i_blkbits;
+ map_bh->b_size = fs_count << i_blkbits;
/*
* For writes inside i_size on a DIO_SKIP_HOLES filesystem we
int seg;
size_t size;
unsigned long addr;
- unsigned blkbits = inode->i_blkbits;
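+	/* read i_blkbits once: the block size can change concurrently */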
+ unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
+ unsigned blkbits = i_blkbits;
unsigned blocksize_mask = (1 << blkbits) - 1;
ssize_t retval = -EINVAL;
loff_t end = offset;
dio->inode = inode;
dio->rw = rw;
sdio.blkbits = blkbits;
- sdio.blkfactor = inode->i_blkbits - blkbits;
+ sdio.blkfactor = i_blkbits - blkbits;
sdio.block_in_file = offset >> blkbits;
sdio.get_block = get_block;
const void *p)
{
struct fdtable *fdt;
- struct file *file;
int res = 0;
if (!files)
return 0;
spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- while (!res && n < fdt->max_fds) {
- file = rcu_dereference_check_fdtable(files, fdt->fd[n++]);
- if (file)
- res = f(p, file, n);
+ for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
+ struct file *file;
+ file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
+ if (!file)
+ continue;
+ res = f(p, file, n);
+ if (res)
+ break;
}
spin_unlock(&files->file_lock);
return res;
if (!len)
return ERR_PTR(-EACCES);
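+	/* "." and ".." can never be valid names */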
+ if (unlikely(name[0] == '.')) {
+ if (len < 2 || (len == 2 && name[1] == '.'))
+ return ERR_PTR(-EACCES);
+ }
+
while (len--) {
c = *(const unsigned char *)name++;
if (c == '/' || c == '\0')
nfs_refresh_inode(dentry->d_inode, entry->fattr);
goto out;
} else {
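+		/*
+		 * A busy dentry cannot be invalidated; leave it alone in
+		 * that case.
+		 */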
- d_drop(dentry);
+ if (d_invalidate(dentry) != 0)
+ goto out;
dput(dentry);
}
}
out_zap_parent:
nfs_zap_caches(dir);
out_bad:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
nfs_mark_for_revalidate(dir);
if (inode && S_ISDIR(inode->i_mode)) {
/* Purge readdir caches. */
shrink_dcache_parent(dentry);
}
d_drop(dentry);
- nfs_free_fattr(fattr);
- nfs_free_fhandle(fhandle);
dput(parent);
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
__func__, dentry->d_parent->d_name.name,
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
- /* A semaphore that prevents I/O while block size is being changed */
- struct percpu_rw_semaphore bd_block_size_semaphore;
};
/*
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
-extern sector_t blkdev_max_block(struct block_device *bdev);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);
unsigned long *nr_segs, size_t *count, int access_flags);
/* fs/block_dev.c */
-extern ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos);
extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
-#define ___GFP_NOTRACK 0x200000u
-#define ___GFP_NO_KSWAPD 0x400000u
-#define ___GFP_OTHER_NODE 0x800000u
-#define ___GFP_WRITE 0x1000000u
+#define ___GFP_NOTRACK 0x100000u
+#define ___GFP_OTHER_NODE 0x200000u
+#define ___GFP_WRITE 0x400000u
/*
* GFP bitmasks..
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
-#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
*/
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */
__GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
- __GFP_NO_KSWAPD)
+ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#ifndef _LINUX_HW_BREAKPOINT_H
#define _LINUX_HW_BREAKPOINT_H
-enum {
- HW_BREAKPOINT_LEN_1 = 1,
- HW_BREAKPOINT_LEN_2 = 2,
- HW_BREAKPOINT_LEN_4 = 4,
- HW_BREAKPOINT_LEN_8 = 8,
-};
-
-enum {
- HW_BREAKPOINT_EMPTY = 0,
- HW_BREAKPOINT_R = 1,
- HW_BREAKPOINT_W = 2,
- HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
- HW_BREAKPOINT_X = 4,
- HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
-};
-
-enum bp_type_idx {
- TYPE_INST = 0,
-#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
- TYPE_DATA = 0,
-#else
- TYPE_DATA = 1,
-#endif
- TYPE_MAX
-};
-
-#ifdef __KERNEL__
-
#include <linux/perf_event.h>
+#include <uapi/linux/hw_breakpoint.h>
#ifdef CONFIG_HAVE_HW_BREAKPOINT
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#endif /* __KERNEL__ */
-
#endif /* _LINUX_HW_BREAKPOINT_H */
};
#define light_mb() barrier()
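+/*
+ * heavy_mb() pairs with light_mb() on the reader side; the expedited
+ * grace period is presumably chosen so the writer-side paths do not
+ * stall for a full synchronize_sched().
+ */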
-#define heavy_mb() synchronize_sched()
+#define heavy_mb() synchronize_sched_expedited()
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
{
mutex_lock(&p->mtx);
p->locked = true;
- synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
+ synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
while (__percpu_count(p->counters))
msleep(1);
heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
{(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
{(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
{(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
- {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
{(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
) : "GFP_NOWAIT"
header-y += x25.h
header-y += xattr.h
header-y += xfrm.h
+header-y += hw_breakpoint.h
--- /dev/null
+#ifndef _UAPI_LINUX_HW_BREAKPOINT_H
+#define _UAPI_LINUX_HW_BREAKPOINT_H
+
+enum {
+ HW_BREAKPOINT_LEN_1 = 1,
+ HW_BREAKPOINT_LEN_2 = 2,
+ HW_BREAKPOINT_LEN_4 = 4,
+ HW_BREAKPOINT_LEN_8 = 8,
+};
+
+enum {
+ HW_BREAKPOINT_EMPTY = 0,
+ HW_BREAKPOINT_R = 1,
+ HW_BREAKPOINT_W = 2,
+ HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+ HW_BREAKPOINT_X = 4,
+ HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+ TYPE_INST = 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+ TYPE_DATA = 0,
+#else
+ TYPE_DATA = 1,
+#endif
+ TYPE_MAX
+};
+
+#endif /* _UAPI_LINUX_HW_BREAKPOINT_H */
* Count the number of breakpoints of the same type and same task.
* The given event must be not on the list.
*/
-static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
+static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
struct task_struct *tsk = bp->hw.bp_target;
struct perf_event *iter;
int count = 0;
list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
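+		/*
+		 * Only events bound to @cpu are counted, so that the
+		 * pinned-slot accounting stays correct per cpu.
+		 */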
- if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
+ if (iter->hw.bp_target == tsk &&
+ find_slot_idx(iter) == type &&
+ cpu == iter->cpu)
count += hw_breakpoint_weight(iter);
}
if (!tsk)
slots->pinned += max_task_bp_pinned(cpu, type);
else
- slots->pinned += task_bp_pinned(bp, type);
+ slots->pinned += task_bp_pinned(cpu, bp, type);
slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
return;
if (!tsk)
nr += max_task_bp_pinned(cpu, type);
else
- nr += task_bp_pinned(bp, type);
+ nr += task_bp_pinned(cpu, bp, type);
if (nr > slots->pinned)
slots->pinned = nr;
int old_idx = 0;
int idx = 0;
- old_count = task_bp_pinned(bp, type);
+ old_count = task_bp_pinned(cpu, bp, type);
old_idx = old_count - 1;
idx = old_idx + weight;
p->signal->autogroup = autogroup_kref_get(ag);
- if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
- goto out;
-
t = p;
do {
sched_move_task(t);
} while_each_thread(p, t);
-out:
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
}
#include <linux/rwsem.h>
struct autogroup {
- /*
- * reference doesn't mean how many thread attach to this
- * autogroup now. It just stands for the number of task
- * could use this autogroup.
- */
struct kref kref;
struct task_group *tg;
struct rw_semaphore lock;
BUG_ON(timer_pending(timer));
BUG_ON(!list_empty(&work->entry));
+ /*
+ * If @delay is 0, queue @dwork->work immediately. This is for
+ * both optimization and correctness. The earliest @timer can
+	 * expire is on the closest next tick, and delayed_work users
+	 * depend on there being no such delay when @delay is 0.
+ */
+ if (!delay) {
+ __queue_work(cpu, wq, &dwork->work);
+ return;
+ }
+
timer_stats_timer_set_start_info(&dwork->timer);
/*
bool ret = false;
unsigned long flags;
- if (!delay)
- return queue_work_on(cpu, wq, &dwork->work);
-
/* read the comment in __queue_work() */
local_irq_save(flags);
repeat:
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
+ if (kthread_should_stop()) {
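+		/* undo the TASK_INTERRUPTIBLE set above before returning */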
+ __set_current_state(TASK_RUNNING);
return 0;
+ }
/*
* See whether any cpu is asking for help. Unbounded
{
int ret;
unsigned long pfn = page_to_pfn(page);
+ struct page *hpage = compound_trans_head(page);
if (PageHuge(page))
return soft_offline_huge_page(page, flags);
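+	/*
+	 * Soft offlining works on base pages, so an anonymous THP must be
+	 * split up front; fail if the split does not succeed.
+	 */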
+ if (PageTransHuge(hpage)) {
+ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+ pr_info("soft offline: %#lx: failed to split THP\n",
+ pfn);
+ return -EBUSY;
+ }
+ }
ret = get_any_page(page, pfn, flags);
if (ret < 0)
}
}
- return 1UL << order;
+ return 1UL << alloc_order;
}
/*
return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
}
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
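+	/*
+	 * With __GFP_NO_KSWAPD gone, THP allocations are recognised by
+	 * their shape: pageblock order, movable, and no __GFP_REPEAT.
+	 */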
+ if (order == pageblock_order &&
+ (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+ return true;
+ return false;
+}
+
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
goto nopage;
restart:
- if (!(gfp_mask & __GFP_NO_KSWAPD))
+ /* The decision whether to wake kswapd for THP is made later */
+ if (!is_thp_alloc(gfp_mask, order))
wake_all_kswapd(order, zonelist, high_zoneidx,
- zone_idx(preferred_zone));
+ zone_idx(preferred_zone));
/*
* OK, we're below the kswapd watermark and have kicked background
goto got_pg;
sync_migration = true;
- /*
- * If compaction is deferred for high-order allocations, it is because
- * sync compaction recently failed. In this is the case and the caller
- * requested a movable allocation that does not heavily disrupt the
- * system then fail the allocation instead of entering direct reclaim.
- */
- if ((deferred_compaction || contended_compaction) &&
- (gfp_mask & __GFP_NO_KSWAPD))
- goto nopage;
+ if (is_thp_alloc(gfp_mask, order)) {
+ /*
+ * If compaction is deferred for high-order allocations, it is
+ * because sync compaction recently failed. If this is the case
+ * and the caller requested a movable allocation that does not
+ * heavily disrupt the system then fail the allocation instead
+ * of entering direct reclaim.
+ */
+ if (deferred_compaction || contended_compaction)
+ goto nopage;
+
+ /* If process is willing to reclaim/compact then wake kswapd */
+ wake_all_kswapd(order, zonelist, high_zoneidx,
+ zone_idx(preferred_zone));
+ }
/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order,
{
return; /* XXX: Not implemented yet */
}
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
}
#else
get_order(sizeof(struct page) * nr_pages));
}
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
unsigned long maps_section_nr, removing_section_nr, i;
unsigned long magic;
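+	/* the memmap itself was allocated from bootmem; find its pages */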
+ struct page *page = virt_to_page(memmap);
for (i = 0; i < nr_pages; i++, page++) {
magic = (unsigned long) page->lru.next;
*/
if (memmap) {
- struct page *memmap_page;
- memmap_page = virt_to_page(memmap);
-
nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
>> PAGE_SHIFT;
- free_map_bootmem(memmap_page, nr_pages);
+ free_map_bootmem(memmap, nr_pages);
}
}
} while (memcg);
}
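+/*
+ * A zone is balanced when it is above the high watermark plus any
+ * requested balance gap and, for order > 0, when compaction would have
+ * a chance of succeeding on it.
+ */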
+static bool zone_balanced(struct zone *zone, int order,
+ unsigned long balance_gap, int classzone_idx)
+{
+ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+ balance_gap, classzone_idx, 0))
+ return false;
+
+ if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+ return false;
+
+ return true;
+}
+
/*
* pgdat_balanced is used when checking if a node is balanced for high-order
* allocations. Only zones that meet watermarks and are in a zone allowed
continue;
}
- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
- i, 0))
+ if (!zone_balanced(zone, order, 0, i))
all_zones_ok = false;
else
balanced += zone->present_pages;
break;
}
- if (!zone_watermark_ok_safe(zone, order,
- high_wmark_pages(zone), 0, 0)) {
+ if (!zone_balanced(zone, order, 0, 0)) {
end_zone = i;
break;
} else {
testorder = 0;
if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
- !zone_watermark_ok_safe(zone, testorder,
- high_wmark_pages(zone) + balance_gap,
- end_zone, 0)) {
+ !zone_balanced(zone, testorder,
+ balance_gap, end_zone)) {
shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
continue;
}
- if (!zone_watermark_ok_safe(zone, testorder,
- high_wmark_pages(zone), end_zone, 0)) {
+ if (!zone_balanced(zone, testorder, 0, end_zone)) {
all_zones_ok = 0;
/*
* We are still under min water mark. This
op->sk = sk;
op->ifindex = ifindex;
+ /* ifindex for timeout events w/o previous frame reception */
+ op->rx_ifindex = ifindex;
+
/* initialize uninitialized (kzalloc) structure */
hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
op->timer.function = bcm_rx_timeout_handler;
struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
rc = inet_peer_xrlim_allow(peer,
net->ipv4.sysctl_icmp_ratelimit);
- inet_putpeer(peer);
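+		/* inet_getpeer_v4() may fail and return NULL */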
+ if (peer)
+ inet_putpeer(peer);
}
out:
return rc;
if (get_user(v, (u32 __user *)optval))
return -EFAULT;
+ /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ if (v != RT_TABLE_DEFAULT && v >= 1000000000)
+ return -EINVAL;
+
rtnl_lock();
ret = 0;
if (sk == rtnl_dereference(mrt->mroute_sk)) {
return mss_now;
}
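+/*
+ * Operate on a single page plus offset/size, mirroring the
+ * ->sendpage() calling convention, instead of an array of pages.
+ */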
-static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
- size_t psize, int flags)
+static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
int mss_now, size_goal;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
- while (psize > 0) {
+ while (size > 0) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
- struct page *page = pages[poffset / PAGE_SIZE];
int copy, i;
- int offset = poffset % PAGE_SIZE;
- int size = min_t(size_t, psize, PAGE_SIZE - offset);
bool can_coalesce;
if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
copied += copy;
- poffset += copy;
- if (!(psize -= copy))
+ offset += copy;
+ if (!(size -= copy))
goto out;
if (skb->len < size_goal || (flags & MSG_OOB))
flags);
lock_sock(sk);
- res = do_tcp_sendpages(sk, &page, offset, size, flags);
+ res = do_tcp_sendpages(sk, page, offset, size, flags);
release_sock(sk);
return res;
}
lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
if (lsap == NULL) {
IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__);
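+		/* don't leak the TSAP allocated above */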
+ __irttp_close_tsap(self);
return NULL;
}
list_move_tail(&roc->list, &tmp_list);
roc->abort = true;
}
-
- ieee80211_start_next_roc(local);
mutex_unlock(&local->mtx);
list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING,
- .len = IPSET_MAXNAMELEN - 1 },
+ .len = IFNAMSIZ - 1 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
/* We only match on the lower 8 bits of the opcode. */
if (ntohs(arp->ar_op) <= 0xff)
key->ip.proto = ntohs(arp->ar_op);
-
- if (key->ip.proto == ARPOP_REQUEST
- || key->ip.proto == ARPOP_REPLY) {
- memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
- memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
- memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
- memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
- key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
- }
+ memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
+ memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
+ memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
+ memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
}
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
- ovs_dp_name(vport->dp),
+ netdev_vport->dev->name,
packet_length(skb), mtu);
goto error;
}
msg = sctp_datamsg_new(GFP_KERNEL);
if (!msg)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Note: Calculate this outside of the loop, so that all fragments
* have the same expiration.
chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
- if (!chunk)
+ if (!chunk) {
+ err = -ENOMEM;
goto errout;
+ }
+
err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
if (err < 0)
- goto errout;
+ goto errout_chunk_free;
offset += len;
chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
- if (!chunk)
+ if (!chunk) {
+ err = -ENOMEM;
goto errout;
+ }
err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);
__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
- (__u8 *)chunk->skb->data);
if (err < 0)
- goto errout;
+ goto errout_chunk_free;
sctp_datamsg_assign(msg, chunk);
list_add_tail(&chunk->frag_list, &msg->chunks);
return msg;
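+	/*
+	 * The chunk that failed was never linked into msg->chunks; the
+	 * errout path below only frees chunks already on the list.
+	 */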
+errout_chunk_free:
+ sctp_chunk_free(chunk);
+
errout:
list_for_each_safe(pos, temp, &msg->chunks) {
list_del_init(pos);
sctp_chunk_free(chunk);
}
sctp_datamsg_put(msg);
- return NULL;
+ return ERR_PTR(err);
}
/* Check whether this message has expired. */
/* Break the message into multiple chunks of maximum size. */
datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
- if (!datamsg) {
- err = -ENOMEM;
+ if (IS_ERR(datamsg)) {
+ err = PTR_ERR(datamsg);
goto out_free;
}
* 1/8, rto_alpha would be expressed as 3.
*/
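+	/*
+	 * srtt and rtt are u32: subtract as s64 so a negative difference
+	 * is not destroyed by unsigned wraparound before abs64() runs.
+	 */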
tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
- + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta);
+ + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
+ (rtt >> net->sctp.rto_alpha);
} else {
@echo ' clean: a summary clean target to clean _all_ folders'
cpupower: FORCE
- $(QUIET_SUBDIR0)power/$@/ $(QUIET_SUBDIR1)
+ $(call descend,power/$@)
firewire lguest perf usb virtio vm: FORCE
- $(QUIET_SUBDIR0)$@/ $(QUIET_SUBDIR1)
+ $(call descend,$@)
selftests: FORCE
- $(QUIET_SUBDIR0)testing/$@/ $(QUIET_SUBDIR1)
+ $(call descend,testing/$@)
turbostat x86_energy_perf_policy: FORCE
- $(QUIET_SUBDIR0)power/x86/$@/ $(QUIET_SUBDIR1)
+ $(call descend,power/x86/$@)
cpupower_install:
- $(QUIET_SUBDIR0)power/$(@:_install=)/ $(QUIET_SUBDIR1) install
+ $(call descend,power/$(@:_install=),install)
firewire_install lguest_install perf_install usb_install virtio_install vm_install:
- $(QUIET_SUBDIR0)$(@:_install=)/ $(QUIET_SUBDIR1) install
+ $(call descend,$(@:_install=),install)
selftests_install:
- $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) install
+ $(call descend,testing/$(@:_clean=),install)
turbostat_install x86_energy_perf_policy_install:
- $(QUIET_SUBDIR0)power/x86/$(@:_install=)/ $(QUIET_SUBDIR1) install
+ $(call descend,power/x86/$(@:_install=),install)
install: cpupower_install firewire_install lguest_install perf_install \
selftests_install turbostat_install usb_install virtio_install \
vm_install x86_energy_perf_policy_install
cpupower_clean:
- $(QUIET_SUBDIR0)power/cpupower/ $(QUIET_SUBDIR1) clean
+ $(call descend,power/cpupower,clean)
firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
- $(QUIET_SUBDIR0)$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+ $(call descend,$(@:_clean=),clean)
selftests_clean:
- $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+ $(call descend,testing/$(@:_clean=),clean)
turbostat_clean x86_energy_perf_policy_clean:
- $(QUIET_SUBDIR0)power/x86/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+ $(call descend,power/x86/$(@:_clean=),clean)
clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \
turbostat_clean usb_clean virtio_clean vm_clean \
### --- END CONFIGURATION SECTION ---
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+ifneq ($(objtree),)
+#$(info Determined 'objtree' to be $(objtree))
+endif
+
+ifneq ($(OUTPUT),)
+#$(info Determined 'OUTPUT' to be $(OUTPUT))
+endif
+
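+# For out-of-tree builds, the generated uapi headers in the object tree
+# have to be found ahead of the static copies in the source tree.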
+BASIC_CFLAGS = \
+ -Iutil/include \
+ -Iarch/$(ARCH)/include \
+ $(if $(objtree),-I$(objtree)/arch/$(ARCH)/include/generated/uapi) \
+ -I$(srctree)/arch/$(ARCH)/include/uapi \
+ -I$(srctree)/arch/$(ARCH)/include \
+ $(if $(objtree),-I$(objtree)/include/generated/uapi) \
+ -I$(srctree)/include/uapi \
+ -I$(srctree)/include \
+ -I$(OUTPUT)util \
+ -Iutil \
+ -I. \
+ -I$(TRACE_EVENT_DIR) \
+ -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
BASIC_LDFLAGS =
# Guard against environment variables
#include <stdlib.h>
#include "../../util/types.h"
-#include "../../../../../arch/x86/include/asm/perf_regs.h"
+#include <asm/perf_regs.h>
#ifndef ARCH_X86_64
#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
#include <pthread.h>
#include <math.h>
-#include "../../arch/x86/include/asm/svm.h"
-#include "../../arch/x86/include/asm/vmx.h"
-#include "../../arch/x86/include/asm/kvm.h"
+#if defined(__i386__) || defined(__x86_64__)
+#include <asm/svm.h>
+#include <asm/vmx.h>
+#include <asm/kvm.h>
struct event_key {
#define INVALID_KEY (~0ULL)
};
-struct perf_kvm;
+struct perf_kvm_stat;
struct kvm_events_ops {
bool (*is_begin_event)(struct perf_evsel *evsel,
struct event_key *key);
bool (*is_end_event)(struct perf_evsel *evsel,
struct perf_sample *sample, struct event_key *key);
- void (*decode_key)(struct perf_kvm *kvm, struct event_key *key,
+ void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
char decode[20]);
const char *name;
};
#define EVENTS_BITS 12
#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS)
-struct perf_kvm {
+struct perf_kvm_stat {
struct perf_tool tool;
struct perf_session *session;
SVM_EXIT_REASONS
};
-static const char *get_exit_reason(struct perf_kvm *kvm, u64 exit_code)
+static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code)
{
int i = kvm->exit_reasons_size;
struct exit_reasons_table *tbl = kvm->exit_reasons;
return "UNKNOWN";
}
-static void exit_event_decode_key(struct perf_kvm *kvm,
+static void exit_event_decode_key(struct perf_kvm_stat *kvm,
struct event_key *key,
char decode[20])
{
return false;
}
-static void mmio_event_decode_key(struct perf_kvm *kvm __maybe_unused,
+static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
struct event_key *key,
char decode[20])
{
return kvm_entry_event(evsel);
}
-static void ioport_event_decode_key(struct perf_kvm *kvm __maybe_unused,
+static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
struct event_key *key,
char decode[20])
{
.name = "IO Port Access"
};
-static bool register_kvm_events_ops(struct perf_kvm *kvm)
+static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
bool ret = true;
};
-static void init_kvm_event_record(struct perf_kvm *kvm)
+static void init_kvm_event_record(struct perf_kvm_stat *kvm)
{
int i;
return event;
}
-static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm,
+static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
struct event_key *key)
{
struct kvm_event *event;
return event;
}
-static bool handle_begin_event(struct perf_kvm *kvm,
+static bool handle_begin_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key, u64 timestamp)
{
return true;
}
-static bool handle_end_event(struct perf_kvm *kvm,
+static bool handle_end_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key,
u64 timestamp)
return thread->priv;
}
-static bool handle_kvm_event(struct perf_kvm *kvm,
+static bool handle_kvm_event(struct perf_kvm_stat *kvm,
struct thread *thread,
struct perf_evsel *evsel,
struct perf_sample *sample)
{ NULL, NULL }
};
-static bool select_key(struct perf_kvm *kvm)
+static bool select_key(struct perf_kvm_stat *kvm)
{
int i;
rb_insert_color(&event->rb, result);
}
-static void update_total_count(struct perf_kvm *kvm, struct kvm_event *event)
+static void
+update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event)
{
int vcpu = kvm->trace_vcpu;
return !!get_event_count(event, vcpu);
}
-static void sort_result(struct perf_kvm *kvm)
+static void sort_result(struct perf_kvm_stat *kvm)
{
unsigned int i;
int vcpu = kvm->trace_vcpu;
pr_info("VCPU %d:\n\n", vcpu);
}
-static void print_result(struct perf_kvm *kvm)
+static void print_result(struct perf_kvm_stat *kvm)
{
char decode[20];
struct kvm_event *event;
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, sample->tid);
- struct perf_kvm *kvm = container_of(tool, struct perf_kvm, tool);
+ struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
+ tool);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
return isa;
}
-static int read_events(struct perf_kvm *kvm)
+static int read_events(struct perf_kvm_stat *kvm)
{
int ret;
return true;
}
-static int kvm_events_report_vcpu(struct perf_kvm *kvm)
+static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
{
int ret = -EINVAL;
int vcpu = kvm->trace_vcpu;
_p; \
})
-static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv)
+static int
+kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
unsigned int rec_argc, i, j;
const char **rec_argv;
return cmd_record(i, rec_argv, NULL);
}
-static int kvm_events_report(struct perf_kvm *kvm, int argc, const char **argv)
+static int
+kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
const struct option kvm_events_report_options[] = {
OPT_STRING(0, "event", &kvm->report_event, "report event",
printf("\nOtherwise, it is the alias of 'perf stat':\n");
}
-static int kvm_cmd_stat(struct perf_kvm *kvm, int argc, const char **argv)
+static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
{
+ struct perf_kvm_stat kvm = {
+ .file_name = file_name,
+
+ .trace_vcpu = -1,
+ .report_event = "vmexit",
+ .sort_key = "sample",
+
+ .exit_reasons = svm_exit_reasons,
+ .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
+ .exit_reasons_isa = "SVM",
+ };
+
if (argc == 1) {
print_kvm_stat_usage();
goto perf_stat;
}
if (!strncmp(argv[1], "rec", 3))
- return kvm_events_record(kvm, argc - 1, argv + 1);
+ return kvm_events_record(&kvm, argc - 1, argv + 1);
if (!strncmp(argv[1], "rep", 3))
- return kvm_events_report(kvm, argc - 1 , argv + 1);
+ return kvm_events_report(&kvm, argc - 1 , argv + 1);
perf_stat:
return cmd_stat(argc, argv, NULL);
}
+#endif /* defined(__i386__) || defined(__x86_64__) */
-static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv)
+static int __cmd_record(const char *file_name, int argc, const char **argv)
{
int rec_argc, i = 0, j;
const char **rec_argv;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
rec_argv[i++] = strdup("record");
rec_argv[i++] = strdup("-o");
- rec_argv[i++] = strdup(kvm->file_name);
+ rec_argv[i++] = strdup(file_name);
for (j = 1; j < argc; j++, i++)
rec_argv[i] = argv[j];
return cmd_record(i, rec_argv, NULL);
}
-static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv)
+static int __cmd_report(const char *file_name, int argc, const char **argv)
{
int rec_argc, i = 0, j;
const char **rec_argv;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
rec_argv[i++] = strdup("report");
rec_argv[i++] = strdup("-i");
- rec_argv[i++] = strdup(kvm->file_name);
+ rec_argv[i++] = strdup(file_name);
for (j = 1; j < argc; j++, i++)
rec_argv[i] = argv[j];
return cmd_report(i, rec_argv, NULL);
}
-static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv)
+static int
+__cmd_buildid_list(const char *file_name, int argc, const char **argv)
{
int rec_argc, i = 0, j;
const char **rec_argv;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
rec_argv[i++] = strdup("buildid-list");
rec_argv[i++] = strdup("-i");
- rec_argv[i++] = strdup(kvm->file_name);
+ rec_argv[i++] = strdup(file_name);
for (j = 1; j < argc; j++, i++)
rec_argv[i] = argv[j];
int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
{
- struct perf_kvm kvm = {
- .trace_vcpu = -1,
- .report_event = "vmexit",
- .sort_key = "sample",
-
- .exit_reasons = svm_exit_reasons,
- .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
- .exit_reasons_isa = "SVM",
- };
+	const char *file_name = NULL;
const struct option kvm_options[] = {
- OPT_STRING('i', "input", &kvm.file_name, "file",
+ OPT_STRING('i', "input", &file_name, "file",
"Input file name"),
- OPT_STRING('o', "output", &kvm.file_name, "file",
+ OPT_STRING('o', "output", &file_name, "file",
"Output file name"),
OPT_BOOLEAN(0, "guest", &perf_guest,
"Collect guest os data"),
if (!perf_host)
perf_guest = 1;
- if (!kvm.file_name) {
+ if (!file_name) {
if (perf_host && !perf_guest)
- kvm.file_name = strdup("perf.data.host");
+ file_name = strdup("perf.data.host");
else if (!perf_host && perf_guest)
- kvm.file_name = strdup("perf.data.guest");
+ file_name = strdup("perf.data.guest");
else
- kvm.file_name = strdup("perf.data.kvm");
+ file_name = strdup("perf.data.kvm");
- if (!kvm.file_name) {
+ if (!file_name) {
pr_err("Failed to allocate memory for filename\n");
return -ENOMEM;
}
}
if (!strncmp(argv[0], "rec", 3))
- return __cmd_record(&kvm, argc, argv);
+ return __cmd_record(file_name, argc, argv);
else if (!strncmp(argv[0], "rep", 3))
- return __cmd_report(&kvm, argc, argv);
+ return __cmd_report(file_name, argc, argv);
else if (!strncmp(argv[0], "diff", 4))
return cmd_diff(argc, argv, NULL);
else if (!strncmp(argv[0], "top", 3))
return cmd_top(argc, argv, NULL);
else if (!strncmp(argv[0], "buildid-list", 12))
- return __cmd_buildid_list(&kvm, argc, argv);
+ return __cmd_buildid_list(file_name, argc, argv);
+#if defined(__i386__) || defined(__x86_64__)
else if (!strncmp(argv[0], "stat", 4))
- return kvm_cmd_stat(&kvm, argc, argv);
+ return kvm_cmd_stat(file_name, argc, argv);
+#endif
else
usage_with_options(kvm_usage, kvm_options);
#include "util/thread_map.h"
#include "util/pmu.h"
#include "event-parse.h"
-#include "../../include/linux/hw_breakpoint.h"
+#include <linux/hw_breakpoint.h>
#include <sys/mman.h>
void get_term_dimensions(struct winsize *ws);
+#include <asm/unistd.h>
+
#if defined(__i386__)
-#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#endif
#if defined(__x86_64__)
-#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#endif
#ifdef __powerpc__
-#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb() asm volatile ("sync" ::: "memory")
#define cpu_relax() asm volatile ("" ::: "memory");
#define CPUINFO_PROC "cpu"
#endif
#ifdef __s390__
-#include "../../arch/s390/include/asm/unistd.h"
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory");
#endif
#ifdef __sh__
-#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb() asm volatile("synco" ::: "memory")
#else
#endif
#ifdef __hppa__
-#include "../../arch/parisc/include/asm/unistd.h"
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory");
#define CPUINFO_PROC "cpu"
#endif
#ifdef __sparc__
-#include "../../arch/sparc/include/uapi/asm/unistd.h"
#define rmb() asm volatile("":::"memory")
#define cpu_relax() asm volatile("":::"memory")
#define CPUINFO_PROC "cpu"
#endif
#ifdef __alpha__
-#include "../../arch/alpha/include/asm/unistd.h"
#define rmb() asm volatile("mb" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif
#ifdef __ia64__
-#include "../../arch/ia64/include/asm/unistd.h"
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif
#ifdef __arm__
-#include "../../arch/arm/include/asm/unistd.h"
/*
* Use the __kuser_memory_barrier helper in the CPU helper page. See
* arch/arm/kernel/entry-armv.S in the kernel source for details.
#endif
#ifdef __aarch64__
-#include "../../arch/arm64/include/asm/unistd.h"
#define rmb() asm volatile("dmb ld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif
#ifdef __mips__
-#include "../../arch/mips/include/asm/unistd.h"
#define rmb() asm volatile( \
".set mips2\n\t" \
"sync\n\t" \
#include <sys/types.h>
#include <sys/syscall.h>
-#include "../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
-#include "../../../include/linux/hw_breakpoint.h"
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
#include "perf_regs.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#include <linux/list.h>
#include <stdbool.h>
-#include "../../../include/uapi/linux/perf_event.h"
+#include <stddef.h>
+#include <linux/perf_event.h>
#include "types.h"
#include "xyarray.h"
#include "cgroup.h"
str = tmp + 1;
fprintf(fp, "# node%u cpu list : %s\n", c, str);
+
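+		/* step over this node's NUL-terminated cpu list */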
+ str += strlen(str) + 1;
}
return;
error:
#ifndef __PERF_HEADER_H
#define __PERF_HEADER_H
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
#include <sys/types.h>
#include <stdbool.h>
#include "types.h"
#include "evsel.h"
#include "evlist.h"
#include "sysfs.h"
-#include "../../../include/linux/hw_breakpoint.h"
+#include <linux/hw_breakpoint.h>
#define TEST_ASSERT_VAL(text, cond) \
do { \
-#include "../../../include/linux/hw_breakpoint.h"
+#include <linux/hw_breakpoint.h>
#include "util.h"
#include "../perf.h"
#include "evlist.h"
#include <linux/list.h>
#include <stdbool.h>
#include "types.h"
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
#include "types.h"
struct list_head;
#define __PMU_H
#include <linux/bitops.h>
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
#include "symbol.h"
#include "thread.h"
#include <linux/rbtree.h>
-#include "../../../include/uapi/linux/perf_event.h"
+#include <linux/perf_event.h>
struct sample_queue;
struct ip_callchain;
if (!strbuf_avail(sb))
strbuf_grow(sb, 64);
va_start(ap, fmt);
- len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
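+	/*
+	 * vsnprintf, unlike vscnprintf, returns the length the output
+	 * would have needed, which the grow-and-retry logic below relies
+	 * on.
+	 */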
+ len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
va_end(ap);
if (len < 0)
- die("your vscnprintf is broken");
+ die("your vsnprintf is broken");
if (len > strbuf_avail(sb)) {
strbuf_grow(sb, len);
va_start(ap, fmt);
- len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+ len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
va_end(ap);
if (len > strbuf_avail(sb)) {
- die("this should not happen, your snprintf is broken");
+ die("this should not happen, your vsnprintf is broken");
}
}
strbuf_setlen(sb, sb->len + len);
-ifeq ("$(origin O)", "command line")
+ifeq ($(origin O), command line)
dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
ABSOLUTE_O := $(shell cd $(O) ; pwd)
- OUTPUT := $(ABSOLUTE_O)/
+ OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
COMMAND_O := O=$(ABSOLUTE_O)
+ifeq ($(objtree),)
+ objtree := $(O)
+endif
endif
ifneq ($(OUTPUT),)
NO_SUBDIR = :
endif
-QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
+#
+# Define a callable command for descending to a new directory
+#
+# Call by doing: $(call descend,directory[,target])
+#
+descend = \
+ +mkdir -p $(OUTPUT)$(1) && \
+ $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
+
+QUIET_SUBDIR0 = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir
QUIET_SUBDIR1 =
ifneq ($(findstring $(MAKEFLAGS),s),s)
$(MAKE) $(PRINT_DIR) -C $$subdir
QUIET_FLEX = @echo ' ' FLEX $@;
QUIET_BISON = @echo ' ' BISON $@;
+
+ descend = \
+ @echo ' ' DESCEND $(1); \
+ mkdir -p $(OUTPUT)$(1) && \
+ $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
endif
endif